Merge branch '3.0' into cpwu/3.0

This commit is contained in:
cpwu 2022-05-19 19:34:47 +08:00
commit 9840c85d47
357 changed files with 16107 additions and 13875 deletions

View File

@ -117,27 +117,29 @@ def pre_test(){
def pre_test_win(){
bat '''
hostname
ipconfig
set
date /t
time /t
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git reset --hard
git fetch || git fetch
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git reset --hard
git fetch || git fetch
git checkout -f
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout master
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout master
'''
@ -145,6 +147,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 2.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 2.0
'''
@ -152,6 +156,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 3.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 3.0
'''
@ -159,6 +165,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout develop
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout develop
'''
@ -169,30 +177,52 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout -qf FETCH_HEAD
git log -5
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout -qf FETCH_HEAD
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git log -5
'''
} else {
sh '''
echo "unmatched reposiotry ${CHANGE_URL}"
bat '''
echo "unmatched reposiotry %CHANGE_URL%"
'''
}
}
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git submodule update --init --recursive
@ -205,10 +235,15 @@ def pre_test_build_win() {
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
mkdir debug
cd debug
time /t
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64
set CL=/MP8
cmake .. -G "NMake Makefiles JOM"
jom -j 4 || exit 8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
time /t
'''
return 1
@ -226,6 +261,13 @@ pipeline {
stages {
stage('run test') {
parallel {
stage('windows test') {
agent{label " windows10_01 || windows10_02 || windows10_03 || windows10_04 "}
steps {
pre_test_win()
pre_test_build_win()
}
}
stage('linux test') {
agent{label " slave3_0 || slave15 || slave16 || slave17 "}
options { skipDefaultCheckout() }

View File

@ -5,22 +5,27 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
INSTALL(FILES ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg DESTINATION cfg)
INSTALL(FILES ${TD_SOURCE_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/src/inc/taoserror.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
ENDIF ()
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")

View File

@ -226,10 +226,10 @@ endif(${BUILD_WITH_NURAFT})
if(${BUILD_PTHREAD})
set(CMAKE_BUILD_TYPE release)
add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
add_library(pthread STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET pthread PROPERTY IMPORTED_LOCATION ${LIBRARY_OUTPUT_PATH}/pthread.lib)
add_library(pthread INTERFACE)
target_link_libraries(pthread INTERFACE libpthreadVC3)
endif()
# iconv

View File

@ -167,7 +167,7 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "td.connect.pass", "taosdata");
/*tmq_conf_set(conf, "td.connect.db", "abc1");*/
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_offset_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
assert(tmq);
return tmq;
@ -176,6 +176,7 @@ tmq_t* build_consumer() {
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
tmq_list_append(topic_list, "topic_ctb_column");
/*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
return topic_list;
}
@ -190,7 +191,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
int32_t cnt = 0;
/*clock_t startTime = clock();*/
while (running) {
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 500);
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 0);
if (tmqmessage) {
cnt++;
/*printf("get data\n");*/
@ -238,7 +239,7 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
msg_process(tmqmessage);
taos_free_result(tmqmessage);
tmq_commit(tmq, NULL, 1);
tmq_commit_async(tmq, NULL, tmq_commit_cb_print, NULL);
/*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
}
}

View File

@ -232,9 +232,11 @@ DLL_EXPORT tmq_resp_err_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t wait_time);
DLL_EXPORT tmq_resp_err_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_commit(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, int32_t async);
DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param);
#if 0
DLL_EXPORT tmq_resp_err_t tmq_commit_message(tmq_t* tmq, const tmq_message_t* tmqmessage, int32_t async);
DLL_EXPORT tmq_resp_err_t tmq_commit(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, int32_t async);
DLL_EXPORT tmq_resp_err_t tmq_seek(tmq_t *tmq, const tmq_topic_vgroup_t *offset);
#endif
@ -251,7 +253,7 @@ typedef enum tmq_conf_res_t tmq_conf_res_t;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_offset_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
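
The hunk above replaces the single tmq_commit(tmq, offsets, async) entry point with tmq_commit_sync and tmq_commit_async, and renames tmq_conf_set_offset_commit_cb to tmq_conf_set_auto_commit_cb. A minimal sketch of a poll-and-commit loop against the new declarations follows; it only uses functions shown in this commit, and the 500 ms wait and NULL offsets (commit the current position) mirror the example file earlier in the diff.

/* Sketch only: assumes the declarations above are exposed via the public taos.h. */
static void consume_and_commit(tmq_t *tmq) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 500);  /* wait up to 500 ms for a message */
    if (msg == NULL) break;                       /* nothing more to read */
    /* ... process the message ... */
    taos_free_result(msg);
    /* blocking commit of the current offsets; the non-blocking variant is
     * tmq_commit_async(tmq, NULL, tmq_commit_cb_print, NULL) as in the example above */
    tmq_commit_sync(tmq, NULL);
  }
}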

View File

@ -29,27 +29,42 @@ extern "C" {
typedef struct SSchema SSchema;
typedef struct STColumn STColumn;
typedef struct STSchema STSchema;
typedef struct SColVal SColVal;
typedef struct STSRow2 STSRow2;
typedef struct STSRowBuilder STSRowBuilder;
typedef struct SKVIdx SKVIdx;
// STSchema
// STSRow2
int32_t tEncodeTSRow(SEncoder *pEncoder, const STSRow2 *pRow);
int32_t tDecodeTSRow(SDecoder *pDecoder, STSRow2 *pRow);
typedef struct STagVal STagVal;
typedef struct STag STag;
// STSchema
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
void tTSchemaDestroy(STSchema *pTSchema);
// SColVal
#define ColValNONE ((SColVal){.type = COL_VAL_NONE, .nData = 0, .pData = NULL})
#define ColValNULL ((SColVal){.type = COL_VAL_NULL, .nData = 0, .pData = NULL})
#define ColValDATA(nData, pData) ((SColVal){.type = COL_VAL_DATA, .nData = (nData), .pData = (pData)})
// STSRow2
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow);
void tTSRowFree(STSRow2 *pRow);
int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
// STSRowBuilder
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, SSchema *pSchema, int32_t nCols);
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema);
void tTSRowBuilderClear(STSRowBuilder *pBuilder);
void tTSRowBuilderReset(STSRowBuilder *pBuilder);
int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, const uint8_t *pData, uint32_t nData);
int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, uint8_t *pData, uint32_t nData);
int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow);
// STag
int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag);
void tTagFree(STag *pTag);
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData);
int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag);
// STRUCT =================
struct STColumn {
col_id_t colId;
@ -68,31 +83,47 @@ struct STSchema {
STColumn columns[];
};
#define TSROW_HAS_NONE ((uint8_t)0x1)
#define TSROW_HAS_NULL ((uint8_t)0x2U)
#define TSROW_HAS_VAL ((uint8_t)0x4U)
#define TSROW_KV_ROW ((uint8_t)0x10U)
struct STSRow2 {
TSKEY ts;
uint32_t flags;
union {
int32_t sver;
int32_t ncols;
};
uint32_t nData;
const uint8_t *pData;
uint8_t flags;
int32_t sver;
uint32_t nData;
uint8_t *pData;
};
struct STSRowBuilder {
STColumn *pTColumn;
STSchema *pTSchema;
int32_t szBitMap1;
int32_t szBitMap2;
int32_t szKVBuf;
uint8_t *pKVBuf;
int32_t szTPBuf;
uint8_t *pTPBuf;
int32_t nCols;
int32_t kvVLen;
int32_t tpVLen;
int32_t iCol;
int32_t vlenKV;
int32_t vlenTP;
STSRow2 row;
};
#if 1 //====================================
typedef enum { COL_VAL_NONE = 0, COL_VAL_NULL = 1, COL_VAL_DATA = 2 } EColValT;
struct SColVal {
EColValT type;
uint32_t nData;
uint8_t *pData;
};
struct STagVal {
int16_t cid;
int8_t type;
uint32_t nData;
uint8_t *pData;
};
#if 1 //================================================================================================================================================
// Introduced since 3.0: a bitmap is used to represent None/Null/Norm values, while versions below 3.0 only distinguish Null/Norm and carry no bitmap.
#define TD_SUPPORT_BITMAP
#define TD_SUPPORT_READ2
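
The reworked row API above introduces STSRow2 plus an STSRowBuilder whose Init now takes (sver, nCols, pSchema). A hedged sketch of driving the builder follows; the column ids, the reset-before-put ordering, and the schema version are illustrative assumptions, not taken from this commit.

/* Illustrative sketch against the declarations above; cid 1 is assumed to be
 * the timestamp column and cid 2 an int32 value column. */
static int32_t build_one_row(SSchema *pSchema, int32_t nCols, int64_t ts, int32_t val) {
  STSRowBuilder  builder = {0};
  const STSRow2 *pRow = NULL;

  if (tTSRowBuilderInit(&builder, 1 /* sver, assumed */, nCols, pSchema) < 0) return -1;
  tTSRowBuilderReset(&builder);                                  /* start a fresh row */
  tTSRowBuilderPut(&builder, 1, (uint8_t *)&ts, sizeof(ts));     /* timestamp column */
  tTSRowBuilderPut(&builder, 2, (uint8_t *)&val, sizeof(val));   /* value column */
  if (tTSRowBuilderGetRow(&builder, &pRow) < 0) {
    tTSRowBuilderClear(&builder);
    return -1;
  }
  /* ... hand pRow to the writer ... */
  tTSRowBuilderClear(&builder);
  return 0;
}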

View File

@ -32,6 +32,7 @@ extern char tsLocalEp[];
extern uint16_t tsServerPort;
extern int32_t tsVersion;
extern int32_t tsStatusInterval;
extern int32_t tsNumOfSupportVnodes;
// common
extern int32_t tsMaxShellConns;
@ -45,7 +46,7 @@ extern bool tsPrintAuth;
extern int64_t tsTickPerMin[3];
// multi-process
extern bool tsMultiProcess;
extern int32_t tsMultiProcess;
extern int32_t tsMnodeShmSize;
extern int32_t tsVnodeShmSize;
extern int32_t tsQnodeShmSize;

View File

@ -258,6 +258,7 @@ typedef struct {
char* tblFName;
int32_t numOfRows;
int32_t affectedRows;
int64_t sver;
} SSubmitBlkRsp;
typedef struct {
@ -274,10 +275,10 @@ int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_SMA_ON ((int8_t)0x1)
#define COL_IDX_ON ((int8_t)0x2)
#define COL_VAL_SET ((int8_t)0x4)
#define COL_SMA_ON ((int8_t)0x1)
#define COL_IDX_ON ((int8_t)0x2)
#define COL_SET_NULL ((int8_t)0x10)
#define COL_SET_VAL ((int8_t)0x20)
typedef struct SSchema {
int8_t type;
int8_t flags;
@ -286,6 +287,9 @@ typedef struct SSchema {
char name[TSDB_COL_NAME_LEN];
} SSchema;
#define COL_IS_SET(FLG)  (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON)
#define SSCHMEA_TYPE(s) ((s)->type)
@ -297,6 +301,8 @@ typedef struct SSchema {
typedef struct {
int32_t nCols;
int32_t sver;
int32_t tagVer;
int32_t colVer;
SSchema* pSchema;
} SSchemaWrapper;
@ -305,6 +311,8 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
if (pSW == NULL) return pSW;
pSW->nCols = pSchemaWrapper->nCols;
pSW->sver = pSchemaWrapper->sver;
pSW->tagVer = pSchemaWrapper->tagVer;
pSW->colVer = pSchemaWrapper->colVer;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
taosMemoryFree(pSW);
@ -360,6 +368,8 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr
int32_t tlen = 0;
tlen += taosEncodeVariantI32(buf, pSW->nCols);
tlen += taosEncodeVariantI32(buf, pSW->sver);
tlen += taosEncodeVariantI32(buf, pSW->tagVer);
tlen += taosEncodeVariantI32(buf, pSW->colVer);
for (int32_t i = 0; i < pSW->nCols; i++) {
tlen += taosEncodeSSchema(buf, &pSW->pSchema[i]);
}
@ -369,6 +379,8 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr
static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapper* pSW) {
buf = taosDecodeVariantI32(buf, &pSW->nCols);
buf = taosDecodeVariantI32(buf, &pSW->sver);
buf = taosDecodeVariantI32(buf, &pSW->tagVer);
buf = taosDecodeVariantI32(buf, &pSW->colVer);
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
return NULL;
@ -383,6 +395,8 @@ static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapp
static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSchemaWrapper* pSW) {
if (tEncodeI32v(pEncoder, pSW->nCols) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->sver) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->tagVer) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->colVer) < 0) return -1;
for (int32_t i = 0; i < pSW->nCols; i++) {
if (tEncodeSSchema(pEncoder, &pSW->pSchema[i]) < 0) return -1;
}
@ -393,6 +407,8 @@ static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSch
static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
@ -403,6 +419,21 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWra
return 0;
}
static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
pSW->pSchema = (SSchema*)tDecoderMalloc(pDecoder, pSW->nCols * sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
for (int32_t i = 0; i < pSW->nCols; i++) {
if (tDecodeSSchema(pDecoder, &pSW->pSchema[i]) < 0) return -1;
}
return 0;
}
STSchema* tdGetSTSChemaFromSSChema(SSchema** pSchema, int32_t nCols);
typedef struct {
@ -438,6 +469,7 @@ int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;
int32_t verInBlock;
int32_t numOfFields;
SArray* pFields;
int32_t ttl;
@ -1463,6 +1495,7 @@ typedef struct {
typedef struct {
int64_t consumerId;
char cgroup[TSDB_CGROUP_LEN];
char clientId[256];
SArray* topicNames; // SArray<char**>
} SCMSubscribeReq;
@ -1470,6 +1503,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->consumerId);
tlen += taosEncodeString(buf, pReq->cgroup);
tlen += taosEncodeString(buf, pReq->clientId);
int32_t topicNum = taosArrayGetSize(pReq->topicNames);
tlen += taosEncodeFixedI32(buf, topicNum);
@ -1483,6 +1517,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq* pReq) {
buf = taosDecodeFixedI64(buf, &pReq->consumerId);
buf = taosDecodeStringTo(buf, pReq->cgroup);
buf = taosDecodeStringTo(buf, pReq->clientId);
int32_t topicNum;
buf = taosDecodeFixedI32(buf, &topicNum);
@ -1613,6 +1648,15 @@ typedef struct {
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
int8_t igNotExists;
} SMDropCgroupReq;
int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
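
The new SMDropCgroupReq and its (de)serializers follow the tSerializeS*/tDeserializeS* pattern used by the other requests in this header. A hedged sketch of building the request body follows; it assumes the common convention in this tree that calling a serializer with a NULL buffer returns the required length, and the topic/consumer-group names are illustrative.

/* Hedged sketch; taosMemoryCalloc/taosMemoryFree are the allocators used elsewhere in this header. */
static int32_t build_drop_cgroup_msg(void **ppBuf, int32_t *pLen) {
  SMDropCgroupReq req = {0};
  strcpy(req.topic, "topic_ctb_column");   /* illustrative topic name */
  strcpy(req.cgroup, "cg1");               /* illustrative consumer group */
  req.igNotExists = 1;

  int32_t len = tSerializeSMDropCgroupReq(NULL, 0, &req);  /* assumed: NULL buffer returns the encoded length */
  if (len < 0) return -1;
  void *buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return -1;
  tSerializeSMDropCgroupReq(buf, len, &req);
  *ppBuf = buf;
  *pLen  = len;
  return 0;
}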
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;

View File

@ -22,9 +22,10 @@
extern "C" {
#endif
typedef struct SRpcMsg SRpcMsg;
typedef struct SEpSet SEpSet;
typedef struct SMgmtWrapper SMgmtWrapper;
typedef struct SRpcMsg SRpcMsg;
typedef struct SEpSet SEpSet;
typedef struct SMgmtWrapper SMgmtWrapper;
typedef struct SRpcHandleInfo SRpcHandleInfo;
typedef enum {
QUERY_QUEUE,
@ -37,41 +38,36 @@ typedef enum {
QUEUE_MAX,
} EQueueType;
typedef int32_t (*PutToQueueFp)(void *pMgmt, SRpcMsg* pReq);
typedef int32_t (*GetQueueSizeFp)(void *pMgmt, int32_t vgId, EQueueType qtype);
typedef int32_t (*SendReqFp)(SMgmtWrapper* pWrapper, const SEpSet* epSet, SRpcMsg* pReq);
typedef int32_t (*SendMnodeReqFp)(SMgmtWrapper* pWrapper, SRpcMsg* pReq);
typedef void (*SendRspFp)(SMgmtWrapper* pWrapper, const SRpcMsg* pRsp);
typedef void (*SendMnodeRecvFp)(SMgmtWrapper* pWrapper, SRpcMsg* pReq, SRpcMsg* pRsp);
typedef void (*SendRedirectRspFp)(SMgmtWrapper* pWrapper, const SRpcMsg* pRsp, const SEpSet* pNewEpSet);
typedef void (*RegisterBrokenLinkArgFp)(SMgmtWrapper* pWrapper, SRpcMsg* pMsg);
typedef void (*ReleaseHandleFp)(SMgmtWrapper* pWrapper, void* handle, int8_t type);
typedef void (*ReportStartup)(SMgmtWrapper* pWrapper, const char* name, const char* desc);
typedef int32_t (*PutToQueueFp)(void* pMgmt, SRpcMsg* pMsg);
typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
typedef void (*SendRspFp)(SRpcMsg* pMsg);
typedef void (*SendRedirectRspFp)(SRpcMsg* pMsg, const SEpSet* pNewEpSet);
typedef void (*RegisterBrokenLinkArgFp)(SRpcMsg* pMsg);
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type);
typedef void (*ReportStartup)(const char* name, const char* desc);
typedef struct {
SMgmtWrapper* pWrapper;
void* pMgmt;
void* mgmt;
void* clientRpc;
PutToQueueFp queueFps[QUEUE_MAX];
GetQueueSizeFp qsizeFp;
SendReqFp sendReqFp;
SendRspFp sendRspFp;
SendMnodeRecvFp sendMnodeRecvFp;
SendRedirectRspFp sendRedirectRspFp;
RegisterBrokenLinkArgFp registerBrokenLinkArgFp;
ReleaseHandleFp releaseHandleFp;
ReportStartup reportStartupFp;
} SMsgCb;
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb);
int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pReq);
int32_t tmsgGetQueueSize(const SMsgCb* pMsgCb, int32_t vgId, EQueueType qtype);
int32_t tmsgSendReq(const SMsgCb* pMsgCb, const SEpSet* epSet, SRpcMsg* pReq);
void tmsgSendRsp(SRpcMsg* pRsp);
void tmsgSendMnodeRecv(SRpcMsg* pReq, SRpcMsg* pRsp);
void tmsgSendRedirectRsp(SRpcMsg* pRsp, const SEpSet* pNewEpSet);
void tmsgRegisterBrokenLinkArg(const SMsgCb* pMsgCb, SRpcMsg* pMsg);
void tmsgReleaseHandle(void* handle, int8_t type);
void tmsgSetDefault(const SMsgCb* msgcb);
int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg);
int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype);
int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
void tmsgSendRsp(SRpcMsg* pMsg);
void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet);
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
void tmsgReportStartup(const char* name, const char* desc);
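
The callbacks above lose their SMgmtWrapper/pMgmt leading argument, so callers now pass only the message (plus the SMsgCb for queueing). A hedged sketch of the new call shapes follows; the queue choice and fallback logic are illustrative, not taken from this commit.

/* Sketch of the simplified signatures declared above (tmsg.h). */
static void dispatch_msg(const SMsgCb *msgcb, const SEpSet *pEpSet, SRpcMsg *pMsg) {
  /* enqueue into a local worker queue; QUERY_QUEUE is one of the EQueueType values above */
  if (tmsgPutToQueue(msgcb, QUERY_QUEUE, pMsg) != 0) {
    /* or forward to another node: no SMgmtWrapper argument any more */
    (void)tmsgSendReq(pEpSet, pMsg);
  }
}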
#ifdef __cplusplus

View File

@ -179,6 +179,8 @@ typedef struct {
} \
} while (0)
//TODO: use varchar(0) to represent NULL type
#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL)
#define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT)
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)

View File

@ -22,66 +22,28 @@
extern "C" {
#endif
/* ------------------------ TYPES EXPOSED ---------------- */
typedef struct SDnode SDnode;
/**
* @brief Initialize the environment
* @brief Initialize the dnode
*
* @param rtype for internal debug usage, default is 0
* @return int32_t 0 for success and -1 for failure
*/
int32_t dmInit();
int32_t dmInit(int8_t rtype);
/**
* @brief Clear the environment
* @brief Cleanup the dnode
*/
void dmCleanup();
/* ------------------------ SDnode ----------------------- */
typedef struct {
int32_t numOfSupportVnodes;
uint16_t serverPort;
char dataDir[PATH_MAX];
char localEp[TSDB_EP_LEN];
char localFqdn[TSDB_FQDN_LEN];
char firstEp[TSDB_EP_LEN];
char secondEp[TSDB_EP_LEN];
SDiskCfg *disks;
int32_t numOfDisks;
int8_t ntype;
} SDnodeOpt;
typedef enum { DND_EVENT_START = 0, DND_EVENT_STOP = 1, DND_EVENT_CHILD = 2 } EDndEvent;
/**
* @brief Run dnode.
*/
int32_t dmRun();
/**
* @brief Initialize and start the dnode.
*
* @param pOption Option of the dnode.
* @return SDnode* The dnode object.
* @brief Stop dnode.
*/
SDnode *dmCreate(const SDnodeOpt *pOption);
/**
* @brief Stop and cleanup the dnode.
*
* @param pDnode The dnode object to close.
*/
void dmClose(SDnode *pDnode);
/**
* @brief Run dnode until specific event is receive.
*
* @param pDnode The dnode object to run.
*/
int32_t dmRun(SDnode *pDnode);
/**
* @brief Handle event in the dnode.
*
* @param pDnode The dnode object to close.
* @param event The event to handle.
*/
void dmSetEvent(SDnode *pDnode, EDndEvent event);
void dmStop();
#ifdef __cplusplus
}

View File

@ -88,7 +88,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad);
* @param pMsg The request msg.
* @return int32_t 0 for success, -1 for failure.
*/
int32_t mndProcessMsg(SNodeMsg *pMsg);
int32_t mndProcessMsg(SRpcMsg *pMsg);
/**
* @brief Generate machine code

View File

@ -333,23 +333,23 @@ SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
typedef struct SSdb {
SMnode *pMnode;
char *currDir;
char *syncDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t curVer;
int64_t tableVer[SDB_MAX];
int64_t maxId[SDB_MAX];
EKeyType keyTypes[SDB_MAX];
SHashObj *hashObjs[SDB_MAX];
SRWLatch locks[SDB_MAX];
SdbInsertFp insertFps[SDB_MAX];
SdbUpdateFp updateFps[SDB_MAX];
SdbDeleteFp deleteFps[SDB_MAX];
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
SMnode *pMnode;
char *currDir;
char *syncDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t curVer;
int64_t tableVer[SDB_MAX];
int64_t maxId[SDB_MAX];
EKeyType keyTypes[SDB_MAX];
SHashObj *hashObjs[SDB_MAX];
TdThreadRwlock locks[SDB_MAX];
SdbInsertFp insertFps[SDB_MAX];
SdbUpdateFp updateFps[SDB_MAX];
SdbDeleteFp deleteFps[SDB_MAX];
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
} SSdb;
#ifdef __cplusplus

View File

@ -59,6 +59,11 @@ typedef struct SMetaData {
SArray *pQnodeList; // qnode list, SArray<SQueryNodeAddr>
} SMetaData;
typedef struct STbSVersion {
char* tbFName;
int32_t sver;
} STbSVersion;
typedef struct SCatalogCfg {
uint32_t maxTblCacheNum;
uint32_t maxDBCacheNum;
@ -165,6 +170,8 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
*/
int32_t catalogRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName);
int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SArray* pTables);
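
catalogChkTbMetaVersion takes an SArray of the new STbSVersion records (table name plus cached schema version). A hedged sketch of assembling that list follows; taosArrayInit/taosArrayPush/taosArrayDestroy are the array helpers used elsewhere in this tree, and the table name and version are illustrative.

/* Hedged sketch; pTrans and pMgmtEps come from the caller's connection context. */
static int32_t check_tb_versions(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps) {
  SArray *pTables = taosArrayInit(1, sizeof(STbSVersion));
  if (pTables == NULL) return -1;

  STbSVersion tbv = {.tbFName = "db1.stb1", .sver = 1};  /* illustrative name and version */
  taosArrayPush(pTables, &tbv);

  int32_t code = catalogChkTbMetaVersion(pCtg, pTrans, pMgmtEps, pTables);
  taosArrayDestroy(pTables);
  return code;
}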
/**
* Force refresh a table's local cached meta data.
* @param pCatalog (input, got with catalogGetHandle)

View File

@ -42,7 +42,7 @@ typedef struct SReadHandle {
#define STREAM_DATA_TYPE_SSDATA_BLOCK 0x2
typedef enum {
OPTR_EXEC_MODEL_BATCH = 0x1,
OPTR_EXEC_MODEL_BATCH = 0x1,
OPTR_EXEC_MODEL_STREAM = 0x2,
} EOPTR_EXEC_MODEL;
@ -81,7 +81,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
* @param isAdd
* @return
*/
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, SArray* tableIdList, bool isAdd);
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
/**
* Create the exec task object according to task json
@ -95,6 +95,15 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, SArray* tableIdList, bool isA
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, struct SSubplan* pPlan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, EOPTR_EXEC_MODEL model);
/**
* Get the schema version of the table being queried.
* @param tinfo the task info handle
* @param dbName database name of the queried table
* @param tableName name of the queried table
* @param sversion the table (column) schema version
* @param tversion the tag schema version
* @return error code
*/
int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t* tversion);
/**
* The main task execution function, including query on both table and multiple tables,
* which are decided according to the tag or table name query conditions
@ -169,7 +178,7 @@ int32_t qUpdateQueriedTableIdList(qTaskInfo_t tinfo, int64_t uid, int32_t type);
void qProcessFetchRsp(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t *resNum, SExplainExecInfo **pRes);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
#ifdef __cplusplus
}

View File

@ -170,32 +170,18 @@ typedef struct SInputColumnInfoData {
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
uint32_t order; // data block scanner order: asc|desc
uint8_t scanFlag; // record current running step, default: 0
////////////////////////////////////////////////////////////////
int32_t startRow; // start row index
int32_t size; // handled processed row number
SColumnInfoData* pInput;
SColumnDataAgg agg;
int16_t inputType; // TODO remove it
int16_t inputBytes; // TODO remove it
bool hasNull; // null value exist in current block, TODO remove it
bool requireNull; // require null in some function, TODO remove it
int32_t columnIndex; // TODO remove it
bool isAggSet;
int64_t startTs; // timestamp range of current query when function is executed on a specific data block, TODO remove it
bool stableQuery;
/////////////////////////////////////////////////////////////////
int16_t functionId; // function id
char * pOutput; // final result output buffer, point to sdata->data
int32_t numOfParams;
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
int64_t *ptsList; // corresponding timestamp array list
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
int32_t offset;
SVariant tag;
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
uint32_t order; // data block scanner order: asc|desc
uint8_t scanFlag; // record current running step, default: 0
int16_t functionId; // function id
char *pOutput; // final result output buffer, point to sdata->data
int32_t numOfParams;
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
int64_t *ptsList; // corresponding timestamp array list
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
int32_t offset;
SVariant tag;
struct SResultRowEntryInfo *resultInfo;
SSubsidiaryResInfo subsidiaries;
SPoint1 start;

View File

@ -142,6 +142,8 @@ void fmFuncMgtDestroy();
int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
bool fmIsBuiltinFunc(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
bool fmIsNonstandardSQLFunc(int32_t funcId);

View File

@ -78,7 +78,7 @@ typedef struct SAlterDatabaseStmt {
typedef struct STableOptions {
ENodeType type;
char comment[TSDB_STB_COMMENT_LEN];
char comment[TSDB_TB_COMMENT_LEN];
int32_t delay;
float filesFactor;
SNodeList* pRollupFuncs;
@ -90,7 +90,7 @@ typedef struct SColumnDefNode {
ENodeType type;
char colName[TSDB_COL_NAME_LEN];
SDataType dataType;
char comments[TSDB_STB_COMMENT_LEN];
char comments[TSDB_TB_COMMENT_LEN];
bool sma;
} SColumnDefNode;

View File

@ -208,6 +208,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW,
@ -240,6 +241,7 @@ typedef struct SNodeList {
#define SNodeptr void*
int32_t nodesNodeSize(ENodeType type);
SNodeptr nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNodeptr pNode);

View File

@ -48,6 +48,7 @@ typedef struct SExprNode {
ENodeType type;
SDataType resType;
char aliasName[TSDB_COL_NAME_LEN];
char userAlias[TSDB_COL_NAME_LEN];
SArray* pAssociation;
} SExprNode;
@ -247,6 +248,7 @@ typedef struct SSetOperator {
SNode* pRight;
SNodeList* pOrderByList; // SOrderByExprNode
SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN];
} SSetOperator;
typedef enum ESqlClause {
@ -325,7 +327,7 @@ typedef struct SQuery {
bool showRewrite;
int32_t placeholderNum;
SArray* pPlaceholderValues;
SNode* pContainPlaceholderRoot;
SNode* pPrepareRoot;
} SQuery;
void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext);

View File

@ -220,23 +220,23 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define QRY_ERR_RET(c) \

View File

@ -72,7 +72,7 @@ int32_t schedulerInit(SSchedulerCfg *cfg);
* @param nodeList Qnode/Vnode address list, element is SQueryNodeAddr
* @return
*/
int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, bool needRes, SQueryResult *pRes);
int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SQueryResult *pRes);
/**
* Process the query job, generated according to the query physical plan.

View File

@ -26,39 +26,42 @@ extern "C" {
#define TAOS_CONN_SERVER 0
#define TAOS_CONN_CLIENT 1
#define IsReq(pMsg) (pMsg->msgType & 1U)
extern int tsRpcHeadSize;
typedef struct SRpcConnInfo {
typedef struct {
uint32_t clientIp;
uint16_t clientPort;
uint32_t serverIp;
char user[TSDB_USER_LEN];
} SRpcConnInfo;
typedef struct SRpcMsg {
tmsg_t msgType;
void * pCont;
int contLen;
int32_t code;
void * handle; // rpc handle returned to app
void * ahandle; // app handle set by client
typedef struct SRpcHandleInfo {
// rpc info
void *handle; // rpc handle returned to app
int64_t refId; // refid, used by server
int noResp; // has response or not(default 0, 0: resp, 1: no resp);
int persistHandle; // persist handle or not
int32_t noResp; // has response or not(default 0, 0: resp, 1: no resp);
int32_t persistHandle; // persist handle or not
// app info
void *ahandle; // app handle set by client
void *wrapper; // wrapper handle
void *node; // node mgmt handle
// resp info
void *rsp;
int32_t rspLen;
} SRpcHandleInfo;
typedef struct SRpcMsg {
tmsg_t msgType;
void *pCont;
int32_t contLen;
int32_t code;
SRpcHandleInfo info;
SRpcConnInfo conn;
} SRpcMsg;
typedef struct {
char user[TSDB_USER_LEN];
uint32_t clientIp;
uint16_t clientPort;
SRpcMsg rpcMsg;
int32_t rspLen;
void * pRsp;
void * pNode;
} SNodeMsg;
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
///
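
With SNodeMsg removed, the rpc handle, ref id and app handle now travel inside SRpcHandleInfo, embedded in every SRpcMsg. A hedged sketch of a server-side handler answering a request follows; copying info from the request into the response is the point of the refactor, while the "+ 1" response message type is only illustrative (requests have the low bit set, per the IsReq macro above).

/* Hedged sketch; tmsgSendRsp is declared in the tmsg.h hunk earlier in this commit. */
static void handle_request(SRpcMsg *pReq) {
  SRpcMsg rsp = {0};
  rsp.msgType = pReq->msgType + 1;  /* illustrative: pair the response type with the request type */
  rsp.code    = 0;
  rsp.info    = pReq->info;         /* carry the rpc handle back so the reply reaches the caller */
  tmsgSendRsp(&rsp);
}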

View File

@ -38,6 +38,13 @@ typedef int32_t TdUcs4;
#define wcsncpy WCSNCPY_FUNC_TAOS_FORBID
#define wchar_t WCHAR_T_TYPE_TAOS_FORBID
#define strcasestr STR_CASE_STR_FORBID
#define strtoll STR_TO_LL_FUNC_TAOS_FORBID
#define strtoull STR_TO_ULL_FUNC_TAOS_FORBID
#define strtol STR_TO_L_FUNC_TAOS_FORBID
#define strtoul STR_TO_UL_FUNC_TAOS_FORBID
#define strtod STR_TO_LD_FUNC_TAOS_FORBID
#define strtold STR_TO_D_FUNC_TAOS_FORBID
#define strtof STR_TO_F_FUNC_TAOS_FORBID
#endif
#ifdef WINDOWS
@ -72,6 +79,17 @@ int32_t taosWcharsToMbs(char *pStrs, TdWchar *pWchars, int32_t size);
char *taosStrCaseStr(const char *str, const char *pattern);
int64_t taosStr2Int64(const char *str, char** pEnd, int32_t radix);
uint64_t taosStr2UInt64(const char *str, char** pEnd, int32_t radix);
int32_t taosStr2Int32(const char *str, char** pEnd, int32_t radix);
uint32_t taosStr2UInt32(const char *str, char** pEnd, int32_t radix);
int16_t taosStr2Int16(const char *str, char** pEnd, int32_t radix);
uint16_t taosStr2UInt16(const char *str, char** pEnd, int32_t radix);
int8_t taosStr2Int8(const char *str, char** pEnd, int32_t radix);
uint8_t taosStr2UInt8(const char *str, char** pEnd, int32_t radix);
double taosStr2Double(const char *str, char** pEnd);
float taosStr2Float(const char *str, char** pEnd);
#ifdef __cplusplus
}
#endif
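
With the libc strto* family now forbidden in this layer, callers are expected to switch to the taosStr2* wrappers declared above. A hedged sketch of replacing strtol follows; the range check and the end-pointer validation are illustrative.

/* Sketch only; assumes the taosStr2* declarations above are in scope. */
static int32_t parse_port(const char *str, uint16_t *pPort) {
  char   *end = NULL;
  int32_t v   = taosStr2Int32(str, &end, 10);  /* replaces the forbidden strtol */
  if (end == str || v < 0 || v > 65535) return -1;
  *pPort = (uint16_t)v;
  return 0;
}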

View File

@ -646,6 +646,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_ALTER_TABLE TAOS_DEF_ERROR_CODE(0, 0x2649)
#define TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY TAOS_DEF_ERROR_CODE(0, 0x264A)
#define TSDB_CODE_PAR_INVALID_MODIFY_COL TAOS_DEF_ERROR_CODE(0, 0x264B)
#define TSDB_CODE_PAR_INVALID_TBNAME TAOS_DEF_ERROR_CODE(0, 0x264C)
#define TSDB_CODE_PAR_INVALID_FUNCTION_NAME TAOS_DEF_ERROR_CODE(0, 0x264D)
#define TSDB_CODE_PAR_COMMENT_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x264E)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
@ -668,6 +671,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_UDF_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x2906)
#define TSDB_CODE_UDF_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x2907)
#define TSDB_CODE_UDF_NO_FUNC_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2908)
#define TSDB_CODE_UDF_INVALID_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x2909)
#define TSDB_CODE_UDF_INVALID_OUTPUT_TYPE TAOS_DEF_ERROR_CODE(0, 0x290A)
#define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000)
#define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)

View File

@ -218,8 +218,8 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_SQL_SHOW_LEN 1024
#define TSDB_MAX_ALLOWED_SQL_LEN (1 * 1024 * 1024u) // sql length should be less than 1mb
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_STB_COMMENT_LEN 1024
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.

View File

@ -456,52 +456,189 @@ static FORCE_INLINE void* tDecoderMalloc(SDecoder* pCoder, int32_t size) {
return p;
}
static FORCE_INLINE int32_t tPutBinary(uint8_t* p, const uint8_t* pData, uint32_t nData) {
int n = 0;
uint32_t v = nData;
// ===========================================
#define tPutV(p, v) \
do { \
int32_t n = 0; \
for (;;) { \
if (v <= 0x7f) { \
if (p) p[n] = v; \
n++; \
break; \
} \
if (p) p[n] = (v & 0x7f) | 0x80; \
n++; \
v >>= 7; \
} \
return n; \
} while (0)
for (;;) {
if (v <= 0x7f) {
if (p) p[n] = v;
n++;
break;
}
#define tGetV(p, v) \
do { \
int32_t n = 0; \
if (v) *v = 0; \
for (;;) { \
if (p[n] <= 0x7f) { \
if (v) (*v) |= (p[n] << (7 * n)); \
n++; \
break; \
} \
if (v) (*v) |= ((p[n] & 0x7f) << (7 * n)); \
n++; \
} \
return n; \
} while (0)
if (p) p[n] = (v & 0x7f) | 0x80;
n++;
v >>= 7;
}
// PUT
static FORCE_INLINE int32_t tPutU8(uint8_t* p, uint8_t v) {
if (p) ((uint8_t*)p)[0] = v;
return sizeof(uint8_t);
}
if (p) {
memcpy(p + n, pData, nData);
}
static FORCE_INLINE int32_t tPutI8(uint8_t* p, int8_t v) {
if (p) ((int8_t*)p)[0] = v;
return sizeof(int8_t);
}
static FORCE_INLINE int32_t tPutU16(uint8_t* p, uint16_t v) {
if (p) ((uint16_t*)p)[0] = v;
return sizeof(uint16_t);
}
static FORCE_INLINE int32_t tPutI16(uint8_t* p, int16_t v) {
if (p) ((int16_t*)p)[0] = v;
return sizeof(int16_t);
}
static FORCE_INLINE int32_t tPutU32(uint8_t* p, uint32_t v) {
if (p) ((uint32_t*)p)[0] = v;
return sizeof(uint32_t);
}
static FORCE_INLINE int32_t tPutI32(uint8_t* p, int32_t v) {
if (p) ((int32_t*)p)[0] = v;
return sizeof(int32_t);
}
static FORCE_INLINE int32_t tPutU64(uint8_t* p, uint64_t v) {
if (p) ((uint64_t*)p)[0] = v;
return sizeof(uint64_t);
}
static FORCE_INLINE int32_t tPutI64(uint8_t* p, int64_t v) {
if (p) ((int64_t*)p)[0] = v;
return sizeof(int64_t);
}
static FORCE_INLINE int32_t tPutU16v(uint8_t* p, uint16_t v) { tPutV(p, v); }
static FORCE_INLINE int32_t tPutI16v(uint8_t* p, int16_t v) { return tPutU16v(p, ZIGZAGE(int16_t, v)); }
static FORCE_INLINE int32_t tPutU32v(uint8_t* p, uint32_t v) { tPutV(p, v); }
static FORCE_INLINE int32_t tPutI32v(uint8_t* p, int32_t v) { return tPutU32v(p, ZIGZAGE(int32_t, v)); }
static FORCE_INLINE int32_t tPutU64v(uint8_t* p, uint64_t v) { tPutV(p, v); }
static FORCE_INLINE int32_t tPutI64v(uint8_t* p, int64_t v) { return tPutU64v(p, ZIGZAGE(int64_t, v)); }
// GET
static FORCE_INLINE int32_t tGetU8(uint8_t* p, uint8_t* v) {
if (v) *v = ((uint8_t*)p)[0];
return sizeof(uint8_t);
}
static FORCE_INLINE int32_t tGetI8(uint8_t* p, int8_t* v) {
if (v) *v = ((int8_t*)p)[0];
return sizeof(int8_t);
}
static FORCE_INLINE int32_t tGetU16(uint8_t* p, uint16_t* v) {
if (v) *v = ((uint16_t*)p)[0];
return sizeof(uint16_t);
}
static FORCE_INLINE int32_t tGetI16(uint8_t* p, int16_t* v) {
if (v) *v = ((int16_t*)p)[0];
return sizeof(int16_t);
}
static FORCE_INLINE int32_t tGetU32(uint8_t* p, uint32_t* v) {
if (v) *v = ((uint32_t*)p)[0];
return sizeof(uint32_t);
}
static FORCE_INLINE int32_t tGetI32(uint8_t* p, int32_t* v) {
if (v) *v = ((int32_t*)p)[0];
return sizeof(int32_t);
}
static FORCE_INLINE int32_t tGetU64(uint8_t* p, uint64_t* v) {
if (v) *v = ((uint64_t*)p)[0];
return sizeof(uint64_t);
}
static FORCE_INLINE int32_t tGetI64(uint8_t* p, int64_t* v) {
if (v) *v = ((int64_t*)p)[0];
return sizeof(int64_t);
}
static FORCE_INLINE int32_t tGetU16v(uint8_t* p, uint16_t* v) { tGetV(p, v); }
static FORCE_INLINE int32_t tGetI16v(uint8_t* p, int16_t* v) {
int32_t n;
uint16_t tv;
n = tGetU16v(p, &tv);
if (v) *v = ZIGZAGD(int16_t, tv);
return n;
}
static FORCE_INLINE int32_t tGetU32v(uint8_t* p, uint32_t* v) { tGetV(p, v); }
static FORCE_INLINE int32_t tGetI32v(uint8_t* p, int32_t* v) {
int32_t n;
uint32_t tv;
n = tGetU32v(p, &tv);
if (v) *v = ZIGZAGD(int32_t, tv);
return n;
}
static FORCE_INLINE int32_t tGetU64v(uint8_t* p, uint64_t* v) { tGetV(p, v); }
static FORCE_INLINE int32_t tGetI64v(uint8_t* p, int64_t* v) {
int32_t n;
uint64_t tv;
n = tGetU64v(p, &tv);
if (v) *v = ZIGZAGD(int64_t, tv);
return n;
}
// =====================
static FORCE_INLINE int32_t tPutBinary(uint8_t* p, uint8_t* pData, uint32_t nData) {
int n = 0;
n += tPutU32v(p ? p + n : p, nData);
if (p) memcpy(p + n, pData, nData);
n += nData;
return n;
}
static FORCE_INLINE int32_t tGetBinary(const uint8_t* p, const uint8_t** ppData, uint32_t* nData) {
static FORCE_INLINE int32_t tGetBinary(uint8_t* p, uint8_t** ppData, uint32_t* nData) {
int32_t n = 0;
uint32_t tv = 0;
uint32_t t;
uint32_t nt;
for (;;) {
if (p[n] <= 0x7f) {
t = p[n];
tv |= (t << (7 * n));
n++;
break;
}
t = p[n] & 0x7f;
tv |= (t << (7 * n));
n++;
}
if (nData) *nData = n;
n += tGetU32v(p, &nt);
if (nData) *nData = nt;
if (ppData) *ppData = p + n;
n += nt;
n += tv;
return n;
}
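
The helpers above follow a two-pass convention: every tPut* accepts a NULL buffer and then only returns the number of bytes it would write (the "if (p)" guards), and tPutBinary/tGetBinary reuse tPutU32v/tGetU32v for the length prefix, with tGetBinary returning a pointer into the encoded buffer rather than copying. A hedged round-trip sketch follows; taosMemoryCalloc/taosMemoryFree are the allocators used elsewhere in this commit.

/* Hedged sketch of sizing, encoding and decoding a length-prefixed blob. */
static int32_t roundtrip_blob(uint8_t *pData, uint32_t nData) {
  int32_t size = tPutBinary(NULL, pData, nData);   /* first pass: compute the encoded size */
  uint8_t *buf = taosMemoryCalloc(1, size);
  if (buf == NULL) return -1;
  tPutBinary(buf, pData, nData);                   /* second pass: actually write */

  uint8_t *pOut = NULL;                            /* decode: pOut points into buf, no copy */
  uint32_t nOut = 0;
  tGetBinary(buf, &pOut, &nOut);

  int32_t ret = (nOut == nData) ? 0 : -1;
  taosMemoryFree(buf);
  return ret;
}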

View File

@ -1,67 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_UTIL_PROCESS_H_
#define _TD_UTIL_PROCESS_H_
#include "os.h"
#include "tqueue.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum { PROC_FUNC_REQ = 1, PROC_FUNC_RSP, PROC_FUNC_REGIST, PROC_FUNC_RELEASE } EProcFuncType;
typedef struct SProcObj SProcObj;
typedef void *(*ProcMallocFp)(int32_t contLen, EQItype itype);
typedef void *(*ProcFreeFp)(void *pCont);
typedef void (*ProcConsumeFp)(void *parent, void *pHead, int16_t headLen, void *pBody, int32_t bodyLen,
EProcFuncType ftype);
typedef struct {
ProcConsumeFp childConsumeFp;
ProcMallocFp childMallocHeadFp;
ProcFreeFp childFreeHeadFp;
ProcMallocFp childMallocBodyFp;
ProcFreeFp childFreeBodyFp;
ProcConsumeFp parentConsumeFp;
ProcMallocFp parentMallocHeadFp;
ProcFreeFp parentFreeHeadFp;
ProcMallocFp parentMallocBodyFp;
ProcFreeFp parentFreeBodyFp;
SShm shm;
void *parent;
const char *name;
bool isChild;
} SProcCfg;
SProcObj *taosProcInit(const SProcCfg *pCfg);
void taosProcCleanup(SProcObj *pProc);
int32_t taosProcRun(SProcObj *pProc);
void taosProcStop(SProcObj *pProc);
int32_t taosProcPutToChildQ(SProcObj *pProc, const void *pHead, int16_t headLen, const void *pBody, int32_t bodyLen,
void *handle, int64_t handleRef, EProcFuncType ftype);
int64_t taosProcRemoveHandle(SProcObj *pProc, void *handle);
void taosProcCloseHandles(SProcObj *pProc, void (*HandleFp)(void *handle));
void taosProcPutToParentQ(SProcObj *pProc, const void *pHead, int16_t headLen, const void *pBody, int32_t bodyLen,
EProcFuncType ftype);
#ifdef __cplusplus
}
#endif
#endif /*_TD_UTIL_PROCESS_H_*/

View File

@ -56,10 +56,10 @@ typedef enum { SSkipListPutSuccess = 0, SSkipListPutEarlyStop = 1, SSkipListPutS
typedef struct SSkipList {
uint32_t seed;
uint16_t len;
__compar_fn_t comparFn;
__sl_key_fn_t keyFn;
TdThreadRwlock *lock;
uint16_t len;
uint8_t maxLevel;
uint8_t flags;
uint8_t type; // static info above

View File

@ -1,22 +1,22 @@
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target

View File

@ -1,312 +1,312 @@
########################################################
# #
# TDengine Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################
# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/taos
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 1
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body larger than this value will be compressed)
# compressMsgSize -1
# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (any retrieved column size greater than this value all data will be compressed.)
# compressColData -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 1.0
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 1.0
# if disk free space is less than this value, the taosd service exits directly during startup
# minimalDataDirGB 2.0
# One mnode is equal to the number of vnode consumed
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for TDengine client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in taos client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
# percent of redundant data in tsdb meta that triggers meta data compaction, 0 means do not compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
# walFlushSize 1024
# unit Hour. Latency of data migration
# keepTimeOffset 0
########################################################
# #
# TDengine Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################
# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/taos
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 1
# enable/disable backuping vnode directory when removing vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the period at which fsync is executed; if set to 0, fsync is called immediately after each write
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all messages compressed),
# > 0 (RPC message bodies larger than this value will be compressed)
# compressMsgSize -1
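# illustrative setting (the value, and its unit assumed here to be bytes, are
# examples only): compressMsgSize 102400 would compress only RPC message bodies
# larger than roughly 100 KB and send smaller messages uncompressed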
# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (if any retrieved column is larger than this value, all the column data will be compressed)
# compressColData -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the available disk space of the log folder is less than this value (GB)
# minimalLogDirGB 1.0
# stop writing temporary files when the available disk space of the tmp folder is less than this value (GB)
# minimalTmpDirGB 1.0
# if the available disk space is less than this value (GB), the taosd service exits directly during startup
# minimalDataDirGB 2.0
# the number of vnodes that one mnode is counted as when measuring a dnode's load
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
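# reading the values above as a bit mask (a decomposition inferred from the
# listed combinations, offered only as a reading aid):
#   131 = 128 + 2 + 1   -> log-file output + warning + error
#   135 = 131 + 4       -> adds debug
#   143 = 135 + 8       -> adds trace
#   199 = 135 + 64      -> 135 plus screen output
#   207 = 143 + 64      -> 143 plus screen output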
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for TDengine client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continuous query
# cqDebugFlag 131
# enable/disable recording the SQL in taos client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in the blocking retrieval model, only 50% of the query threads will be used for query processing in the dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
# percent of redundant data in tsdb meta that will trigger meta data compaction, 0 means do not compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
# walFlushSize 1024
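# worked example (using the defaults shown earlier in this file): with cache 16
# (MB) and blocks 6, cache*0.5*blocks = 48 MB, so with walFlushSize 1024 the
# vnode WAL is only flushed once it grows past 1024 MB, the larger of the two
# thresholds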
# unit Hour. Latency of data migration
# keepTimeOffset 0

View File

@ -1,21 +1,21 @@
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target

View File

@ -1,20 +1,20 @@
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target

View File

@ -1,252 +1,252 @@
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
verMode=edge
pagMode=full
iplist=""
serverFqdn=""
# -----------------------Variables definition---------------------
script_dir="../release"
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/taos"
# old bin dir
sbin_dir="/usr/local/taos/bin"
temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# ============================= get input parameters =================================================
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
# set parameters by default value
interactiveFqdn=yes # [yes | no]
verType=server # [server | client]
initType=systemd # [systemd | service | ...]
while getopts "hv:d:" arg
do
case $arg in
d)
#echo "interactiveFqdn=$OPTARG"
script_dir=$( echo $OPTARG )
;;
h)
echo "Usage: `basename $0` -d scripy_path"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
function kill_process() {
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function check_file() {
#check file whether exists
if [ ! -e $1/$2 ];then
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function get_package_name() {
var=$1
if [[ $1 =~ 'aarch' ]];then
echo ${var::-21}
else
echo ${var::-17}
fi
}
function check_link() {
#check Link whether exists or broken
if [ -L $1 ] ; then
if [ ! -e $1 ] ; then
echo -e "$1 \033[31Broken link\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
else
echo -e "$1 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_lib_path() {
# check all links
check_link ${lib_link_dir}/libtaos.so
check_link ${lib_link_dir}/libtaos.so.1
if [[ -d ${lib64_link_dir} ]]; then
check_link ${lib64_link_dir}/libtaos.so
check_link ${lib64_link_dir}/libtaos.so.1
fi
echo -e "Check lib path:\033[32mOK\033[0m!"
}
function check_header_path() {
# check all header
header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_taosadapter_config_dir() {
# check all config
check_file ${cfg_install_dir} taosadapter.toml
check_file ${cfg_install_dir} taosadapter.service
check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_config_dir() {
# check all config
check_file ${cfg_install_dir} taos.cfg
check_file ${install_main_dir}/cfg taos.cfg.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_log_path() {
# check log path
check_file ${log_dir}
echo -e "Check log path:\033[32mOK\033[0m!"
}
function check_data_path() {
# check data path
check_file ${data_dir}
echo -e "Check data path:\033[32mOK\033[0m!"
}
function install_TDengine() {
cd ${script_dir}
tar zxf $1
temp_version=$(get_package_name $1)
cd $(get_package_name $1)
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
echo -e "\033[32mTDengine has been installed!\033[0m"
echo -e "\033[32mTDengine is starting...\033[0m"
kill_process taos && systemctl start taosd && sleep 10
}
function test_TDengine() {
check_main_path
check_bin_path
check_lib_path
check_header_path
check_config_dir
check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
if [[ $result =~ "Unable to establish" ]];then
echo -e "\033[31mTDengine connect failed\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
}
# ## ==============================Main program starts from here============================
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
temp=`pwd`
for i in $TD_package_name;do
if [[ $i =~ 'enterprise' ]];then
verMode="cluster"
else
verMode=""
fi
cd $temp
install_TDengine $i
test_TDengine
done
echo "============================================================"
echo -e $fin_result
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
verMode=edge
pagMode=full
iplist=""
serverFqdn=""
# -----------------------Variables definition---------------------
script_dir="../release"
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/taos"
# old bin dir
sbin_dir="/usr/local/taos/bin"
temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# ============================= get input parameters =================================================
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
# set parameters by default value
interactiveFqdn=yes # [yes | no]
verType=server # [server | client]
initType=systemd # [systemd | service | ...]
while getopts "hv:d:" arg
do
case $arg in
d)
#echo "interactiveFqdn=$OPTARG"
script_dir=$( echo $OPTARG )
;;
h)
echo "Usage: `basename $0` -d scripy_path"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
function kill_process() {
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function check_file() {
#check file whether exists
if [ ! -e $1/$2 ];then
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function get_package_name() {
var=$1
if [[ $1 =~ 'aarch' ]];then
echo ${var::-21}
else
echo ${var::-17}
fi
}
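# worked example for get_package_name (package names are hypothetical): for
# "TDengine-server-2.4.0.0-Linux-x64.tar.gz" the last 17 characters
# ("-Linux-x64.tar.gz") are stripped, returning "TDengine-server-2.4.0.0";
# for an aarch package the 21-character suffix ("-Linux-aarch64.tar.gz") is
# stripped instead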
function check_link() {
#check Link whether exists or broken
if [ -L $1 ] ; then
if [ ! -e $1 ] ; then
echo -e "$1 \033[31Broken link\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
else
echo -e "$1 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_lib_path() {
# check all links
check_link ${lib_link_dir}/libtaos.so
check_link ${lib_link_dir}/libtaos.so.1
if [[ -d ${lib64_link_dir} ]]; then
check_link ${lib64_link_dir}/libtaos.so
check_link ${lib64_link_dir}/libtaos.so.1
fi
echo -e "Check lib path:\033[32mOK\033[0m!"
}
function check_header_path() {
# check all header
header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_taosadapter_config_dir() {
# check all config
check_file ${cfg_install_dir} taosadapter.toml
check_file ${cfg_install_dir} taosadapter.service
check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_config_dir() {
# check all config
check_file ${cfg_install_dir} taos.cfg
check_file ${install_main_dir}/cfg taos.cfg.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_log_path() {
# check log path
check_file ${log_dir}
echo -e "Check log path:\033[32mOK\033[0m!"
}
function check_data_path() {
# check data path
check_file ${data_dir}
echo -e "Check data path:\033[32mOK\033[0m!"
}
function install_TDengine() {
cd ${script_dir}
tar zxf $1
temp_version=$(get_package_name $1)
cd $(get_package_name $1)
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
echo -e "\033[32mTDengine has been installed!\033[0m"
echo -e "\033[32mTDengine is starting...\033[0m"
kill_process taos && systemctl start taosd && sleep 10
}
function test_TDengine() {
check_main_path
check_bin_path
check_lib_path
check_header_path
check_config_dir
check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
if [[ $result =~ "Unable to establish" ]];then
echo -e "\033[31mTDengine connect failed\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
}
# ## ==============================Main program starts from here============================
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
temp=`pwd`
for i in $TD_package_name;do
if [[ $i =~ 'enterprise' ]];then
verMode="cluster"
else
verMode=""
fi
cd $temp
install_TDengine $i
test_TDengine
done
echo "============================================================"
echo -e $fin_result

View File

@ -1,13 +1,13 @@
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.

View File

@ -1,13 +1,13 @@
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh

View File

@ -1,2 +1,2 @@
#!/bin/bash
#!/bin/bash

View File

@ -1,40 +1,40 @@
#!/bin/bash
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi
if [ -f "${install_main_dir}/taosadapter.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi
if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :
#!/bin/bash
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi
if [ -f "${install_main_dir}/taosadapter.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi
if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :

View File

@ -1,42 +1,42 @@
#!/bin/bash
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
cd ${insmetaPath}
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
#!/bin/bash
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
cd ${insmetaPath}
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi

294
packaging/deb/makedeb.sh Normal file → Executable file
View File

@ -1,147 +1,147 @@
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}

View File

@ -1,95 +1,95 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDengine taosd
# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""
HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDengine taosd
# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""
HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
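# usage sketch (assuming this script is installed as /etc/init.d/taosd, which
# is not shown here): "service taosd start", "service taosd stop" and
# "service taosd status" drive the case statement above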

View File

@ -1,88 +1,88 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting tarbitrator..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping tarbitrator..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "tarbitrator was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting tarbitrator..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping tarbitrator..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "tarbitrator was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0

View File

@ -1,32 +1,32 @@
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]

File diff suppressed because it is too large

View File

@ -1,83 +1,83 @@
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ >/etc/timezone
fi
# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg
mkdir -p $CFG_DIR >/dev/null 2>&1
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}
# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi
set +e
ulimit -c unlimited
# set core files pattern, maybe failed
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e
if [ "$DISABLE_ADAPTER" = "0" ]; then
which taosadapter >/dev/null && taosadapter &
# wait for 6041 port ready
for _ in $(seq 1 20); do
nc -z localhost 6041 && break
sleep 0.5
done
fi
# if the mnode ep set file already exists, or this host is the first ep, or this is not a cluster deployment, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
# other nodes first wait for the first ep to be ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
$@ -c $CFG_DIR
fi
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ >/etc/timezone
fi
# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg
mkdir -p $CFG_DIR >/dev/null 2>&1
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
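# example of the two expansions above (value is hypothetical): with
# TAOS_FIRST_EP=td-1:6030, "%:*" drops the ":6030" suffix so FIRST_EP_HOST=td-1,
# and "#*:" drops the "td-1:" prefix so FIRST_EP_PORT=6030; if the value
# contains no ":", both variables keep the whole value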
# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}
# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi
set +e
ulimit -c unlimited
# set core files pattern, maybe failed
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e
if [ "$DISABLE_ADAPTER" = "0" ]; then
which taosadapter >/dev/null && taosadapter &
# wait for 6041 port ready
for _ in $(seq 1 20); do
nc -z localhost 6041 && break
sleep 0.5
done
fi
# if the mnode ep set file already exists, or this host is the first ep, or this is not a cluster deployment, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
# other nodes first wait for the first ep to be ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
$@ -c $CFG_DIR
fi

View File

@ -1,13 +1,13 @@
#!/bin/sh
set -e
self=$0
snake_to_camel_case() {
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}
if echo $1 | grep -E "^$" - >/dev/null; then
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
snake_to_camel_case $1
fi
#!/bin/sh
set -e
self=$0
snake_to_camel_case() {
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}
if echo $1 | grep -E "^$" - >/dev/null; then
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
snake_to_camel_case $1
fi
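# usage sketch (variable name and value are illustrative): with
#   export TAOS_FIRST_EP=td-1:6030
# the export branch above emits a config line pairing the camel-cased key with
# the value, e.g. "firstEp td-1:6030" (quoting of the value may vary with the
# shell's export format); calling the script directly with an argument, e.g.
#   env-to-cfg first_ep
# just prints the converted key "firstEp"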

View File

@ -1,77 +1,77 @@
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TOAS_SECOND_EP: "td-2"
deploy:
replicas: 4
update_config:
parallelism: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
update_config:
parallelism: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
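# launch sketch (the version tag is a placeholder): with VERSION=2.4.0.0
# exported, "docker-compose up -d" starts the arbitrator, the two dnodes, the
# adapter and the nginx proxy defined above; for the adapter's deploy/replicas
# settings to take effect, deploy to a swarm instead, e.g.
# "docker stack deploy -c <this file> taos"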

View File

@ -1,82 +1,82 @@
#!/bin/bash
set -e
#set -x
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# -V [stable | beta]
# set parameters by default value
version=""
passWord=""
verType=""
while getopts "hn:p:V:" arg
do
case $arg in
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "version=${version}"
#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine-beta:${version}
docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest rm tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:latest
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
# docker manifest push tdengine/tdengine:latest
# # how set latest version ???
#!/bin/bash
set -e
#set -x
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# -V [stable | beta]
# set parameters by default value
version=""
passWord=""
verType=""
while getopts "hn:p:V:" arg
do
case $arg in
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "version=${version}"
#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest inspect tdengine/tdengine-beta:latest
docker manifest inspect tdengine/tdengine-beta:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine-beta:${version}
docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest rm tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:latest
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
# docker manifest push tdengine/tdengine:latest
# # how to set the latest version ???
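A minimal usage sketch for the manifest-publishing script above (the filename dockermanifest.sh is an assumption, since the file name is not shown here, and the version is a placeholder; the per-architecture images must already exist on Docker Hub):
./dockermanifest.sh -n 3.0.0.0 -p <docker hub password> -V stable   # hypothetical filename, placeholder version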

View File

@ -1,174 +1,174 @@
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# -V [stable | beta]
# -f [pkg file]
# set parameters by default value
cpuType=""
cpuTypeAlias=""
version=""
passWord=""
pkgFile=""
verType="stable"
dockerLatest="n"
while getopts "hc:n:p:f:V:a:b:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
b)
#echo "branchName=$OPTARG"
branchName=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
a)
#echo "dockerLatest=$OPTARG"
dockerLatest=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
echo " -V [stable | beta] "
echo " -f [pkg file] "
echo " -a [y | n ] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
# Check_version()
# {
# }
if [ "$verType" == "beta" ]; then
dockername=${cpuType}-${verType}
dirName=${pkgFile%-beta*}
elif [ "$verType" == "stable" ]; then
dockername=${cpuType}
dirName=${pkgFile%-Linux*}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
scriptDir=$(dirname $(readlink -f $0))
comunityArchiveDir=/nas/TDengine/v$version/community # community version package directory
communityDir=${scriptDir}/../../../community
DockerfilePath=${communityDir}/packaging/docker/
Dockerfile=${communityDir}/packaging/docker/Dockerfile
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .
echo "dirName=${dirName}"
if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
cpuTypeAlias="amd64"
elif [[ "${cpuType}" == "aarch64" ]]; then
cpuTypeAlias="arm64"
elif [[ "${cpuType}" == "aarch32" ]]; then
cpuTypeAlias="armhf"
else
echo "Unknown cpuType: ${cpuType}"
exit 1
fi
docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}
if [ -n "$(docker ps -aq)" ] ;then
echo "delete docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi
if [ -n "$(pidof taosd)" ] ;then
echo "kill taosd "
kill -9 $(pidof taosd)
fi
if [ -n "$(pidof power)" ] ;then
echo "kill power "
kill -9 $(pidof power)
fi
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
echo "${data_version}"
if [ "${data_version}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
rm -rf temp1.data
# set this version to latest version
if [ ${dockerLatest} == 'y' ] ;then
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
docker push tdengine/tdengine-${dockername}:latest
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
echo "${version_latest}"
if [ "${version_latest}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
fi
rm -rf temp2.data
if [ -n "$(docker ps -aq)" ] ;then
echo "delte docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi
cd ${scriptDir}
rm -f ${pkgFile}
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# -V [stable | beta]
# -f [pkg file]
# set parameters by default value
cpuType=""
cpuTypeAlias=""
version=""
passWord=""
pkgFile=""
verType="stable"
dockerLatest="n"
while getopts "hc:n:p:f:V:a:b:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
b)
#echo "branchName=$OPTARG"
branchName=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
a)
#echo "dockerLatest=$OPTARG"
dockerLatest=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
echo " -V [stable | beta] "
echo " -f [pkg file] "
echo " -a [y | n ] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
# Check_version()
# {
# }
if [ "$verType" == "beta" ]; then
dockername=${cpuType}-${verType}
dirName=${pkgFile%-beta*}
elif [ "$verType" == "stable" ]; then
dockername=${cpuType}
dirName=${pkgFile%-Linux*}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
scriptDir=$(dirname $(readlink -f $0))
comunityArchiveDir=/nas/TDengine/v$version/community # community version package directory
communityDir=${scriptDir}/../../../community
DockerfilePath=${communityDir}/packaging/docker/
Dockerfile=${communityDir}/packaging/docker/Dockerfile
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .
echo "dirName=${dirName}"
if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
cpuTypeAlias="amd64"
elif [[ "${cpuType}" == "aarch64" ]]; then
cpuTypeAlias="arm64"
elif [[ "${cpuType}" == "aarch32" ]]; then
cpuTypeAlias="armhf"
else
echo "Unknown cpuType: ${cpuType}"
exit 1
fi
docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}
if [ -n "$(docker ps -aq)" ] ;then
echo "delete docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi
if [ -n "$(pidof taosd)" ] ;then
echo "kill taosd "
kill -9 $(pidof taosd)
fi
if [ -n "$(pidof power)" ] ;then
echo "kill power "
kill -9 $(pidof power)
fi
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
echo "${data_version}"
if [ "${data_version}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
rm -rf temp1.data
# set this version to latest version
if [ ${dockerLatest} == 'y' ] ;then
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
docker push tdengine/tdengine-${dockername}:latest
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
echo "${version_latest}"
if [ "${version_latest}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
fi
rm -rf temp2.data
if [ -n "$(docker ps -aq)" ] ;then
echo "delte docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi
cd ${scriptDir}
rm -f ${pkgFile}
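A hedged usage sketch for dockerbuild.sh above (version, package file and password are placeholders; the package archive is expected under /nas/TDengine/v<version>/community, as coded above):
./dockerbuild.sh -c amd64 -n 3.0.0.0 -f TDengine-server-3.0.0.0-Linux-amd64.tar.gz -p <docker hub password> -V stable -a n   # placeholder values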

View File

@ -1,56 +1,56 @@
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}
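A usage sketch for the wrapper script above (its filename is not shown here, so dockerbuild_aarch64.sh is an assumption; -c defaults to aarch64 and the package is taken from ../../release/):
./dockerbuild_aarch64.sh -n 3.0.0.0 -p <docker hub password>   # hypothetical filename, placeholder version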

View File

@ -1,62 +1,62 @@
@echo off
set internal_dir=%~dp0\..\..\
set community_dir=%~dp0\..
cd %community_dir%
git checkout -- .
cd %community_dir%\packaging
:: %1 name %2 version
if !%1==! GOTO USAGE
if !%2==! GOTO USAGE
if %1 == taos GOTO TAOS
if %1 == power GOTO POWER
if %1 == tq GOTO TQ
if %1 == pro GOTO PRO
if %1 == kh GOTO KH
if %1 == jh GOTO JH
GOTO USAGE
:TAOS
goto RELEASE
:POWER
call sed_power.bat %community_dir%
goto RELEASE
:TQ
call sed_tq.bat %community_dir%
goto RELEASE
:PRO
call sed_pro.bat %community_dir%
goto RELEASE
:KH
call sed_kh.bat %community_dir%
goto RELEASE
:JH
call sed_jh.bat %community_dir%
goto RELEASE
:RELEASE
echo release windows-client-64 for %1, version: %2
if not exist %internal_dir%\debug\ver-%2-64bit-%1 (
md %internal_dir%\debug\ver-%2-64bit-%1
) else (
rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1
md %internal_dir%\debug\ver-%2-64bit-%1
)
cd %internal_dir%\debug\ver-%2-64bit-%1
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64
set CL=/MP4
nmake install
goto EXIT0
:USAGE
echo Usage: release.bat $productName $version
goto EXIT0
@echo off
set internal_dir=%~dp0\..\..\
set community_dir=%~dp0\..
cd %community_dir%
git checkout -- .
cd %community_dir%\packaging
:: %1 name %2 version
if !%1==! GOTO USAGE
if !%2==! GOTO USAGE
if %1 == taos GOTO TAOS
if %1 == power GOTO POWER
if %1 == tq GOTO TQ
if %1 == pro GOTO PRO
if %1 == kh GOTO KH
if %1 == jh GOTO JH
GOTO USAGE
:TAOS
goto RELEASE
:POWER
call sed_power.bat %community_dir%
goto RELEASE
:TQ
call sed_tq.bat %community_dir%
goto RELEASE
:PRO
call sed_pro.bat %community_dir%
goto RELEASE
:KH
call sed_kh.bat %community_dir%
goto RELEASE
:JH
call sed_jh.bat %community_dir%
goto RELEASE
:RELEASE
echo release windows-client-64 for %1, version: %2
if not exist %internal_dir%\debug\ver-%2-64bit-%1 (
md %internal_dir%\debug\ver-%2-64bit-%1
) else (
rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1
md %internal_dir%\debug\ver-%2-64bit-%1
)
cd %internal_dir%\debug\ver-%2-64bit-%1
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64
set CL=/MP4
nmake install
goto EXIT0
:USAGE
echo Usage: release.bat $productName $version
goto EXIT0
:EXIT0
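A usage sketch for release.bat above, run from a Windows command prompt in the packaging directory (the version number below is a placeholder):
:: build the 64-bit Windows client for the taos product, placeholder version number
release.bat taos 3.0.0.0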

View File

@ -1,94 +1,315 @@
#!/bin/bash
#
# Generate the tar.gz package for linux os
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
# -d [taos | ...]
# -n [2.0.0.3]
# -m [2.0.0.0]
# -H [ false | true]
# set parameters by default value
version="3.0.0.0"
verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
dbName=taos # [taos | ...]
allocator=glibc # [glibc | jemalloc]
verNumber=""
verNumberComp="2.0.0.0"
httpdBuild=false
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
case $arg in
v)
#echo "verMode=$OPTARG"
verMode=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
l)
#echo "pagMode=$OPTARG"
pagMode=$(echo $OPTARG)
;;
s)
#echo "soMode=$OPTARG"
soMode=$(echo $OPTARG)
;;
d)
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
m)
#echo "verNumberComp=$OPTARG"
verNumberComp=$(echo $OPTARG)
;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
H)
#echo "httpdBuild=$OPTARG"
httpdBuild=$(echo $OPTARG)
;;
h)
echo "Usage: $(basename $0) -v [cluster | edge] "
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
echo " -d [taos | ...] "
echo " -n [version number] "
echo " -m [compatible version number] "
echo " -H [false | true] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
curr_dir=$(pwd)
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
if [ "$osType" == "Darwin" ]; then
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/..
else
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
fi
echo "=======================new version number: ${verNumber}======================================"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
function is_valid_version() {
[ -z "$1" ] && return 1 || :
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
if [[ $1 =~ $rx ]]; then
return 0
fi
return 1
}
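# Illustrative examples (not part of the packaging flow): the regex accepts exactly four dot-separated
# numeric fields, so is_valid_version 3.0.0.0 returns 0 (valid) while is_valid_version 3.0.0 returns 1.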
function vercomp() {
if [[ $1 == $2 ]]; then
echo 0
exit 0
fi
local IFS=.
local i ver1=($1) ver2=($2)
# fill empty fields in ver1 with zeros
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
ver1[i]=0
done
for ((i = 0; i < ${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo 1
exit 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo 2
exit 0
fi
done
echo 0
}
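# Illustrative examples: vercomp prints 0 for equal versions, 1 when the first argument is newer and 2
# when it is older, e.g. "$(vercomp 3.0.0.1 3.0.0.0)" prints 1 and "$(vercomp 2.4.0.0 3.0.0.0)" prints 2.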
# 1. check version information
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
echo "please enter correct version"
exit 0
fi
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
build_time=$(date +"%F %R")
echo "script_dir: ${script_dir}"
echo "top_dir: ${top_dir}"
# get commit id from git
gitinfo=$(git rev-parse --verify HEAD)
cd ${top_dir}
# git checkout -- .
# git checkout 3.0
# git pull || :
if [[ "$verMode" == "cluster" ]]; then
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
echo "curr_dir: ${curr_dir}"
cd "${curr_dir}"
# 2. cmake executable file
compile_dir="${top_dir}/debug"
# if [ -d ${compile_dir} ]; then
# rm -rf ${compile_dir}
# fi
mkdir -p ${compile_dir}
cd ${compile_dir}
echo "compile_dir: ${compile_dir}"
cmake .. -DBUILD_TOOLS=true
make -j32
release_dir="${top_dir}/release"
if [ -d ${release_dir} ]; then
rm -rf ${release_dir}
if [ -d ${compile_dir} ]; then
${csudo}rm -rf ${compile_dir}
fi
mkdir -p ${release_dir}
cd ${release_dir}
if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${compile_dir}
else
mkdir -p ${compile_dir}
fi
cd ${compile_dir}
install_dir="${release_dir}/TDengine-server-${version}"
mkdir -p ${install_dir}
mkdir -p ${install_dir}/bin
mkdir -p ${install_dir}/lib
mkdir -p ${install_dir}/inc
if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi
install_files="${script_dir}/tools/install.sh"
chmod a+x ${script_dir}/tools/install.sh || :
cp ${install_files} ${install_dir}
if [[ "$dbName" != "taos" ]]; then
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
replace_community_$dbName
fi
header_files="${top_dir}/include/client/taos.h ${top_dir}/include/util/taoserror.h"
cp ${header_files} ${install_dir}/inc
bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compile_dir}/build/bin/create_table ${compile_dir}/build/bin/tmq_sim ${script_dir}/tools/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
if [[ "$httpdBuild" == "true" ]]; then
BUILD_HTTP=true
else
BUILD_HTTP=false
fi
cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
if [[ "$verMode" == "cluster" ]]; then
BUILD_HTTP=internal
fi
if [[ "$pagMode" == "full" ]]; then
BUILD_TOOLS=true
else
BUILD_TOOLS=false
fi
#cp ${compile_dir}/source/dnode/mnode/impl/libmnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/qnode/libqnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/snode/libsnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/bnode/libbnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/wal/libwal.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/scheduler/libscheduler.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/planner/libplanner.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/parser/libparser.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/qcom/libqcom.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/transport/libtransport.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/function/libfunction.so ${install_dir}/lib/
#cp ${compile_dir}/source/common/libcommon.so ${install_dir}/lib/
#cp ${compile_dir}/source/os/libos.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/mnode/sdb/libsdb.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/catalog/libcatalog.so ${install_dir}/lib/
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then
if [ "$verMode" != "cluster" ]; then
# community-version compile
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
else
if [[ "$dbName" != "taos" ]]; then
replace_enterprise_$dbName
fi
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
exit 1
fi
pkg_name=${install_dir}-Linux-x64
CORES=$(grep -c ^processor /proc/cpuinfo)
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
if [[ "$allocator" == "jemalloc" ]]; then
# jemalloc need compile first, so disable parallel build
make -j ${CORES} && ${csudo}make install
else
make -j ${CORES} && ${csudo}make install
fi
cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [[ "$pagMode" == "full" ]]; then
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
cd ${top_dir}/tools/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
${csudo}./make-taos-tools-deb.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [[ "$pagMode" == "full" ]]; then
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
cd ${top_dir}/tools/taos-tools/packaging/rpm
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
fi
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
# ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
# only make client for Darwin
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
fi
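A hedged invocation sketch for release.sh above (all values are placeholders; -n and -m must both be four-field version numbers and -n must not be lower than -m, per the check above):
./release.sh -v edge -V stable -c x64 -o Linux -l full -s dynamic -d taos -n 3.0.0.0 -m 3.0.0.0   # placeholder versions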

View File

@ -1,87 +1,87 @@
#!/bin/bash
#
# Generate rpm package for centos
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/rpmworkroom"
spec_file="${script_dir}/tdengine.spec"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
echo "spec_file: ${spec_file}"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
function cp_rpm_package() {
local cur_dir
cd $1
cur_dir=$(pwd)
for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then
cd ${dirlist}
cp_rpm_package ${cur_dir}/${dirlist}
cd ..
fi
if test -e ${dirlist}; then
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
fi
done
}
if [ -d ${pkg_dir} ]; then
${csudo}rm -rf ${pkg_dir}
fi
${csudo}mkdir -p ${pkg_dir}
cd ${pkg_dir}
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo}cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo}rm -rf ${pkg_dir}
#!/bin/bash
#
# Generate rpm package for centos
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/rpmworkroom"
spec_file="${script_dir}/tdengine.spec"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
echo "spec_file: ${spec_file}"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
function cp_rpm_package() {
local cur_dir
cd $1
cur_dir=$(pwd)
for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then
cd ${dirlist}
cp_rpm_package ${cur_dir}/${dirlist}
cd ..
fi
if test -e ${dirlist}; then
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
fi
done
}
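# Note on the helper above: cp_rpm_package walks the rpmbuild RPMS/<arch>/ subdirectories recursively and
# copies each file it finds to ${output_dir}/TDengine-${tdengine_ver}.rpm; that package is renamed further below.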
if [ -d ${pkg_dir} ]; then
${csudo}rm -rf ${pkg_dir}
fi
${csudo}mkdir -p ${pkg_dir}
cd ${pkg_dir}
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo}cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo}rm -rf ${pkg_dir}
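A hedged usage sketch for makerpm.sh above (positional arguments; paths and version are placeholders, and the script is normally driven by release.sh rather than run by hand):
./makerpm.sh /path/to/debug /path/to/rpms 3.0.0.0 x64 Linux edge stable   # placeholder paths and version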

View File

@ -1,145 +1,145 @@
#!/bin/bash
#
# taosd This shell script takes care of starting and stopping TDengine.
#
# chkconfig: 2345 99 01
# description: TDengine is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
#
#
### BEGIN INIT INFO
# Provides: taosd
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop taosd
# Description: TDengine is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
### END INIT INFO
# Source init functions
. /etc/init.d/functions
# Maximum number of open files
MAX_OPEN_FILES=65535
# Default program options
NAME=taosd
PROG=/usr/local/taos/bin/taosd
USER=root
GROUP=root
# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME
# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid
[ -e $PID_DIR ] || mkdir -p $PID_DIR
PROG_OPTS=""
start() {
echo -n "Starting ${NAME}: "
# check identity
curid="`id -u -n`"
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
echo "Must be run as root or $USER, but was run as $curid"
return 1
fi
# Sets the maximum number of open file descriptors allowed.
ulimit -n $MAX_OPEN_FILES
curulimit="`ulimit -n`"
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
return 1
fi
if [ "`id -u -n`" == root ] ; then
# Changes the owner of the lock, and the pid files to allow
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
else
# Don't have to change user.
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
fi
retval=$?
sleep 2
echo
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
return $retval
}
stop() {
echo -n "Stopping ${NAME}: "
killproc -p $PID_FILE $NAME
retval=$?
echo
# Non-root users don't have enough permission to remove pid and lock files.
# So, the opentsdb_restart.py cannot get rid of the files, and the command
# "service opentsdb status" will complain about the existing pid file.
# Makes the pid file empty.
echo > $PID_FILE
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $PID_FILE -l $LOCK_FILE $NAME
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?
#!/bin/bash
#
# taosd This shell script takes care of starting and stopping TDengine.
#
# chkconfig: 2345 99 01
# description: TDengine is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
#
#
### BEGIN INIT INFO
# Provides: taosd
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop taosd
# Description: TDengine is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
### END INIT INFO
# Source init functions
. /etc/init.d/functions
# Maximum number of open files
MAX_OPEN_FILES=65535
# Default program options
NAME=taosd
PROG=/usr/local/taos/bin/taosd
USER=root
GROUP=root
# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME
# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid
[ -e $PID_DIR ] || mkdir -p $PID_DIR
PROG_OPTS=""
start() {
echo -n "Starting ${NAME}: "
# check identity
curid="`id -u -n`"
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
echo "Must be run as root or $USER, but was run as $curid"
return 1
fi
# Sets the maximum number of open file descriptors allowed.
ulimit -n $MAX_OPEN_FILES
curulimit="`ulimit -n`"
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
return 1
fi
if [ "`id -u -n`" == root ] ; then
# Changes the owner of the lock, and the pid files to allow
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
else
# Don't have to change user.
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
fi
retval=$?
sleep 2
echo
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
return $retval
}
stop() {
echo -n "Stopping ${NAME}: "
killproc -p $PID_FILE $NAME
retval=$?
echo
# Non-root users don't have enough permission to remove pid and lock files.
# So, the opentsdb_restart.py cannot get rid of the files, and the command
# "service opentsdb status" will complain about the existing pid file.
# Makes the pid file empty.
echo > $PID_FILE
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $PID_FILE -l $LOCK_FILE $NAME
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -1,141 +1,141 @@
#!/bin/bash
#
# tarbitratord This shell script takes care of starting and stopping tarbitrator.
#
# chkconfig: 2345 99 01
# description: tarbitrator is an arbitrator used in a TDengine cluster.
#
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop tarbitrator
# Description: tarbitrator is an arbitrator used in a TDengine cluster.
### END INIT INFO
# Source init functions
. /etc/init.d/functions
# Maximum number of open files
MAX_OPEN_FILES=65535
# Default program options
NAME=tarbitrator
PROG=/usr/local/taos/bin/tarbitrator
USER=root
GROUP=root
# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME
# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid
[ -e $PID_DIR ] || mkdir -p $PID_DIR
PROG_OPTS=""
start() {
echo -n "Starting ${NAME}: "
# check identity
curid="`id -u -n`"
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
echo "Must be run as root or $USER, but was run as $curid"
return 1
fi
# Sets the maximum number of open file descriptors allowed.
ulimit -n $MAX_OPEN_FILES
curulimit="`ulimit -n`"
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
return 1
fi
if [ "`id -u -n`" == root ] ; then
# Changes the owner of the lock, and the pid files to allow
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
else
# Don't have to change user.
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
fi
retval=$?
sleep 2
echo
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
return $retval
}
stop() {
echo -n "Stopping ${NAME}: "
killproc -p $PID_FILE $NAME
retval=$?
echo
# Non-root users don't have enough permission to remove pid and lock files.
# So, the opentsdb_restart.py cannot get rid of the files, and the command
# "service opentsdb status" will complain about the existing pid file.
# Makes the pid file empty.
echo > $PID_FILE
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $PID_FILE -l $LOCK_FILE $NAME
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?
#!/bin/bash
#
# tarbitratord This shell script takes care of starting and stopping tarbitrator.
#
# chkconfig: 2345 99 01
# description: tarbitrator is an arbitrator used in a TDengine cluster.
#
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop tarbitrator
# Description: tarbitrator is an arbitrator used in a TDengine cluster.
### END INIT INFO
# Source init functions
. /etc/init.d/functions
# Maximum number of open files
MAX_OPEN_FILES=65535
# Default program options
NAME=tarbitrator
PROG=/usr/local/taos/bin/tarbitrator
USER=root
GROUP=root
# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME
# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid
[ -e $PID_DIR ] || mkdir -p $PID_DIR
PROG_OPTS=""
start() {
echo -n "Starting ${NAME}: "
# check identity
curid="`id -u -n`"
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
echo "Must be run as root or $USER, but was run as $curid"
return 1
fi
# Sets the maximum number of open file descriptors allowed.
ulimit -n $MAX_OPEN_FILES
curulimit="`ulimit -n`"
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
return 1
fi
if [ "`id -u -n`" == root ] ; then
# Changes the owner of the lock, and the pid files to allow
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
else
# Don't have to change user.
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
fi
retval=$?
sleep 2
echo
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
return $retval
}
stop() {
echo -n "Stopping ${NAME}: "
killproc -p $PID_FILE $NAME
retval=$?
echo
# Non-root users don't have enough permission to remove pid and lock files.
# So, the opentsdb_restart.py cannot get rid of the files, and the command
# "service opentsdb status" will complain about the existing pid file.
# Makes the pid file empty.
echo > $PID_FILE
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $PID_FILE -l $LOCK_FILE $NAME
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?
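A brief usage sketch for the SysV init scripts above, assuming the packages installed them under /etc/init.d as taosd and tarbitratord:
service taosd start      # or: /etc/init.d/taosd start
service taosd status
service tarbitratord restart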

View File

@ -1,236 +1,236 @@
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
Name: tdengine
Version: %{_version}
Release: 3%{?dist}
Summary: tdengine from taosdata
Group: Application/Database
License: AGPL
URL: www.taosdata.com
AutoReqProv: no
#BuildRoot: %_topdir/BUILDROOT
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
#Prefix: /usr/local/taos
#BuildRequires:
#Requires:
%description
Big Data Platform Designed and Optimized for IoT
#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T
#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}
%install
#make install DESTDIR=%{buildroot}
rm -rf %{buildroot}
echo topdir: %{_topdir}
echo version: %{_version}
echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
mkdir -p %{buildroot}%{homepath}/cfg
#mkdir -p %{buildroot}%{homepath}/connector
mkdir -p %{buildroot}%{homepath}/driver
mkdir -p %{buildroot}%{homepath}/examples
mkdir -p %{buildroot}%{homepath}/include
#mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
mkdir -p %{buildroot}%{userlocalpath}/include
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share
mkdir -p %{buildroot}%{userlocalpath}/share/doc
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share/man
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/bin/jeprof ]; then
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
fi
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
fi
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
fi
fi
#Scripts executed before installation
%pre
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already exist, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
fi
# if taosadapter.toml already exist, remove it
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
fi
# libtaos.so* must not exist here, otherwise ln -s fails
${csudo}rm -f %{homepath}/driver/libtaos* || :
#Scripts executed after installation
%post
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
cd %{homepath}/script
${csudo}./post.sh
# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# only call preun.sh when removing the package, not when updating (the argument is 2 on update)
if [ $1 -eq 0 ];then
#cd %{homepath}/script
#${csudo}./preun.sh
if [ -f %{homepath}/script/preun.sh ]; then
cd %{homepath}/script
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
fi
# Scripts executed after uninstall
%postun
# clean build dir
%clean
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}rm -rf %{buildroot}
#Specify the files to be packaged
%files
/*
#%doc
#Setting default permissions
%defattr (-,root,root,0755)
#%{prefix}
#%changelog
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
Name: tdengine
Version: %{_version}
Release: 3%{?dist}
Summary: tdengine from taosdata
Group: Application/Database
License: AGPL
URL: www.taosdata.com
AutoReqProv: no
#BuildRoot: %_topdir/BUILDROOT
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
#Prefix: /usr/local/taos
#BuildRequires:
#Requires:
%description
Big Data Platform Designed and Optimized for IoT
#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T
#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}
%install
#make install DESTDIR=%{buildroot}
rm -rf %{buildroot}
echo topdir: %{_topdir}
echo version: %{_version}
echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
mkdir -p %{buildroot}%{homepath}/cfg
#mkdir -p %{buildroot}%{homepath}/connector
mkdir -p %{buildroot}%{homepath}/driver
mkdir -p %{buildroot}%{homepath}/examples
mkdir -p %{buildroot}%{homepath}/include
#mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
mkdir -p %{buildroot}%{userlocalpath}/include
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share
mkdir -p %{buildroot}%{userlocalpath}/share/doc
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share/man
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/bin/jeprof ]; then
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
fi
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
fi
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
fi
fi
#Scripts executed before installation
%pre
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already exist, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
fi
# if taosadapter.toml already exist, remove it
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
fi
# libtaos.so* must not exist here, otherwise ln -s fails
${csudo}rm -f %{homepath}/driver/libtaos* || :
#Scripts executed after installation
%post
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
cd %{homepath}/script
${csudo}./post.sh
# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# only call preun.sh when removing the package, not when updating (the argument is 2 on update)
if [ $1 -eq 0 ];then
#cd %{homepath}/script
#${csudo}./preun.sh
if [ -f %{homepath}/script/preun.sh ]; then
cd %{homepath}/script
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
fi
# Scripts executed after uninstall
%postun
# clean build dir
%clean
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}rm -rf %{buildroot}
#Specify the files to be packaged
%files
/*
#%doc
#Setting default permissions
%defattr (-,root,root,0755)
#%{prefix}
#%changelog
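A note on the $1 test in the %preun scriptlet above: rpm passes the number of package instances that will remain after the transaction, so 0 means the package is being erased and 1 or more means it is being upgraded. A minimal standalone sketch of the same guard (the hard-coded script path is an assumption standing in for %{homepath}/script):
#!/bin/bash
# $1 is supplied by rpm: 0 = erase, >= 1 = upgrade
if [ "$1" -eq 0 ]; then
  # run the uninstall hook only on a real removal (path assumed, mirrors %{homepath}/script above)
  [ -x /usr/local/taos/script/preun.sh ] && /usr/local/taos/script/preun.sh
fi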

104
packaging/tools/check_os.sh Normal file → Executable file
View File

@ -1,52 +1,52 @@
#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
CFAILURE="$CRED"
CEND="${CSI}0m"
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
OS=CentOS
[ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
[ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
[ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
OS=CentOS
CentOS_RHEL_version=6
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
Debian_version=8
else
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
kill -9 $$
fi
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
OS=Ubuntu
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
[ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
OS=Ubuntu
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Ubuntu_version=16
else
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
kill -9 $$
fi
echo "${CFAILURE}${OS}${CEND}"
if [ "$OS" == 'CentOS' ]; then
echo ${CentOS_RHEL_version}
else
echo ${Ubuntu_version}
fi
#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
CFAILURE="$CRED"
CEND="${CSI}0m"
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
OS=CentOS
[ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
[ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
[ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
OS=CentOS
CentOS_RHEL_version=6
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
OS=Debian
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
Debian_version=8
else
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
kill -9 $$
fi
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
OS=Ubuntu
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
[ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
OS=Ubuntu
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
Ubuntu_version=16
else
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
kill -9 $$
fi
echo "${CFAILURE}${OS}${CEND}"
if [ "$OS" == 'CentOS' ]; then
echo ${CentOS_RHEL_version}
else
echo ${Ubuntu_version}
fi
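As a quick sanity check, check_os.sh can be run directly; it prints the detected distribution name (wrapped in a red ANSI escape) followed by the major version. The output below is illustrative for a CentOS 7 host:
bash packaging/tools/check_os.sh
# CentOS
# 7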

42
packaging/tools/get_client.sh Normal file → Executable file
View File

@ -1,21 +1,21 @@
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Pleas input the director of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Pleas input the director of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}

28
packaging/tools/get_os.sh Normal file → Executable file
View File

@ -1,14 +1,14 @@
#!/bin/bash
#
# This file is used to detect the name of the host operating system.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
#!/bin/bash
#
# This file is used to detect the name of the host operating system.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
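get_os.sh strips the quotes from the NAME field of /etc/*-release and prints its first word, for example:
bash packaging/tools/get_os.sh
# Ubuntu        (on an Ubuntu host; CentOS, Debian, etc. elsewhere)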

30
packaging/tools/get_version.sh Normal file → Executable file
View File

@ -1,15 +1,15 @@
#!/bin/bash
#
# This file is used to extract the version string from the given source file.
set -e
# set -x
# -----------------------Variables definition---------------------
verinfo=$(cat $1 | grep " version" | cut -d '"' -f2)
verinfo=$(echo $verinfo | tr "\n" " ")
len=$(echo ${#verinfo})
len=$((len-1))
retval=$(echo -ne ${verinfo:0:${len}})
echo -ne $retval
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
# set -x
# -----------------------Variables definition---------------------
verinfo=$(cat $1 | grep " version" | cut -d '"' -f2)
verinfo=$(echo $verinfo | tr "\n" " ")
len=$(echo ${#verinfo})
len=$((len-1))
retval=$(echo -ne ${verinfo:0:${len}})
echo -ne $retval
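get_version.sh pulls the first quoted string from any line of the given file that contains " version". For a source file containing, say, char version[64] = "3.0.0.0"; it prints the bare version (the input path below is a placeholder):
bash packaging/tools/get_version.sh path/to/version.c
# 3.0.0.0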

View File

@ -485,6 +485,17 @@ function install_service() {
# fi
}
function install_config() {
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
${csudo}chmod 644 ${cfg_install_dir}/*
fi
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install TDengine...${NC}"
@ -500,7 +511,7 @@ function install_TDengine() {
# For installing new
install_bin
install_service
#install_config
install_config
# Ask whether to start the service
#echo
@ -539,7 +550,7 @@ function install_TDengine() {
echo
else # Only install client
install_bin
#install_config
install_config
echo
echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
fi
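The net effect of switching from #install_config to install_config above is that the config step now runs for both server and client-only installs: the live config under cfg_install_dir is created only if it does not already exist, so an upgrade keeps the user's settings, while a pristine copy is refreshed on every run. A rough sketch of the resulting layout, assuming the usual defaults cfg_install_dir=/etc/taos, install_main_dir=/usr/local/taos and configFile=taos.cfg:
ls /etc/taos/taos.cfg                 # live config, written only on first install
ls /usr/local/taos/cfg/taos.cfg.org   # packaged default, refreshed on every install/upgrade
ls -l /usr/local/taos/cfg/taos.cfg    # symlink back to /etc/taos/taos.cfg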

678
packaging/tools/install_arbi.sh Normal file → Executable file
View File

@ -1,339 +1,339 @@
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/tarbitrator"
# old bin dir
bin_dir="/usr/local/tarbitrator/bin"
service_config_dir="/etc/systemd/system"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo >/dev/null; then
csudo="sudo "
fi
update_flag=0
initd_mod=0
service_mod=2
if pidof systemd &>/dev/null; then
service_mod=0
elif $(which service &>/dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &>/dev/null); then
initd_mod=1
elif $(which insserv &>/dev/null); then
initd_mod=2
elif $(which update-rc.d &>/dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if [[ -e /etc/os-release ]]; then
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu"; then
# echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian"; then
# echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin"; then
# echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos"; then
# echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora"; then
# echo "This is fedora system"
os_type=2
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/bin
#${csudo}mkdir -p ${install_main_dir}/include
${csudo}mkdir -p ${install_main_dir}/init.d
}
function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/rmtarbitrator || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
}
function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo}/usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo}ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}
function clean_service_on_sysvinit() {
if pidof tarbitrator &>/dev/null; then
${csudo}service tarbitratord stop || :
fi
if ((${initd_mod} == 1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}chkconfig --del tarbitratord || :
fi
elif ((${initd_mod} == 2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}insserv -r tarbitratord || :
fi
elif ((${initd_mod} == 3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}update-rc.d -f tarbitratord remove || :
fi
fi
${csudo}rm -f ${service_config_dir}/tarbitratord || :
if $(which init &>/dev/null); then
${csudo}init q || :
fi
}
function install_service_on_sysvinit() {
clean_service_on_sysvinit
sleep 1
if ((${os_type} == 1)); then
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
elif ((${os_type} == 2)); then
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
fi
if ((${initd_mod} == 1)); then
${csudo}chkconfig --add tarbitratord || :
${csudo}chkconfig --level 2345 tarbitratord on || :
elif ((${initd_mod} == 2)); then
${csudo}insserv tarbitratord || :
${csudo}insserv -d tarbitratord || :
elif ((${initd_mod} == 3)); then
${csudo}update-rc.d tarbitratord defaults || :
fi
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
}
function install_service_on_systemd() {
clean_service_on_systemd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
${csudo}systemctl enable tarbitratord
}
function install_service() {
if ((${service_mod} == 0)); then
install_service_on_systemd
elif ((${service_mod} == 1)); then
install_service_on_sysvinit
else
kill_tarbitrator
fi
}
function update_TDengine() {
# Start to update
echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
# Stop the service if running
if pidof tarbitrator &>/dev/null; then
if ((${service_mod} == 0)); then
${csudo}systemctl stop tarbitratord || :
elif ((${service_mod} == 1)); then
${csudo}service tarbitratord stop || :
else
kill_tarbitrator
fi
sleep 1
fi
install_main_path
#install_header
install_bin
install_service
install_jemalloc
echo
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
fi
echo
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
}
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
install_main_path
#install_header
install_bin
install_service
install_jemalloc
echo
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
fi
echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
echo
}
## ==============================Main program starts from here============================
# Install server and client
if [ -x ${bin_dir}/tarbitrator ]; then
update_flag=1
update_TDengine
else
install_TDengine
fi
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/tarbitrator"
# old bin dir
bin_dir="/usr/local/tarbitrator/bin"
service_config_dir="/etc/systemd/system"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo >/dev/null; then
csudo="sudo "
fi
update_flag=0
initd_mod=0
service_mod=2
if pidof systemd &>/dev/null; then
service_mod=0
elif $(which service &>/dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &>/dev/null); then
initd_mod=1
elif $(which insserv &>/dev/null); then
initd_mod=2
elif $(which update-rc.d &>/dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if [[ -e /etc/os-release ]]; then
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu"; then
# echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian"; then
# echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin"; then
# echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos"; then
# echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora"; then
# echo "This is fedora system"
os_type=2
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/bin
#${csudo}mkdir -p ${install_main_dir}/include
${csudo}mkdir -p ${install_main_dir}/init.d
}
function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/rmtarbitrator || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
}
function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo}/usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo}ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}
function clean_service_on_sysvinit() {
if pidof tarbitrator &>/dev/null; then
${csudo}service tarbitratord stop || :
fi
if ((${initd_mod} == 1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}chkconfig --del tarbitratord || :
fi
elif ((${initd_mod} == 2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}insserv -r tarbitratord || :
fi
elif ((${initd_mod} == 3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}update-rc.d -f tarbitratord remove || :
fi
fi
${csudo}rm -f ${service_config_dir}/tarbitratord || :
if $(which init &>/dev/null); then
${csudo}init q || :
fi
}
function install_service_on_sysvinit() {
clean_service_on_sysvinit
sleep 1
if ((${os_type} == 1)); then
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
elif ((${os_type} == 2)); then
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
fi
if ((${initd_mod} == 1)); then
${csudo}chkconfig --add tarbitratord || :
${csudo}chkconfig --level 2345 tarbitratord on || :
elif ((${initd_mod} == 2)); then
${csudo}insserv tarbitratord || :
${csudo}insserv -d tarbitratord || :
elif ((${initd_mod} == 3)); then
${csudo}update-rc.d tarbitratord defaults || :
fi
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
}
function install_service_on_systemd() {
clean_service_on_systemd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
${csudo}systemctl enable tarbitratord
}
function install_service() {
if ((${service_mod} == 0)); then
install_service_on_systemd
elif ((${service_mod} == 1)); then
install_service_on_sysvinit
else
kill_tarbitrator
fi
}
function update_TDengine() {
# Start to update
echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
# Stop the service if running
if pidof tarbitrator &>/dev/null; then
if ((${service_mod} == 0)); then
${csudo}systemctl stop tarbitratord || :
elif ((${service_mod} == 1)); then
${csudo}service tarbitratord stop || :
else
kill_tarbitrator
fi
sleep 1
fi
install_main_path
#install_header
install_bin
install_service
install_jemalloc
echo
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
fi
echo
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
}
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
install_main_path
#install_header
install_bin
install_service
install_jemalloc
echo
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
fi
echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
echo
}
## ==============================Main program starts from here============================
# Install server and client
if [ -x ${bin_dir}/tarbitrator ]; then
update_flag=1
update_TDengine
else
install_TDengine
fi
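install_service_on_systemd above writes the unit file one echo at a time; the result is equivalent to the here-doc sketch below (same content, just collapsed for readability), after which the service is enabled:
cat <<'EOF' | sudo tee /etc/systemd/system/tarbitratord.service >/dev/null
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl enable tarbitratord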

625
packaging/tools/install_client.sh Normal file → Executable file
View File

@ -1,320 +1,305 @@
#!/bin/bash
#
# This file is used to install TDengine client on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
dataDir="/var/lib/taos"
logDir="/var/log/taos"
productName="TDengine"
installDir="/usr/local/taos"
configDir="/etc/taos"
serverName="taosd"
clientName="taos"
uninstallScript="rmtaos"
configFile="taos.cfg"
tarName="taos.tar.gz"
osType=Linux
pagMode=full
verMode=edge
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir=${dataDir}
log_dir=${logDir}
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
data_dir=${dataDir}
log_dir=~/${productName}/log
fi
log_link_dir="${installDir}/log"
cfg_install_dir=${configDir}
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="${installDir}"
# old bin dir
bin_dir="${installDir}/bin"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
update_flag=0
function kill_client() {
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin
${csudo}mkdir -p ${install_main_dir}/driver
if [ $productName == "TDengine" ]; then
${csudo}mkdir -p ${install_main_dir}/examples
fi
${csudo}mkdir -p ${install_main_dir}/include
if [ "$verMode" == "cluster" ]; then
${csudo}mkdir -p ${install_main_dir}/connector
fi
}
function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/${clientName} || :
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/taosdemo || :
fi
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
}
function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}
function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
if [ "$osType" != "Darwin" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
else
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
if [ "$osType" != "Darwin" ]; then
${csudo}ldconfig
else
${csudo}update_dyld_shared_cache
fi
}
function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo}/usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo}ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}
function install_config() {
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
${csudo}chmod 644 ${cfg_install_dir}/*
fi
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}
function install_log() {
${csudo}rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
fi
${csudo}ln -s ${log_dir} ${install_main_dir}/log
}
function install_connector() {
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}
function update_TDengine() {
# Start to update
if [ ! -e ${tarName} ]; then
echo "File ${tarName} does not exist"
exit 1
fi
tar -zxf ${tarName}
echo -e "${GREEN}Start to update ${productName} client...${NC}"
# Stop the client shell if running
if pidof ${clientName} &> /dev/null; then
kill_client
sleep 1
fi
install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
rm -rf $(tar -tf ${tarName})
}
function install_TDengine() {
# Start to install
if [ ! -e ${tarName} ]; then
echo "File ${tarName} does not exist"
exit 1
fi
tar -zxf ${tarName}
echo -e "${GREEN}Start to install ${productName} client...${NC}"
install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
rm -rf $(tar -tf ${tarName})
}
## ==============================Main program starts from here============================
# Install or update the client
# if the server is already installed, don't install the client
if [ -e ${bin_dir}/${serverName} ]; then
echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
exit 0
fi
if [ -x ${bin_dir}/${clientName} ]; then
update_flag=1
update_TDengine
else
install_TDengine
fi
#!/bin/bash
#
# This file is used to install TDengine client on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
dataDir="/var/lib/taos"
logDir="/var/log/taos"
productName="TDengine"
installDir="/usr/local/taos"
configDir="/etc/taos"
serverName="taosd"
clientName="taos"
uninstallScript="rmtaos"
configFile="taos.cfg"
osType=Linux
pagMode=full
verMode=edge
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir=${dataDir}
log_dir=${logDir}
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
data_dir=${dataDir}
log_dir=~/${productName}/log
fi
log_link_dir="${installDir}/log"
cfg_install_dir=${configDir}
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="${installDir}"
# old bin dir
bin_dir="${installDir}/bin"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
update_flag=0
function kill_client() {
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo}rm -rf ${install_main_dir} || :
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin
${csudo}mkdir -p ${install_main_dir}/driver
if [ $productName == "TDengine" ]; then
${csudo}mkdir -p ${install_main_dir}/examples
fi
${csudo}mkdir -p ${install_main_dir}/include
if [ "$verMode" == "cluster" ]; then
${csudo}mkdir -p ${install_main_dir}/connector
fi
}
function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/${clientName} || :
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/taosdemo || :
fi
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
}
function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}
function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
if [ "$osType" != "Darwin" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
else
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
if [ "$osType" != "Darwin" ]; then
${csudo}ldconfig
else
${csudo}update_dyld_shared_cache
fi
}
function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo}/usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo}ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}
function install_config() {
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
${csudo}chmod 644 ${cfg_install_dir}/*
fi
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}
function install_log() {
${csudo}rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
fi
${csudo}ln -s ${log_dir} ${install_main_dir}/log
}
function install_connector() {
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}
function update_TDengine() {
# Start to update
echo -e "${GREEN}Start to update ${productName} client...${NC}"
# Stop the client shell if running
if pidof ${clientName} &> /dev/null; then
kill_client
sleep 1
fi
install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
}
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install ${productName} client...${NC}"
install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
rm -rf $(tar -tf ${tarName})
}
## ==============================Main program starts from here============================
# Install or update the client
# if the server is already installed, don't install the client
if [ -e ${bin_dir}/${serverName} ]; then
echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
exit 0
fi
if [ -x ${bin_dir}/${clientName} ]; then
update_flag=1
update_TDengine
else
install_TDengine
fi
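install_client.sh is meant to be run from inside the unpacked client package directory, where the bin/, cfg/, driver/ and inc/ trees referenced through script_dir live. An illustrative invocation (the package directory name is a placeholder):
cd TDengine-client-3.0.0.0-Linux-x64   # unpacked client package directory (placeholder name)
sudo ./install_client.sh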

View File

@ -0,0 +1,6 @@
@echo off
goto %1
:needAdmin
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&goto :eof
:hasAdmin
cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32

142
packaging/tools/makearbi.sh Normal file → Executable file
View File

@ -1,71 +1,71 @@
#!/bin/bash
#
# Generate the arbitrator's tar.gz setup package for all OS systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
productName="TDengine"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/${productName}-arbitrator-${version}"
fi
# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate the arbitrator's tar.gz setup package for all OS systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
productName="TDengine"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/${productName}-arbitrator-${version}"
fi
# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"
#header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
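makearbi.sh takes its parameters positionally; an illustrative call (all argument values below are placeholders) and the package name it produces under ${release_dir} for a stable edge build:
./makearbi.sh /path/to/compile_dir 3.0.0.0 "$(date +%F)" x64 Linux edge stable full
# -> release/TDengine-arbitrator-3.0.0.0-Linux-x64.tar.gz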

492
packaging/tools/makeclient.sh Normal file → Executable file
View File

@ -1,246 +1,246 @@
#!/bin/bash
#
# Generate the tar.gz package for the Linux client for all OS systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
dbName=$9
productName="TDengine"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
else
install_dir="${release_dir}/${productName}-client-${version}"
fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${clientName}
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh \
${script_dir}/set_core.sh \
${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install_client.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
tar -zcv -f ${tarName} * --remove-files || :
else
tar -zcv -f ${tarName} * || :
mv ${tarName} ..
rm -rf ./*
mv ../${tarName} .
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
chmod a+x ${install_dir}/install_client.sh
if [[ $productName == "TDengine" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
if [ "$verMode" == "cluster" ]; then
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
fi
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknow verMode, nor cluster or edge"
# exit 1
# fi
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for linux client in all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
dbName=$9
productName="TDengine"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
else
install_dir="${release_dir}/${productName}-client-${version}"
fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${clientName}
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh \
${script_dir}/set_core.sh \
${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install_client.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
tar -zcv -f ${tarName} * --remove-files || :
else
tar -zcv -f ${tarName} * || :
mv ${tarName} ..
rm -rf ./*
mv ../${tarName} .
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
chmod a+x ${install_dir}/install_client.sh
if [[ $productName == "TDengine" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
if [ "$verMode" == "cluster" ]; then
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
fi
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector || :
cp -r ${connector_dir}/nodejs ${install_dir}/connector || :
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknow verMode, nor cluster or edge"
# exit 1
# fi
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}
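For reference, a hedged example invocation of makeclient.sh; the nine positional arguments follow the order read at the top of the script ($1..$9), and the path and version values below are hypothetical:
./makeclient.sh /home/builder/TDengine/debug 3.0.0.0 "$(date +%F)" x64 Linux cluster stable full taos
# with verMode=cluster and verType=stable this would produce
# release/TDengine-enterprise-client-3.0.0.0-Linux-x64.tar.gz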

756
packaging/tools/makepkg.sh Normal file → Executable file
View File

@ -1,378 +1,378 @@
#!/bin/bash
#
# Generate tar.gz package for all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
versionComp=$9
dbName=${10}
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
productName="TDengine"
serverName="taosd"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
dumpName="taosdump"
benchmarkName="taosBenchmark"
toolsName="taostools"
adapterName="taosadapter"
defaultPasswd="taosdata"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
else
install_dir="${release_dir}/${productName}-server-${version}"
fi
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
cd ${top_dir}/src/kit/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"
cd ${curr_dir}
else
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
fi
# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${serverName}
strip ${build_dir}/bin/${clientName}
# the lite version doesn't include taosadapter, so there is no RESTful interface
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
taostools_bin_files=""
else
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
&& echo "TDinsight.sh downloaded!" \
|| echo "failed to download TDinsight.sh"
# download TDinsight caches
orig_pwd=$(pwd)
tdinsight_caches=""
cd ${build_dir}/bin/ && \
chmod +x TDinsight.sh
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
cd $orig_pwd
echo "TDinsight caches: $tdinsight_caches"
taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"
bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
fi
if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi
if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
fi
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml
mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi
if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
mkdir -p ${taostools_install_dir}/bin \
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
else
echo -e "install-taostools.sh not found"
fi
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|| echo -e "failed to copy uninstall-taostools.sh"
else
echo -e "uninstall-taostools.sh not found"
fi
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
fi
fi
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
tar -zcv -f ${tarName} * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${tarName} error !!!"
exit $exitcode
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh
if [[ $dbName == "taos" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
# Copy connector
if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
fi
# Copy release note
cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknow verMode, nor cluster or edge"
# exit 1
# fi
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
taostools_pkg_name=${taostools_pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
if [ -n "${taostools_bin_files}" ]; then
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!" || echo "failed to download TDinsight.sh"
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
exit $exitcode
fi
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
versionComp=$9
dbName=${10}
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
productName="TDengine"
serverName="taosd"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
dumpName="taosdump"
benchmarkName="taosBenchmark"
toolsName="taostools"
adapterName="taosadapter"
defaultPasswd="taosdata"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
else
install_dir="${release_dir}/${productName}-server-${version}"
fi
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
cd ${top_dir}/tools/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"
cd ${curr_dir}
else
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
fi
# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${serverName}
strip ${build_dir}/bin/${clientName}
# the lite version doesn't include taosadapter, so there is no RESTful interface
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
taostools_bin_files=""
else
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
&& echo "TDinsight.sh downloaded!" \
|| echo "failed to download TDinsight.sh"
# download TDinsight caches
orig_pwd=$(pwd)
tdinsight_caches=""
cd ${build_dir}/bin/ && \
chmod +x TDinsight.sh
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
cd $orig_pwd
echo "TDinsight caches: $tdinsight_caches"
taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"
bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install.sh"
nginx_dir="${top_dir}/../enterprise/src/plugins/web"
init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
fi
if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi
if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
fi
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml
mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi
if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
mkdir -p ${taostools_install_dir}/bin \
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
else
echo -e "install-taostools.sh not found"
fi
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|| echo -e "failed to copy uninstall-taostools.sh"
else
echo -e "uninstall-taostools.sh not found"
fi
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
fi
fi
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
tar -zcv -f ${tarName} * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${tarName} error !!!"
exit $exitcode
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh
if [[ $dbName == "taos" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
# Copy connector
if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
fi
# Copy release note
cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknow verMode, nor cluster or edge"
# exit 1
# fi
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
taostools_pkg_name=${taostools_pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
if [ -n "${taostools_bin_files}" ]; then
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!" || echo "failed to download TDinsight.sh"
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
exit $exitcode
fi
fi
cd ${curr_dir}
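Likewise, a hedged example invocation of makepkg.sh with hypothetical values; the ten positional arguments follow the order read at the top of the script ($1..${10}):
./makepkg.sh /home/builder/TDengine/debug 3.0.0.0 "$(date +%F)" x64 Linux edge stable full 3.0.0.0 taos
# with verMode=edge and verType=stable this would produce
# release/TDengine-server-3.0.0.0-Linux-x64.tar.gz, plus a taosTools-*.tar.gz
# package when the taos-tools binaries are present in the build directory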

1078
packaging/tools/post.sh Normal file → Executable file

File diff suppressed because it is too large

284
packaging/tools/preun.sh Normal file → Executable file
View File

@ -1,142 +1,142 @@
#!/bin/bash
#
# Script to stop the service and uninstall TSDB
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
service_config_dir="/etc/systemd/system"
taos_service_name="taosd"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_taosadapter() {
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function kill_taosd() {
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet taosadapter; then
echo "taosadapter is running, stopping it..."
${csudo}systemctl stop taosadapter &> /dev/null || echo &> /dev/null
fi
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo}rm -f ${taosd_service_config}
[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
}
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
echo "TDengine taosd is running, stopping it..."
${csudo}service taosd stop || :
fi
if ((${initd_mod}==1)); then
${csudo}chkconfig --del taosd || :
elif ((${initd_mod}==2)); then
${csudo}insserv -r taosd || :
elif ((${initd_mod}==3)); then
${csudo}update-rc.d -f taosd remove || :
fi
${csudo}rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then
${csudo}init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop taosd
kill_taosadapter
kill_taosd
fi
}
# Stop service and disable booting start.
clean_service
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
if ((${service_mod}==2)); then
kill_taosadapter
kill_taosd
fi
echo -e "${GREEN}TDengine is removed successfully!${NC}"
#!/bin/bash
#
# Script to stop the service and uninstall TSDB
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
service_config_dir="/etc/systemd/system"
taos_service_name="taosd"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_taosadapter() {
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function kill_taosd() {
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet taosadapter; then
echo "taosadapter is running, stopping it..."
${csudo}systemctl stop taosadapter &> /dev/null || echo &> /dev/null
fi
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo}rm -f ${taosd_service_config}
[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
}
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
echo "TDengine taosd is running, stopping it..."
${csudo}service taosd stop || :
fi
if ((${initd_mod}==1)); then
${csudo}chkconfig --del taosd || :
elif ((${initd_mod}==2)); then
${csudo}insserv -r taosd || :
elif ((${initd_mod}==3)); then
${csudo}update-rc.d -f taosd remove || :
fi
${csudo}rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then
${csudo}init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop taosd
kill_taosadapter
kill_taosd
fi
}
# Stop service and disable booting start.
clean_service
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
if ((${service_mod}==2)); then
kill_taosadapter
kill_taosd
fi
echo -e "${GREEN}TDengine is removed successfully!${NC}"

View File

@ -1,136 +1,136 @@
taos-1.6.4.0 (Release on 2019-12-01)
Bug fixed:
1.Look for possible causes of file corruption and fix them
2.Encapsulate memory allocation functions to reduce the possibility of crashes
3.Increase Arm64 compilation options
4.Remove most of the warnings in the code
5.Provide a variety of connector usage documents
6.Network connection can be selected in udp and tcp
7.Allow the maximum number of Tags to be 32
8.Bugs reported by the user
taos-1.5.2.6 (Release on 2019-05-13)
Bug fixed:
- Nchar strings sometimes were wrongly truncated on Windows
- Importing data from a file throws an "invalid SQL" error
taos-1.5.2.5 (Release on 2019-05-13)
Bug fixed:
- Long timespan data import sometimes affects query result
- Synchronization of cluster dnodes worked incorrectly when importing
taos-1.5.2.4 (Release on 2019-05-10)
New Features:
- Optimized Windows client installation: now users don't need to copy taos.dll manually
- Changed the priority of taos.cfg and JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
Bug fixed:
- Expired data files were not deleted correctly
- Occasionally importing returned an "affected rows" count larger than 0, but 0 rows were actually written into the db
- Commit log is occupied by too many import-to-file requests, which blocked further data importing
- Cloud service shows a wrong number of available days with current balance
- Other minor issues
taos-1.5.1 (Release on 2019-04-09)
New Features:
- Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
- Improved the performance of "first/last" methods
- Increased system stability
Bug fixed:
- Connection failure when querying huge STables through TCP
- The primary timestamp is occasionally returned as NULL in some queries
- Operation failure when updating a tag value to NULL
- Stream calculation couldn't start on certain occasions
taos-1.5.0 (Release on 2019-03-11)
New Features:
- New syntax to automatically create tables when inserting values into non-existing tables
- New syntax "slimit/soffset" to pagenate groups in a query result set
- Support "top/bottom" queries on a supertable
- High performance statistic aggregation function "apercentile"
- Remove "first_t/last_t" functions; improve the performance of "first/last" function
- Add pre-aggregation for bool type values
- Supports fixed-length streaming computation, i.e. users may define an end time for a stream
- New JAVA API for SQL subscription, supports table/supertable/SQL query subscription
Bug fixed:
- Data file broken issue when frequently using "import"
- Using "spread" on a super table may return negative values
- RPC bug that random network packets might cause the RPC module to crash
taos-1.4.15 (Released on 2019-01-23)
New Features:
- JDBC Driver now supports configuring timezone, locale, cfgdir in JDBC url
- A new API is added to validate if a table creation sql statement is correct in syntax without actually creating that table
Bugs Fixed:
- "select last(*) from STable" sometimest returned incorrect number of rows
- JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values.
- Web shell automatically changed query string to lower case
taos-1.4.14 (Released on 2018-12-22)
New Features:
- C Driver support for integration with Python
- JDBC Driver support for integration with R and MATLAB
taos-1.4.13 (Released on 2018-12-14)
Bugs Fixed:
- Clients failed to connect to server due to unexpected and invalid packets received by the server.
Features Added:
- Add support to HikariCP in TSDB JDBC driver.
taos-1.4.12 (Released on 2018-12-08)
Bugs Fixed:
- Querying data while inserting into the database might return incomplete resultsets.
Features Added:
- A new python driver is added.
- Increased system stability.
- Changed meaning of database configuration parameter 'ablocks'. 'ablocks' used to refer to the number of total cache blocks in memory, now it refers to the average number of cache blocks for each table in memory.
taos-1.4.11 (Released on 2018-11-23)
Bugs Fixed:
- Thread memory leaking during high-frequency committing.
- Master dnode selection failure caused by accidental network issues.
Features Added:
- Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables".
- Add an error message mechanism in C# driver. An error with message "Failed to connect to server" is thrown when a network connection interruption occurs while fetching data.
taos-1.4.10 (Released on 2018-11-13)
Bugs Fixed:
- Taosdump failed while exporting extremely large datasets to a .sql file.
- Commit status did not change correctly if the last commit was triggered by commit threshold time (ctime) and no more new data was written to DB during the next ctime period.
Features Added:
- Support importing historical data from Telegraf interface.
- Support MyBatis framework in TSDB JDBC Driver.
- Change result set row indexing in JDBC Driver. Result set row indexes now start from 1 instead of 0.
taos-1.4.9 (Released on 2018-11-02)
Bugs Fixed:
- Dumping data using UTF-8 format in client shell failed.
- Tag query failed using C# Driver.
- Committing data to disk failed if DB files were corrupted.
- Pressing Ctrl+c multiple times in the client shell produced a segmentation fault.
Features Added:
- Changed the display pattern in shell for taosdump.
- Add a check to the status of an existing resultset before firing a new query in a single JDBC connection. A connection can only have a single open resultset, and the resultset must be closed before one can execute new queries.
taos-1.4.7 (Released on 2018-10-25)
Bug Fixed:
- UTF-8 encoding in JDBC Driver did not give the correct Chinese characters.
- Fix crash error when where clause is too long.
Features Added:
- Add check on database properties, force ablocks to be at least (4 * tables) in a vnode.
- Check if pVgroup is empty in sdb.
taos-1.4.6 (Released on 2018-10-21)
Bug Fixed:
- Fix wrong symbol addition while exporting csv files.
Features Added:
- Update grafana plugins.
- Update python drivers.
- Add error code explanation in JDBC Driver.
- Prohibit login when the versions of server and client do not match.
taos-1.4.5 (Released on 2018-10-17)
Bug Fixed:
- Fix HTTP request truncation bug in Telegraf interface.
Features Added:
- Support nchar and null object in JDBC Driver.
taos-1.6.4.0 (Release on 2019-12-01)
Bug fixed:
1.Look for possible causes of file corruption and fix them
2.Encapsulate memory allocation functions to reduce the possibility of crashes
3.Increase Arm64 compilation options
4.Remove most of the warnings in the code
5.Provide a variety of connector usage documents
6.Network connection can be selected in udp and tcp
7.Allow the maximum number of Tags to be 32
8.Bugs reported by the user
taos-1.5.2.6 (Release on 2019-05-13)
Bug fixed:
- Nchar strings sometimes were wrongly truncated on Windows
- Importing data from a file throws an "invalid SQL" error
taos-1.5.2.5 (Release on 2019-05-13)
Bug fixed:
- Long timespan data import sometimes affects query result
- Synchronization of cluster dnodes worked incorrectly when importing
taos-1.5.2.4 (Release on 2019-05-10)
New Features:
- Optimized Windows client installation: now users don't need to copy taos.dll manually
- Changed the priority of taos.cfg and JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
Bug fixed:
- Expired data files were not deleted correctly
- Occasionally importing returned an "affected rows" count larger than 0, but 0 rows were actually written into the db
- Commit log is occupied by too many import-to-file requests, which blocked further data importing
- Cloud service shows a wrong number of available days with current balance
- Other minor issues
taos-1.5.1 (Release on 2019-04-09)
New Features:
- Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
- Improved the performance of "first/last" methods
- Increased system stability
Bug fixed:
- Connection failure when querying huge STables through TCP
- The primary timestamp is occasionally returned as NULL in some queries
- Operation failure when updating a tag value to NULL
- Stream calculation couldn't start on certain occasions
taos-1.5.0 (Release on 2019-03-11)
New Features:
- New syntax to automatically create tables when inserting values into non-existing tables
- New syntax "slimit/soffset" to pagenate groups in a query result set
- Support "top/bottom" queries on a supertable
- High performance statistic aggregation function "apercentile"
- Remove "first_t/last_t" functions; improve the performance of "first/last" function
- Add pre-aggregation for bool type values
- Supports fixed-length streaming computation, i.e. users may define an end time for a stream
- New JAVA API for SQL subscription, supports table/supertable/SQL query subscription
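A minimal C sketch of the two query-syntax items above (auto-create on insert and slimit/soffset); the super table, tag values, and column names are invented for illustration, and the statements assume a database named demo already exists:
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "demo", 0);
  if (conn == NULL) return 1;

  // Insert into a child table that does not exist yet; it is created
  // automatically from the super table "meters" with the given tag values.
  TAOS_RES *res = taos_query(conn,
      "insert into d1001 using meters tags (1, 'beijing') values (now, 10.3, 219)");
  taos_free_result(res);

  // slimit/soffset paginate the groups of a grouped query:
  // skip the first 10 groups and return the next 10.
  res = taos_query(conn,
      "select avg(current) from meters group by location slimit 10 soffset 10");
  taos_free_result(res);

  taos_close(conn);
  return 0;
}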
Bug fixed:
- Data file broken issue when frequently using "import"
- Using "spread" on a super table may return negative values
- RPC bug where random network packets might cause the RPC module to crash
taos-1.4.15 (Released on 2019-01-23)
New Features:
- JDBC Driver now supports configuring timezone, locale, cfgdir in JDBC url
- A new API is added to validate if a table creation sql statement is correct in syntax without actually creating that table
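A sketch of the validation API mentioned above, assuming it is exposed in the C client as taos_validate_sql (treat the exact function name as an assumption; the DDL string is only an example):
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  // Returns 0 when the statement parses correctly; no table is actually created.
  const char *ddl = "create table sensors (ts timestamp, val double) tags (site int)";
  int rc = taos_validate_sql(conn, ddl);
  printf("syntax check result: %d\n", rc);

  taos_close(conn);
  return 0;
}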
Bugs Fixed:
- "select last(*) from STable" sometimest returned incorrect number of rows
- JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values.
- Web shell automatically changed query string to lower case
taos-1.4.14 (Released on 2018-12-22)
New Features:
- C Driver support for integration with Python
- JDBC Driver support for integration with R and MATLAB
taos-1.4.13 (Released on 2018-12-14)
Bugs Fixed:
- Clients failed to connect to the server due to unexpected and invalid packets received by the server.
Features Added:
- Add support to HikariCP in TSDB JDBC driver.
taos-1.4.12 (Released on 2018-12-08)
Bugs Fixed:
- Querying data while inserting into the database might return incomplete resultsets.
Features Added:
- A new python driver is added.
- Increased system stability.
- Changed the meaning of the database configuration parameter 'ablocks'. 'ablocks' used to refer to the total number of cache blocks in memory; it now refers to the average number of cache blocks for each table in memory.
taos-1.4.11 (Released on 2018-11-23)
Bugs Fixed:
- Thread memory leaking during high-frequency committing.
- Master dnode selection failure caused by accidental network issues.
Features Added:
- Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables".
- Add an error message mechanism in the C# driver. An error with the message "Failed to connect to server" is thrown when a network connection interruption occurs while fetching data.
taos-1.4.10 (Released on 2018-11-13)
Bugs Fixed:
- Taosdump failed while exporting extremely large datasets to a .sql file.
- Commit status did not change correctly if the last commit was triggered by commit threshold time (ctime) and no more new data was written to DB during the next ctime period.
Features Added:
- Support importing historical data from Telegraf interface.
- Support MyBatis framework in TSDB JDBC Driver.
- Change result set row indexing in JDBC Driver. Result set row indexes now start from 1 instead of 0.

0
packaging/tools/remove.sh Normal file → Executable file
View File

260
packaging/tools/remove_arbi.sh Normal file → Executable file
View File

@ -1,130 +1,130 @@
#!/bin/bash
#
# Script to stop the service and uninstall TDengine's arbitrator
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
}
function clean_header() {
# Remove link
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_log() {
# Remove link
${csudo}rm -rf /arbitrator.log || :
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "TDengine tarbitrator is running, stopping it..."
${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo}rm -f ${tarbitratord_service_config}
}
function clean_service_on_sysvinit() {
if pidof tarbitrator &> /dev/null; then
echo "TDengine's tarbitrator is running, stopping it..."
${csudo}service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}update-rc.d -f tarbitratord remove || :
fi
fi
${csudo}rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo}init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must stop manually
kill_tarbitrator
fi
}
# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log
${csudo}rm -rf ${install_main_dir}
echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"

170
packaging/tools/remove_client.sh Normal file → Executable file
View File

@ -1,85 +1,85 @@
#!/bin/bash
#
# Script to stop the client and uninstall database, but retain the config and log files.
set -e
# set -x
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
installDir="/usr/local/taos"
clientName="taos"
uninstallScript="rmtaos"
#install main path
install_main_dir=${installDir}
log_link_dir=${installDir}/log
cfg_link_dir=${installDir}/cfg
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
function kill_client() {
if [ -n "$(pidof ${clientName})" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
# Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
}
function clean_header() {
# Remove link
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
${csudo}rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
# Remove link
${csudo}rm -rf ${log_link_dir} || :
}
# Stop client.
kill_client
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config
${csudo}rm -rf ${install_main_dir}
echo -e "${GREEN}TDengine client is removed successfully!${NC}"
echo

78
packaging/tools/repair_link.sh Normal file → Executable file
View File

@ -1,39 +1,39 @@
#!/bin/bash
# This script is used to repair links when you want to move TDengine
# data to other places and still access the data.
# Read link path
read -p "Please enter link directory such as /var/lib/taos/tsdb: " linkDir
while true; do
if [ ! -d $linkDir ]; then
read -p "Paht not exists, please enter the correct link path:" linkDir
continue
fi
break
done
declare -A dirHash
for linkFile in $(find -L $linkDir -xtype l); do
targetFile=$(readlink -f $linkFile)
echo "targetFile: ${targetFile}"
# TODO : Extract directory part and basename part
dirName=$(dirname $(dirname ${targetFile}))
baseName=$(basename $(dirname ${targetFile}))/$(basename ${targetFile})
# TODO :
newDir="${dirHash["$dirName"]}"
if [ -z "${dirHash["$dirName"]}" ]; then
read -p "Please enter the directory to replace ${dirName}:" newDir
read -p "Do you want to replcace all[y/N]?" replcaceAll
if [[ ( "${replcaceAll}" == "y") || ( "${replcaceAll}" == "Y") ]]; then
dirHash["$dirName"]="$newDir"
fi
fi
# Replcace the file
ln -sf "${newDir}/${baseName}" "${linkFile}"
done

6
packaging/tools/run_taosd_and_taosadapter.sh Normal file → Executable file
View File

@ -1,3 +1,3 @@
#!/bin/bash
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd

80
packaging/tools/set_core.sh Normal file → Executable file
View File

@ -1,40 +1,40 @@
#!/bin/bash
#
# This script configures core dump generation for when taosd crashes
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
# set -e
# set -x
corePath=$1
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
if [[ ! -n ${corePath} ]]; then
echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
read corePath
while true; do
if [[ ! -z "$corePath" ]]; then
break
else
read -p "Please enter a file directory to save the coredump file:" corePath
fi
done
fi
ulimit -c unlimited
${csudo}sed -i '/ulimit -c unlimited/d' /etc/profile ||:
${csudo}sed -i '$a\ulimit -c unlimited' /etc/profile ||:
source /etc/profile
${csudo}mkdir -p ${corePath} ||:
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:

0
packaging/tools/startPre.sh Normal file → Executable file
View File

View File

@ -1,144 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb
define attach_pidof
if $argc != 1
help attach_pidof
else
shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
source /tmp/gdb.pidof
end
end
document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end
set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6
set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
set $TSDB_CFG_PRINT_LEN = 53
define print_blank
if $argc == 1
set $blank_len = $arg0
while $blank_len > 0
printf "%s", " "
set $blank_len = $blank_len - 1
end
end
end
define dump_cfg
if $argc != 1
help dump_cfg
else
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
if $blen < 0
set $blen = 0
end
#printf "%s: %d\n", "******blen: ", $blen
printf "%s: ", $arg0.option
print_blank $blen
if $arg0.valType == $TAOS_CFG_VTYPE_INT8
printf "%d\n", *((int8_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
printf "%d\n", *((int16_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
printf "%d\n", *((int32_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
printf "%f\n", *((float *) $arg0.ptr)
else
printf "%s\n", $arg0.ptr
end
end
end
end
end
end
document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end
set pagination off
attach_pidof taosd
set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum
p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
# print tsGlobalConfig[$idx].option
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
# p "1"
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
set $idx=0
p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
detach
quit

View File

@ -234,6 +234,10 @@ static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool conver
if (msg->rsp.withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(msg->rsp.blockSchema, msg->resIter);
setResSchemaInfo(&msg->resInfo, pSW->pSchema, pSW->nCols);
taosMemoryFreeClear(msg->resInfo.row);
taosMemoryFreeClear(msg->resInfo.pCol);
taosMemoryFreeClear(msg->resInfo.length);
taosMemoryFreeClear(msg->resInfo.convertBuf);
}
setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4);
return &msg->resInfo;
@ -310,7 +314,7 @@ void hbMgrInitMqHbRspHandle();
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res);
int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList);
int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** res);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
#ifdef __cplusplus
}

View File

@ -83,6 +83,15 @@ void closeTransporter(STscObj *pTscObj) {
rpcClose(pTscObj->pAppInfo->pTransporter);
}
static bool clientRpcRfp(int32_t code) {
if (code == TSDB_CODE_RPC_REDIRECT) {
return true;
} else {
return false;
}
}
// TODO refactor
void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
SRpcInit rpcInit;
@ -91,6 +100,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.label = "TSC";
rpcInit.numOfThreads = numOfThread;
rpcInit.cfp = processMsgFromServer;
rpcInit.rfp = clientRpcRfp;
rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user;

View File

@ -291,7 +291,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
SQueryResult res = {.code = 0, .numOfRows = 0, .msgSize = ERROR_MSG_BUF_DEFAULT_SIZE, .msg = pRequest->msgBuf};
int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
pRequest->metric.start, NULL != pRes, &res);
pRequest->metric.start, &res);
if (code != TSDB_CODE_SUCCESS) {
if (pRequest->body.queryJob != 0) {
schedulerFreeJob(pRequest->body.queryJob);
@ -310,9 +310,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
}
if (pRes) {
*pRes = res.res;
}
*pRes = res.res;
pRequest->code = res.code;
terrno = res.code;
@ -324,7 +322,60 @@ int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList)
return getPlan(pRequest, pQuery, &pRequest->body.pDag, *pNodeList);
}
int32_t validateSversion(SRequestObj* pRequest, void* res) {
SArray* pArray = NULL;
int32_t code = 0;
if (TDMT_VND_SUBMIT == pRequest->type) {
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {
return TSDB_CODE_SUCCESS;
}
pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
if (NULL == pArray) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp *blk = pRsp->pBlocks + i;
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
taosArrayPush(pArray, &tbSver);
}
} else if (TDMT_VND_QUERY == pRequest->type) {
}
SCatalog* pCatalog = NULL;
CHECK_CODE_GOTO(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog), _return);
SEpSet epset = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &epset, pArray);
_return:
taosArrayDestroy(pArray);
return code;
}
void freeRequestRes(SRequestObj* pRequest, void* res) {
if (NULL == res) {
return;
}
if (TDMT_VND_SUBMIT == pRequest->type) {
tFreeSSubmitRsp((SSubmitRsp*)res);
} else if (TDMT_VND_QUERY == pRequest->type) {
}
}
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res) {
void* pRes = NULL;
if (TSDB_CODE_SUCCESS == code) {
switch (pQuery->execMode) {
case QUERY_EXEC_MODE_LOCAL:
@ -337,7 +388,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code
SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
code = getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList);
if (TSDB_CODE_SUCCESS == code) {
code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList, res);
code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList, &pRes);
if (NULL != pRes) {
code = validateSversion(pRequest, pRes);
}
}
taosArrayDestroy(pNodeList);
break;
@ -356,6 +410,12 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code
if (NULL != pRequest && TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
freeRequestRes(pRequest, pRes);
pRes = NULL;
}
if (res) {
*res = pRes;
}
return pRequest;
@ -584,8 +644,8 @@ bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) {
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->ahandle;
assert(pMsg->ahandle != NULL);
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
assert(pMsg->info.ahandle != NULL);
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
@ -616,7 +676,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId);
}
SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->handle};
SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle};
if (pMsg->contLen > 0) {
buf.pData = taosMemoryCalloc(1, pMsg->contLen);
@ -957,7 +1017,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
void* clientRpc = NULL;
SServerStatusRsp statusRsp = {0};
SEpSet epSet = {.inUse = 0, .numOfEps = 1};
SRpcMsg rpcMsg = {.ahandle = (void*)0x9526, .msgType = TDMT_DND_SERVER_STATUS};
SRpcMsg rpcMsg = {.info.ahandle = (void*)0x9526, .msgType = TDMT_DND_SERVER_STATUS};
SRpcMsg rpcRsp = {0};
SRpcInit rpcInit = {0};
char pass[TSDB_PASSWORD_LEN + 1] = {0};

View File

@ -580,7 +580,7 @@ static bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg){
const char *pVal = kvVal->value;
int32_t len = kvVal->length;
char *endptr = NULL;
double result = strtod(pVal, &endptr);
double result = taosStr2Double(pVal, &endptr);
if(pVal == endptr){
smlBuildInvalidDataMsg(msg, "invalid data", pVal);
return false;
@ -714,25 +714,31 @@ static bool smlIsNchar(const char *pVal, uint16_t len) {
static int64_t smlGetTimeValue(const char *value, int32_t len, int8_t type) {
char *endPtr = NULL;
double ts = (double)strtoll(value, &endPtr, 10);
int64_t tsInt64 = taosStr2Int64(value, &endPtr, 10);
if(value + len != endPtr){
return -1;
}
double ts = tsInt64;
switch (type) {
case TSDB_TIME_PRECISION_HOURS:
ts *= (3600 * 1e9);
tsInt64 *= (3600 * 1e9);
break;
case TSDB_TIME_PRECISION_MINUTES:
ts *= (60 * 1e9);
tsInt64 *= (60 * 1e9);
break;
case TSDB_TIME_PRECISION_SECONDS:
ts *= (1e9);
tsInt64 *= (1e9);
break;
case TSDB_TIME_PRECISION_MILLI:
ts *= (1e6);
tsInt64 *= (1e6);
break;
case TSDB_TIME_PRECISION_MICRO:
ts *= (1e3);
tsInt64 *= (1e3);
break;
case TSDB_TIME_PRECISION_NANO:
break;
@ -743,7 +749,7 @@ static int64_t smlGetTimeValue(const char *value, int32_t len, int8_t type) {
return -1;
}
return (int64_t)ts;
return tsInt64;
}
static int64_t smlGetTimeNow(int8_t precision) {

View File

@ -218,6 +218,13 @@ int32_t stmtParseSql(STscStmt* pStmt) {
pStmt->bInfo.needParse = false;
if (pStmt->sql.pQuery->pRoot && 0 == pStmt->sql.type) {
pStmt->sql.type = STMT_TYPE_INSERT;
} else if (pStmt->sql.pQuery->pPrepareRoot) {
pStmt->sql.type = STMT_TYPE_QUERY;
}
/*
switch (nodeType(pStmt->sql.pQuery->pRoot)) {
case QUERY_NODE_VNODE_MODIF_STMT:
if (0 == pStmt->sql.type) {
@ -231,6 +238,7 @@ int32_t stmtParseSql(STscStmt* pStmt) {
tscError("not supported stmt type %d", nodeType(pStmt->sql.pQuery->pRoot));
STMT_ERR_RET(TSDB_CODE_TSC_STMT_CLAUSE_ERROR);
}
*/
return TSDB_CODE_SUCCESS;
}
@ -252,8 +260,8 @@ int32_t stmtCleanBindInfo(STscStmt* pStmt) {
return TSDB_CODE_SUCCESS;
}
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool freeRequest) {
if (STMT_TYPE_QUERY != pStmt->sql.type || freeRequest) {
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
if (STMT_TYPE_QUERY != pStmt->sql.type || deepClean) {
taos_free_result(pStmt->exec.pRequest);
pStmt->exec.pRequest = NULL;
}
@ -272,7 +280,11 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool freeRequest) {
continue;
}
qFreeStmtDataBlock(pBlocks);
if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) {
qFreeStmtDataBlock(pBlocks);
} else {
qDestroyStmtDataBlock(pBlocks);
}
taosHashRemove(pStmt->exec.pBlockHash, key, keyLen);
pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
@ -312,11 +324,11 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
taosHashCleanup(pStmt->sql.pTableCache);
pStmt->sql.pTableCache = NULL;
memset(&pStmt->sql, 0, sizeof(pStmt->sql));
STMT_ERR_RET(stmtCleanExecInfo(pStmt, false, true));
STMT_ERR_RET(stmtCleanBindInfo(pStmt));
memset(&pStmt->sql, 0, sizeof(pStmt->sql));
return TSDB_CODE_SUCCESS;
}

View File

@ -182,10 +182,14 @@ typedef struct {
typedef struct {
tmq_t* tmq;
int32_t async;
int8_t async;
int8_t automatic;
int8_t freeOffsets;
tmq_commit_cb* userCb;
tsem_t rspSem;
tmq_resp_err_t rspErr;
SArray* offsets;
void* userParam;
} SMqCommitCbParam;
tmq_conf_t* tmq_conf_new() {
@ -314,6 +318,142 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
return sprintf(dst, "%s:%d", topicName, vg);
}
int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) {
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
pParam->rspErr = code == 0 ? TMQ_RESP_ERR__SUCCESS : TMQ_RESP_ERR__FAIL;
if (pParam->async) {
if (pParam->automatic && pParam->tmq->commitCb) {
pParam->tmq->commitCb(pParam->tmq, pParam->rspErr, (tmq_topic_vgroup_list_t*)pParam->offsets,
pParam->tmq->commitCbUserParam);
} else if (!pParam->automatic && pParam->userCb) {
pParam->userCb(pParam->tmq, pParam->rspErr, (tmq_topic_vgroup_list_t*)pParam->offsets, pParam->userParam);
}
if (pParam->freeOffsets) {
taosArrayDestroy(pParam->offsets);
}
taosMemoryFree(pParam);
} else {
tsem_post(&pParam->rspSem);
}
return 0;
}
int32_t tmqCommitInner(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_t automatic, int8_t async,
tmq_commit_cb* userCb, void* userParam) {
SMqCMCommitOffsetReq req;
SArray* pOffsets = NULL;
void* buf = NULL;
SMqCommitCbParam* pParam = NULL;
SMsgSendInfo* sendInfo = NULL;
int8_t freeOffsets;
int32_t code = -1;
if (offsets == NULL) {
freeOffsets = 1;
pOffsets = taosArrayInit(0, sizeof(SMqOffset));
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
SMqOffset offset;
tstrncpy(offset.topicName, pTopic->topicName, TSDB_TOPIC_FNAME_LEN);
tstrncpy(offset.cgroup, tmq->groupId, TSDB_CGROUP_LEN);
offset.vgId = pVg->vgId;
offset.offset = pVg->currentOffset;
taosArrayPush(pOffsets, &offset);
}
}
} else {
freeOffsets = 0;
pOffsets = (SArray*)&offsets->container;
}
req.num = (int32_t)pOffsets->size;
req.offsets = pOffsets->pData;
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
code = tEncodeSMqCMCommitOffsetReq(&encoder, &req);
if (code < 0) {
goto END;
}
int32_t tlen = encoder.pos;
buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
tEncoderClear(&encoder);
goto END;
}
tEncoderClear(&encoder);
tEncoderInit(&encoder, buf, tlen);
tEncodeSMqCMCommitOffsetReq(&encoder, &req);
tEncoderClear(&encoder);
pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam));
if (pParam == NULL) {
goto END;
}
pParam->tmq = tmq;
pParam->automatic = automatic;
pParam->async = async;
pParam->offsets = pOffsets;
pParam->freeOffsets = freeOffsets;
pParam->userCb = userCb;
pParam->userParam = userParam;
if (!async) tsem_init(&pParam->rspSem, 0, 0);
sendInfo = taosMemoryMalloc(sizeof(SMsgSendInfo));
if (sendInfo == NULL) goto END;
sendInfo->msgInfo = (SDataBuf){
.pData = buf,
.len = tlen,
.handle = NULL,
};
sendInfo->requestId = generateRequestId();
sendInfo->requestObjRefId = 0;
sendInfo->param = pParam;
sendInfo->fp = tmqCommitCb;
sendInfo->msgType = TDMT_MND_MQ_COMMIT_OFFSET;
SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);
int64_t transporterId = 0;
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
if (!async) {
tsem_wait(&pParam->rspSem);
code = pParam->rspErr;
tsem_destroy(&pParam->rspSem);
taosMemoryFree(pParam);
}
// avoid double free if msg is sent
buf = NULL;
code = 0;
END:
if (buf) taosMemoryFree(buf);
/*if (pParam) taosMemoryFree(pParam);*/
/*if (sendInfo) taosMemoryFree(sendInfo);*/
if (code != 0 && async) {
if (automatic) {
tmq->commitCb(tmq, TMQ_RESP_ERR__FAIL, (tmq_topic_vgroup_list_t*)pOffsets, tmq->commitCbUserParam);
} else {
userCb(tmq, TMQ_RESP_ERR__FAIL, (tmq_topic_vgroup_list_t*)pOffsets, userParam);
}
}
if (!async && freeOffsets) {
taosArrayDestroy(pOffsets);
}
return code;
}
void tmqAssignDelayedHbTask(void* param, void* tmrId) {
tmq_t* tmq = (tmq_t*)param;
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
@ -350,7 +490,8 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
tmqAskEp(tmq, true);
taosTmrReset(tmqAssignDelayedHbTask, 1000, tmq, tmqMgmt.timer, &tmq->hbTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmq_commit(tmq, NULL, true);
/*tmq_commit(tmq, NULL, true);*/
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
@ -385,32 +526,11 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
int32_t tmqSubscribeCb(void* param, const SDataBuf* pMsg, int32_t code) {
SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param;
pParam->rspErr = code;
tmq_t* tmq = pParam->tmq;
/*tmq_t* tmq = pParam->tmq;*/
tsem_post(&pParam->rspSem);
return 0;
}
int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) {
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
pParam->rspErr = code == 0 ? TMQ_RESP_ERR__SUCCESS : TMQ_RESP_ERR__FAIL;
if (pParam->tmq->commitCb) {
pParam->tmq->commitCb(pParam->tmq, pParam->rspErr, NULL, pParam->tmq->commitCbUserParam);
}
if (!pParam->async)
tsem_post(&pParam->rspSem);
else {
if (pParam->offsets) {
taosArrayDestroy(pParam->offsets);
}
tsem_destroy(&pParam->rspSem);
/*if (pParam->pArray) {*/
/*taosArrayDestroy(pParam->pArray);*/
/*}*/
taosMemoryFree(pParam);
}
return 0;
}
tmq_resp_err_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
if (*topics == NULL) {
*topics = tmq_list_new();
@ -541,6 +661,8 @@ FAIL:
}
tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int32_t async) {
return tmqCommitInner(tmq, offsets, 0, async, tmq->commitCb, tmq->commitCbUserParam);
#if 0
// TODO: add read write lock
SRequestObj* pRequest = NULL;
tmq_resp_err_t resp = TMQ_RESP_ERR__SUCCESS;
@ -627,6 +749,7 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in
}
return resp;
#endif
}
tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
@ -707,10 +830,12 @@ tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
}
// init hb timer
tmq->hbTimer = taosTmrStart(tmqAssignDelayedHbTask, 1000, tmq, tmqMgmt.timer);
if (tmq->hbTimer == NULL) {
tmq->hbTimer = taosTmrStart(tmqAssignDelayedHbTask, 1000, tmq, tmqMgmt.timer);
}
// init auto commit timer
if (tmq->autoCommit) {
if (tmq->autoCommit && tmq->commitTimer == NULL) {
tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer);
}
@ -723,7 +848,7 @@ FAIL:
return code;
}
void tmq_conf_set_offset_commit_cb(tmq_conf_t* conf, tmq_commit_cb* cb, void* param) {
void tmq_conf_set_auto_commit_cb(tmq_conf_t* conf, tmq_commit_cb* cb, void* param) {
//
conf->commitCb = cb;
conf->commitCbUserParam = param;
@ -1310,7 +1435,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
while (1) {
tmqHandleAllDelayedTask(tmq);
tmqPollImpl(tmq, wait_time);
if (tmqPollImpl(tmq, wait_time) < 0) return NULL;
rspObj = tmqHandleAllRsp(tmq, wait_time, false);
if (rspObj) {
@ -1333,9 +1458,18 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) {
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
tmq_list_t* lst = tmq_list_new();
tmq_resp_err_t rsp = tmq_subscribe(tmq, lst);
tmq_resp_err_t rsp = tmq_commit_sync(tmq, NULL);
if (rsp == TMQ_RESP_ERR__SUCCESS) {
// TODO: free resources
return TMQ_RESP_ERR__SUCCESS;
} else {
return TMQ_RESP_ERR__FAIL;
}
tmq_list_t* lst = tmq_list_new();
rsp = tmq_subscribe(tmq, lst);
tmq_list_destroy(lst);
if (rsp == TMQ_RESP_ERR__SUCCESS) {
// TODO: free resources
return TMQ_RESP_ERR__SUCCESS;
@ -1384,3 +1518,10 @@ const char* tmq_get_table_name(TAOS_RES* res) {
}
return NULL;
}
DLL_EXPORT void tmq_commit_async(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, tmq_commit_cb* cb, void* param) {
tmqCommitInner(tmq, offsets, 0, 1, cb, param);
}
DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets) {
return tmqCommitInner(tmq, offsets, 0, 0, NULL, NULL);
}

View File

@ -1188,3 +1188,36 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
destroyRequest(request);
smlDestroyInfo(info);
}
TEST(testCase, sml_TD15662_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
TAOS_RES *pRes = taos_query(taos, "create database if not exists db_15662 precision 'ns'");
taos_free_result(pRes);
pRes = taos_query(taos, "use db_15662");
taos_free_result(pRes);
SRequestObj *request = (SRequestObj *)createRequest((STscObj *)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);
const char *sql[] = {
"iyyyje,id=iyyyje_41943_1303,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
};
int ret = smlProcess(info, (char **)sql, sizeof(sql) / sizeof(sql[0]));
ASSERT_EQ(ret, 0);
// case 1
TAOS_RES *res = taos_query(taos, "select * from t_a5615048edae55218a22a149edebdc82");
ASSERT_NE(res, nullptr);
TAOS_ROW row = taos_fetch_row(res);
int64_t ts = *(int64_t*)row[0];
ASSERT_EQ(ts, 1626006833639000000);
taos_free_result(res);
}

View File

@ -263,7 +263,7 @@ static const SSysDbTableSchema topicSchema[] = {
static const SSysDbTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "app_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

View File

@ -600,10 +600,11 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
}
int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) {
pBlock->info.rows = *(int32_t*)buf;
pBlock->info.rows = *(int32_t*) buf;
pBlock->info.groupId = *(uint64_t*) (buf + sizeof(int32_t));
int32_t numOfCols = pBlock->info.numOfCols;
const char* pStart = buf + sizeof(uint32_t);
const char* pStart = buf + sizeof(uint32_t) + sizeof(uint64_t);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
@ -669,7 +670,7 @@ size_t blockDataGetSerialMetaSize(const SSDataBlock* pBlock) {
return sizeof(int32_t) + sizeof(uint64_t) + pBlock->info.numOfCols * sizeof(int32_t);
}
double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
ASSERT(pBlock != NULL);
double rowSize = 0;
@ -1224,7 +1225,27 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
}
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
return (int32_t)((pageSize - blockDataGetSerialMetaSize(pBlock)) / blockDataGetSerialRowSize(pBlock));
int32_t payloadSize = pageSize - blockDataGetSerialMetaSize(pBlock);
int32_t rowSize = pBlock->info.rowSize;
int32_t nRows = payloadSize / rowSize;
// the true value must be less than the value of nRows
int32_t additional = 0;
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
additional += nRows * sizeof(int32_t);
} else {
additional += BitmapLen(nRows);
}
}
int32_t newRows = (payloadSize - additional) / rowSize;
ASSERT(newRows <= nRows && newRows > 1);
return newRows;
}
void colDataDestroy(SColumnInfoData* pColData) {

View File

@ -19,37 +19,209 @@
#include "tdatablock.h"
#include "tlog.h"
#define TD_KV_ROW 0x1U
struct SKVIdx {
typedef struct SKVIdx {
int32_t cid;
int32_t offset;
} SKVIdx;
#pragma pack(push, 1)
typedef struct {
int16_t nCols;
SKVIdx idx[];
} STSKVRow;
#pragma pack(pop)
typedef struct STagIdx {
int16_t cid;
uint16_t offset;
} STagIdx;
#pragma pack(push, 1)
struct STag {
uint16_t len;
uint16_t nTag;
STagIdx idx[];
};
#pragma pack(pop)
int32_t tEncodeTSRow(SEncoder *pEncoder, const STSRow2 *pRow) {
if (tEncodeI64(pEncoder, pRow->ts) < 0) return -1;
if (tEncodeU32v(pEncoder, pRow->flags) < 0) return -1;
if (pRow->flags & TD_KV_ROW) {
if (tEncodeI32v(pEncoder, pRow->ncols) < 0) return -1;
} else {
if (tEncodeI32v(pEncoder, pRow->sver) < 0) return -1;
#define TSROW_IS_KV_ROW(r) ((r)->flags & TSROW_KV_ROW)
#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
#define BIT2_SIZE(n) (((n)-1) / 4 + 1)
#define SET_BIT1(p, i, v) ((p)[(i) / 8] = (p)[(i) / 8] & (~(((uint8_t)1) << ((i) % 8))) | ((v) << ((i) % 8)))
#define SET_BIT2(p, i, v) ((p)[(i) / 4] = (p)[(i) / 4] & (~(((uint8_t)3) << ((i) % 4))) | ((v) << ((i) % 4)))
#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1))
#define GET_BIT2(p, i) (((p)[(i) / 4] >> ((i) % 4)) & ((uint8_t)3))
static FORCE_INLINE int tSKVIdxCmprFn(const void *p1, const void *p2);
// STSRow2
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) {
int32_t n = 0;
n += tPutI64(p ? p + n : p, pRow->ts);
n += tPutI8(p ? p + n : p, pRow->flags);
n += tPutI32v(p ? p + n : p, pRow->sver);
ASSERT(pRow->flags & 0xf);
switch (pRow->flags & 0xf) {
case TSROW_HAS_NONE:
case TSROW_HAS_NULL:
break;
default:
n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData);
break;
}
if (tEncodeBinary(pEncoder, pRow->pData, pRow->nData) < 0) return -1;
return n;
}
int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) {
int32_t n = 0;
uint8_t flags;
n += tGetI64(p + n, pRow ? &pRow->ts : NULL);
n += tGetI8(p + n, pRow ? &pRow->flags : &flags);
n += tGetI32v(p + n, pRow ? &pRow->sver : NULL);
if (pRow) flags = pRow->flags;
switch (flags & 0xf) {
case TSROW_HAS_NONE:
case TSROW_HAS_NULL:
break;
default:
n += tGetBinary(p + n, pRow ? &pRow->pData : NULL, pRow ? &pRow->nData : NULL);
break;
}
return n;
}
int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow) {
(*ppRow) = taosMemoryMalloc(sizeof(*pRow) + pRow->nData);
if (*ppRow == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
(*ppRow)->ts = pRow->ts;
(*ppRow)->flags = pRow->flags;
(*ppRow)->sver = pRow->sver;
(*ppRow)->nData = pRow->nData;
if (pRow->nData) {
(*ppRow)->pData = (uint8_t *)(&(*ppRow)[1]);
memcpy((*ppRow)->pData, pRow->pData, pRow->nData);
} else {
(*ppRow)->pData = NULL;
}
return 0;
}
int32_t tDecodeTSRow(SDecoder *pDecoder, STSRow2 *pRow) {
if (tDecodeI64(pDecoder, &pRow->ts) < 0) return -1;
if (tDecodeU32v(pDecoder, &pRow->flags) < 0) return -1;
if (pRow->flags & TD_KV_ROW) {
if (tDecodeI32v(pDecoder, &pRow->ncols) < 0) return -1;
} else {
if (tDecodeI32v(pDecoder, &pRow->sver) < 0) return -1;
void tTSRowFree(STSRow2 *pRow) {
if (pRow) taosMemoryFree(pRow);
}
int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
uint32_t n;
uint8_t *p;
uint8_t v;
int32_t bidx = iCol - 1;
STColumn *pTColumn = &pTSchema->columns[iCol];
STSKVRow *pTSKVRow;
SKVIdx *pKVIdx;
ASSERT(iCol != 0);
ASSERT(pTColumn->colId != 0);
ASSERT(pRow->flags & 0xf != 0);
switch (pRow->flags & 0xf) {
case TSROW_HAS_NONE:
*pColVal = ColValNONE;
return 0;
case TSROW_HAS_NULL:
*pColVal = ColValNULL;
return 0;
}
if (tDecodeBinary(pDecoder, &pRow->pData, &pRow->nData) < 0) return -1;
if (TSROW_IS_KV_ROW(pRow)) {
ASSERT((pRow->flags & 0xf) != TSROW_HAS_VAL);
pTSKVRow = (STSKVRow *)pRow->pData;
pKVIdx =
bsearch(&((SKVIdx){.cid = pTColumn->colId}), pTSKVRow->idx, pTSKVRow->nCols, sizeof(SKVIdx), tSKVIdxCmprFn);
if (pKVIdx == NULL) {
*pColVal = ColValNONE;
} else if (pKVIdx->offset < 0) {
*pColVal = ColValNULL;
} else {
p = pRow->pData + sizeof(STSKVRow) + sizeof(SKVIdx) * pTSKVRow->nCols + pKVIdx->offset;
pColVal->type = COL_VAL_DATA;
tGetBinary(p, &pColVal->pData, &pColVal->nData);
}
} else {
// get bitmap
p = pRow->pData;
switch (pRow->flags & 0xf) {
case TSROW_HAS_NULL | TSROW_HAS_NONE:
v = GET_BIT1(p, bidx);
if (v == 0) {
*pColVal = ColValNONE;
} else {
*pColVal = ColValNULL;
}
return 0;
case TSROW_HAS_VAL | TSROW_HAS_NONE:
v = GET_BIT1(p, bidx);
if (v == 1) {
p = p + BIT1_SIZE(pTSchema->numOfCols - 1);
break;
} else {
*pColVal = ColValNONE;
return 0;
}
case TSROW_HAS_VAL | TSROW_HAS_NULL:
v = GET_BIT1(p, bidx);
if (v == 1) {
p = p + BIT1_SIZE(pTSchema->numOfCols - 1);
break;
} else {
*pColVal = ColValNULL;
return 0;
}
case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE:
v = GET_BIT2(p, bidx);
if (v == 0) {
*pColVal = ColValNONE;
return 0;
} else if (v == 1) {
*pColVal = ColValNULL;
return 0;
} else if (v == 2) {
p = p + BIT2_SIZE(pTSchema->numOfCols - 1);
break;
} else {
ASSERT(0);
}
default:
break;
}
// get real value
p = p + pTColumn->offset;
pColVal->type = COL_VAL_DATA;
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
tGetBinary(p + pTSchema->flen + *(int32_t *)p, &pColVal->pData, &pColVal->nData);
} else {
pColVal->pData = p;
pColVal->nData = pTColumn->bytes;
}
}
return 0;
}
// STSchema
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema **ppTSchema) {
*ppTSchema = (STSchema *)taosMemoryMalloc(sizeof(STSchema) + sizeof(STColumn) * ncols);
if (*ppTSchema == NULL) {
@ -85,170 +257,360 @@ int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema *
return 0;
}
void tTSchemaDestroy(STSchema *pTSchema) { taosMemoryFree(pTSchema); }
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, SSchema *pSchema, int32_t nCols) {
int32_t kvBufLen;
int32_t tpBufLen;
uint8_t *p;
void tTSchemaDestroy(STSchema *pTSchema) {
if (pTSchema) taosMemoryFree(pTSchema);
}
// STSRowBuilder
int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema) {
if (tTSchemaCreate(sver, pSchema, nCols, &pBuilder->pTSchema) < 0) return -1;
kvBufLen = sizeof(SKVIdx) * nCols + pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen;
tpBufLen = pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen;
if (pBuilder->szKVBuf < kvBufLen) {
p = taosMemoryRealloc(pBuilder->pKVBuf, kvBufLen);
if (p == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
pBuilder->pKVBuf = p;
pBuilder->szKVBuf = kvBufLen;
pBuilder->szBitMap1 = BIT1_SIZE(nCols - 1);
pBuilder->szBitMap2 = BIT2_SIZE(nCols - 1);
pBuilder->szKVBuf =
sizeof(STSKVRow) + sizeof(SKVIdx) * (nCols - 1) + pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen;
pBuilder->szTPBuf = pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen;
pBuilder->pKVBuf = taosMemoryMalloc(pBuilder->szKVBuf);
if (pBuilder->pKVBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tTSchemaDestroy(pBuilder->pTSchema);
return -1;
}
if (pBuilder->szTPBuf < tpBufLen) {
p = taosMemoryRealloc(pBuilder->pTPBuf, tpBufLen);
if (p == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
pBuilder->pTPBuf = p;
pBuilder->szTPBuf = tpBufLen;
pBuilder->pTPBuf = taosMemoryMalloc(pBuilder->szTPBuf);
if (pBuilder->pTPBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pBuilder->pKVBuf);
tTSchemaDestroy(pBuilder->pTSchema);
return -1;
}
tTSRowBuilderReset(pBuilder);
return 0;
}
void tTSRowBuilderClear(STSRowBuilder *pBuilder) {
taosMemoryFree(pBuilder->pKVBuf);
taosMemoryFree(pBuilder->pTPBuf);
if (pBuilder->pTPBuf) {
taosMemoryFree(pBuilder->pTPBuf);
pBuilder->pTPBuf = NULL;
}
if (pBuilder->pKVBuf) {
taosMemoryFree(pBuilder->pKVBuf);
pBuilder->pKVBuf = NULL;
}
tTSchemaDestroy(pBuilder->pTSchema);
pBuilder->pTSchema = NULL;
}
void tTSRowBuilderReset(STSRowBuilder *pBuilder) {
for (int32_t iCol = pBuilder->pTSchema->numOfCols - 1; iCol >= 0; iCol--) {
pBuilder->pTColumn = &pBuilder->pTSchema->columns[iCol];
pBuilder->pTColumn->flags &= (~COL_VAL_SET);
STColumn *pTColumn = &pBuilder->pTSchema->columns[iCol];
COL_CLR_SET(pTColumn->flags);
}
pBuilder->nCols = 0;
pBuilder->kvVLen = 0;
pBuilder->tpVLen = 0;
pBuilder->iCol = 0;
((STSKVRow *)pBuilder->pKVBuf)->nCols = 0;
pBuilder->vlenKV = 0;
pBuilder->vlenTP = 0;
pBuilder->row.flags = 0;
}
int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, const uint8_t *pData, uint32_t nData) {
int32_t iCol;
uint8_t *p;
int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, uint8_t *pData, uint32_t nData) {
STColumn *pTColumn = &pBuilder->pTSchema->columns[pBuilder->iCol];
uint8_t *p;
int32_t iCol;
STSKVRow *pTSKVRow = (STSKVRow *)pBuilder->pKVBuf;
// search column
if (pBuilder->pTColumn->colId < cid) {
iCol = (pBuilder->pTColumn - pBuilder->pTSchema->columns) / sizeof(STColumn) + 1;
for (; iCol < pBuilder->pTSchema->numOfCols; iCol++) {
pBuilder->pTColumn = &pBuilder->pTSchema->columns[iCol];
if (pBuilder->pTColumn->colId == cid) break;
// use interp search
if (pTColumn->colId < cid) { // right search
for (iCol = pBuilder->iCol + 1; iCol < pBuilder->pTSchema->numOfCols; iCol++) {
pTColumn = &pBuilder->pTSchema->columns[iCol];
if (pTColumn->colId >= cid) break;
}
} else if (pBuilder->pTColumn->colId > cid) {
iCol = (pBuilder->pTColumn - pBuilder->pTSchema->columns) / sizeof(STColumn) - 1;
for (; iCol >= 0; iCol--) {
pBuilder->pTColumn = &pBuilder->pTSchema->columns[iCol];
if (pBuilder->pTColumn->colId == cid) break;
} else if (pTColumn->colId > cid) { // left search
for (iCol = pBuilder->iCol - 1; iCol >= 0; iCol--) {
pTColumn = &pBuilder->pTSchema->columns[iCol];
if (pTColumn->colId <= cid) break;
}
}
// check
if (pBuilder->pTColumn->colId != cid || pBuilder->pTColumn->flags & COL_VAL_SET) {
if (pTColumn->colId != cid || COL_IS_SET(pTColumn->flags)) {
return -1;
}
pBuilder->iCol = iCol;
// set value
if (cid == 0) {
ASSERT(pData && nData == sizeof(TSKEY));
ASSERT(pData && nData == sizeof(TSKEY) && iCol == 0);
pBuilder->row.ts = *(TSKEY *)pData;
pTColumn->flags |= COL_SET_VAL;
} else {
if (pData) {
// ASSERT(!IS_NULL(pData));
// set VAL
// set tuple data
p = pBuilder->pTPBuf + pBuilder->pTColumn->offset;
if (IS_VAR_DATA_TYPE(pBuilder->pTColumn->type)) {
*(int32_t *)p = pBuilder->tpVLen;
pBuilder->row.flags |= TSROW_HAS_VAL;
pTColumn->flags |= COL_SET_VAL;
// encode the variant-length data
p = pBuilder->pTPBuf + pBuilder->pTSchema->flen + pBuilder->tpVLen;
pBuilder->tpVLen += tPutBinary(p, pData, nData);
} else {
memcpy(p, pData, nData);
/* KV */
if (1) { // avoid KV at some threshold (todo)
pTSKVRow->idx[pTSKVRow->nCols].cid = cid;
pTSKVRow->idx[pTSKVRow->nCols].offset = pBuilder->vlenKV;
p = pBuilder->pKVBuf + sizeof(STSKVRow) + sizeof(SKVIdx) * (pBuilder->pTSchema->numOfCols - 1) +
pBuilder->vlenKV;
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
ASSERT(nData <= pTColumn->bytes);
pBuilder->vlenKV += tPutBinary(p, pData, nData);
} else {
ASSERT(nData == pTColumn->bytes);
memcpy(p, pData, nData);
pBuilder->vlenKV += nData;
}
}
// set kv data
p = pBuilder->pKVBuf + sizeof(SKVIdx) * pBuilder->nCols;
((SKVIdx *)p)->cid = cid;
((SKVIdx *)p)->offset = pBuilder->kvVLen;
/* TUPLE */
p = pBuilder->pTPBuf + pBuilder->szBitMap2 + pTColumn->offset;
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
ASSERT(nData <= pTColumn->bytes);
*(int32_t *)p = pBuilder->vlenTP;
p = pBuilder->pKVBuf + sizeof(SKVIdx) * pBuilder->pTSchema->numOfCols + pBuilder->kvVLen;
if (IS_VAR_DATA_TYPE(pBuilder->pTColumn->type)) {
pBuilder->kvVLen += tPutBinary(p, pData, nData);
p = pBuilder->pTPBuf + pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->vlenTP;
pBuilder->vlenTP += tPutBinary(p, pData, nData);
} else {
ASSERT(nData == pTColumn->bytes);
memcpy(p, pData, nData);
pBuilder->kvVLen += nData;
}
} else {
// set NULL val
// set NULL
pBuilder->row.flags |= TSROW_HAS_NULL;
pTColumn->flags |= COL_SET_NULL;
pTSKVRow->idx[pTSKVRow->nCols].cid = cid;
pTSKVRow->idx[pTSKVRow->nCols].offset = -1;
}
pTSKVRow->nCols++;
}
pBuilder->pTColumn->flags |= COL_VAL_SET;
pBuilder->nCols++;
return 0;
}
static FORCE_INLINE int tSKVIdxCmprFn(const void *p1, const void *p2) {
SKVIdx *pKVIdx1 = (SKVIdx *)p1;
SKVIdx *pKVIdx2 = (SKVIdx *)p2;
if (pKVIdx1->cid > pKVIdx2->cid) {
return 1;
} else if (pKVIdx1->cid < pKVIdx2->cid) {
return -1;
}
return 0;
}
static void setBitMap(uint8_t *p, STSchema *pTSchema, uint8_t flags) {
int32_t bidx;
STColumn *pTColumn;
for (int32_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) {
pTColumn = &pTSchema->columns[iCol];
bidx = iCol - 1;
switch (flags) {
case TSROW_HAS_NULL | TSROW_HAS_NONE:
if (pTColumn->flags & COL_SET_NULL) {
SET_BIT1(p, bidx, (uint8_t)1);
} else {
SET_BIT1(p, bidx, (uint8_t)0);
}
break;
case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE:
if (pTColumn->flags & COL_SET_NULL) {
SET_BIT2(p, bidx, (uint8_t)1);
} else if (pTColumn->flags & COL_SET_VAL) {
SET_BIT2(p, bidx, (uint8_t)2);
} else {
SET_BIT2(p, bidx, (uint8_t)0);
}
break;
default:
if (pTColumn->flags & COL_SET_VAL) {
SET_BIT1(p, bidx, (uint8_t)1);
} else {
SET_BIT1(p, bidx, (uint8_t)0);
}
break;
}
}
}
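
A rough reading of the bitmap encoding above: rows flagged NULL|NONE need one bit per non-timestamp column (0 = NONE, 1 = NULL), while rows flagged VAL|NULL|NONE need two bits (0 = NONE, 1 = NULL, 2 = VAL). A minimal sketch of the 2-bit case with a local helper (a hypothetical stand-in for SET_BIT2, whose real definition lives in the headers):

  static void set_bit2(uint8_t *p, int32_t idx, uint8_t v) {
    int32_t byte = idx >> 2, shift = (idx & 3) * 2;  // four 2-bit cells per byte
    p[byte] = (uint8_t)((p[byte] & ~(3u << shift)) | ((uint32_t)(v & 3u) << shift));
  }

  uint8_t bitmap[2] = {0};
  set_bit2(bitmap, 0, 2);  // first non-timestamp column has a value
  set_bit2(bitmap, 1, 1);  // second column is NULL
  set_bit2(bitmap, 2, 0);  // third column is NONE
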
int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) {
if ((pBuilder->pTSchema->columns[0].flags & COL_VAL_SET) == 0) {
int32_t nDataTP, nDataKV;
uint32_t flags;
STSKVRow *pTSKVRow = (STSKVRow *)pBuilder->pKVBuf;
int32_t nCols = pBuilder->pTSchema->numOfCols;
// error not set ts
if (!COL_IS_SET(pBuilder->pTSchema->columns->flags)) {
return -1;
}
if (pBuilder->nCols * sizeof(SKVIdx) + pBuilder->kvVLen < pBuilder->pTSchema->flen + pBuilder->tpVLen) {
// encode as TD_KV_ROW
pBuilder->row.flags |= TD_KV_ROW;
pBuilder->row.ncols = pBuilder->nCols;
pBuilder->row.nData = pBuilder->nCols * sizeof(SKVIdx) + pBuilder->kvVLen;
ASSERT(pTSKVRow->nCols < nCols);
if (pTSKVRow->nCols < nCols - 1) {
pBuilder->row.flags |= TSROW_HAS_NONE;
}
ASSERT((pBuilder->row.flags & 0xf) != 0);
*(ppRow) = &pBuilder->row;
switch (pBuilder->row.flags & 0xf) {
case TSROW_HAS_NONE:
case TSROW_HAS_NULL:
pBuilder->row.nData = 0;
pBuilder->row.pData = NULL;
return 0;
case TSROW_HAS_NULL | TSROW_HAS_NONE:
nDataTP = pBuilder->szBitMap1;
break;
case TSROW_HAS_VAL:
nDataTP = pBuilder->pTSchema->flen + pBuilder->vlenTP;
break;
case TSROW_HAS_VAL | TSROW_HAS_NONE:
case TSROW_HAS_VAL | TSROW_HAS_NULL:
nDataTP = pBuilder->szBitMap1 + pBuilder->pTSchema->flen + pBuilder->vlenTP;
break;
case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE:
nDataTP = pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->vlenTP;
break;
default:
ASSERT(0);
}
nDataKV = sizeof(STSKVRow) + sizeof(SKVIdx) * pTSKVRow->nCols + pBuilder->vlenKV;
pBuilder->row.sver = pBuilder->pTSchema->version;
if (nDataKV < nDataTP) {
// generate KV row
ASSERT((pBuilder->row.flags & 0xf) != TSROW_HAS_VAL);
pBuilder->row.flags |= TSROW_KV_ROW;
pBuilder->row.nData = nDataKV;
pBuilder->row.pData = pBuilder->pKVBuf;
if (pBuilder->nCols < pBuilder->pTSchema->numOfCols) {
memmove(pBuilder->pKVBuf + sizeof(SKVIdx) * pBuilder->nCols,
pBuilder->pKVBuf + sizeof(SKVIdx) * pBuilder->pTSchema->numOfCols, pBuilder->kvVLen);
qsort(pTSKVRow->idx, pTSKVRow->nCols, sizeof(SKVIdx), tSKVIdxCmprFn);
if (pTSKVRow->nCols < nCols - 1) {
memmove(&pTSKVRow->idx[pTSKVRow->nCols], &pTSKVRow->idx[nCols - 1], pBuilder->vlenKV);
}
} else {
// encode as TD_TUPLE_ROW
pBuilder->row.flags &= (~TD_KV_ROW);
pBuilder->row.sver = pBuilder->pTSchema->version;
pBuilder->row.nData = pBuilder->pTSchema->flen + pBuilder->tpVLen;
pBuilder->row.pData = pBuilder->pTPBuf;
// generate TUPLE row
if (pBuilder->nCols < pBuilder->pTSchema->numOfCols) {
// set non-set cols as None
for (int32_t iCol = 1; iCol < pBuilder->pTSchema->numOfCols; iCol++) {
pBuilder->pTColumn = &pBuilder->pTSchema->columns[iCol];
if (pBuilder->pTColumn->flags & COL_VAL_SET) continue;
pBuilder->row.nData = nDataTP;
{
// set None (todo)
}
uint8_t *p;
uint8_t flags = pBuilder->row.flags & 0xf;
pBuilder->pTColumn->flags |= COL_VAL_SET;
if (flags == TSROW_HAS_VAL) {
pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2;
} else {
if (flags == (TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE)) {
pBuilder->row.pData = pBuilder->pTPBuf;
} else {
pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2 - pBuilder->szBitMap1;
}
setBitMap(pBuilder->row.pData, pBuilder->pTSchema, flags);
}
}
*ppRow = &pBuilder->row;
return 0;
}
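
A rough lifecycle sketch for the builder, assuming the put-value entry point whose body appears earlier in this file is named tTSRowBuilderPutValue with the signature (pBuilder, cid, pData, nData) — its declaration is outside this hunk — and that pBuilder has already been initialized against a schema:

  const STSRow2 *pRow = NULL;
  TSKEY          ts = 1637550210000;
  int64_t        val = 42;

  tTSRowBuilderPutValue(pBuilder, 0, (uint8_t *)&ts, sizeof(ts));    // cid 0 is always the primary timestamp
  tTSRowBuilderPutValue(pBuilder, 1, (uint8_t *)&val, sizeof(val));  // a fixed-length column value
  tTSRowBuilderPutValue(pBuilder, 2, NULL, 0);                       // explicit NULL for cid 2
  if (tTSRowBuilderGetRow(pBuilder, &pRow) < 0) {
    // fails if the timestamp column was never set
  }
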
#if 1 // ====================
static FORCE_INLINE int tTagIdxCmprFn(const void *p1, const void *p2) {
STagIdx *pTagIdx1 = (STagIdx *)p1;
STagIdx *pTagIdx2 = (STagIdx *)p2;
if (pTagIdx1->cid < pTagIdx2->cid) {
return -1;
} else if (pTagIdx1->cid > pTagIdx2->cid) {
return 1;
}
return 0;
}
int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag) {
STagVal *pTagVal;
uint8_t *p;
int32_t n;
uint16_t tsize = sizeof(STag) + sizeof(STagIdx) * nTag;
for (int16_t iTag = 0; iTag < nTag; iTag++) {
pTagVal = &pTagVals[iTag];
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
tsize += tPutBinary(NULL, pTagVal->pData, pTagVal->nData);
} else {
ASSERT(pTagVal->nData == TYPE_BYTES[pTagVal->type]);
tsize += pTagVal->nData;
}
}
(*ppTag) = (STag *)taosMemoryMalloc(tsize);
if (*ppTag == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
p = (uint8_t *)&((*ppTag)->idx[nTag]);
n = 0;
(*ppTag)->len = tsize;
(*ppTag)->nTag = nTag;
for (int16_t iTag = 0; iTag < nTag; iTag++) {
pTagVal = &pTagVals[iTag];
(*ppTag)->idx[iTag].cid = pTagVal->cid;
(*ppTag)->idx[iTag].offset = n;
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
n += tPutBinary(p + n, pTagVal->pData, pTagVal->nData);
} else {
memcpy(p + n, pTagVal->pData, pTagVal->nData);
n += pTagVal->nData;
}
}
qsort((*ppTag)->idx, (*ppTag)->nTag, sizeof(STagIdx), tTagIdxCmprFn);
return 0;
}
void tTagFree(STag *pTag) {
if (pTag) taosMemoryFree(pTag);
}
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData) {
STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn);
if (pTagIdx == NULL) {
*ppData = NULL;
*nData = 0;
} else {
uint8_t *p = (uint8_t *)&pTag->idx[pTag->nTag] + pTagIdx->offset;
if (IS_VAR_DATA_TYPE(type)) {
tGetBinary(p, ppData, nData);
} else {
*ppData = p;
*nData = TYPE_BYTES[type];
}
}
}
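
A minimal usage sketch for the tag helpers above, assuming STagVal carries {cid, type, pData, nData} exactly as tTagNew reads them and that TSDB_DATA_TYPE_INT / TSDB_DATA_TYPE_BINARY are the usual type ids:

  int32_t ival = 7;
  STagVal vals[2] = {
      {.cid = 1, .type = TSDB_DATA_TYPE_INT, .pData = (uint8_t *)&ival, .nData = sizeof(ival)},
      {.cid = 2, .type = TSDB_DATA_TYPE_BINARY, .pData = (uint8_t *)"tag", .nData = 3},
  };
  STag    *pTag = NULL;
  uint8_t *pData = NULL;
  int32_t  nData = 0;

  if (tTagNew(vals, 2, &pTag) == 0) {
    tTagGet(pTag, 2, TSDB_DATA_TYPE_BINARY, &pData, &nData);  // pData/nData point into the packed blob
    tTagFree(pTag);
  }
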
int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag) {
// return tEncodeBinary(pEncoder, (uint8_t *)pTag, pTag->len);
ASSERT(0);
return 0;
}
int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag) {
// uint32_t n;
// return tDecodeBinary(pDecoder, (const uint8_t **)ppTag, &n);
ASSERT(0);
return 0;
}
#if 1 // ===================================================================================================================
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
int spaceNeeded = pCol->bytes * maxPoints;
@ -260,8 +622,8 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
spaceNeeded += (int)nBitmapBytes;
// TODO: Currently, the compression of bitmap parts is affiliated to the column data parts, thus allocate 1 more
// TYPE_BYTES as to comprise complete TYPE_BYTES. Otherwise, invalid read/write would be triggered.
// spaceNeeded += TYPE_BYTES[pCol->type]; // the bitmap part is append as a single part since 2022.04.03, thus remove
// the additional space
// spaceNeeded += TYPE_BYTES[pCol->type]; // the bitmap part is append as a single part since 2022.04.03, thus
// remove the additional space
#endif
if (pCol->spaceSize < spaceNeeded) {

@ -30,6 +30,7 @@ char tsLocalEp[TSDB_EP_LEN] = {0}; // Local End Point, hostname:port
uint16_t tsServerPort = 6030;
int32_t tsVersion = 30000000;
int32_t tsStatusInterval = 1; // second
int32_t tsNumOfSupportVnodes = 256;
// common
int32_t tsMaxShellConns = 50000;
@ -38,7 +39,7 @@ bool tsEnableSlaveQuery = true;
bool tsPrintAuth = false;
// multi process
bool tsMultiProcess = false;
int32_t tsMultiProcess = 0;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 128;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 128;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
@ -362,7 +363,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddDir(pCfg, "dataDir", tsDataDir, 0) != 0) return -1;
if (cfgAddFloat(pCfg, "minimalDataDirGB", 2.0f, 0.001f, 10000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "supportVnodes", 256, 0, 4096, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1;
@ -378,7 +379,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "slaveQuery", tsEnableSlaveQuery, 0) != 0) return -1;
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
if (cfgAddBool(pCfg, "multiProcess", tsMultiProcess, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
@ -552,6 +553,7 @@ static void taosSetSystemCfg(SConfig *pCfg) {
static int32_t taosSetServerCfg(SConfig *pCfg) {
tsDataSpace.reserved = cfgGetItem(pCfg, "minimalDataDirGB")->fval;
tsNumOfSupportVnodes = cfgGetItem(pCfg, "supportVnodes")->i32;
tsMaxShellConns = cfgGetItem(pCfg, "maxShellConns")->i32;
tsStatusInterval = cfgGetItem(pCfg, "statusInterval")->i32;
tsMinSlidingTime = cfgGetItem(pCfg, "minSlidingTime")->i32;
@ -565,7 +567,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
tsEnableSlaveQuery = cfgGetItem(pCfg, "slaveQuery")->bval;
tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->bval;
tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->i32;
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
tsMnodeShmSize = cfgGetItem(pCfg, "mnodeShmSize")->i32;

@ -600,6 +600,7 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq)
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->alterType) < 0) return -1;
if (tEncodeI32(&encoder, pReq->verInBlock) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfFields) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfFields; ++i) {
SField *pField = taosArrayGet(pReq->pFields, i);
@ -626,6 +627,7 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->alterType) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->verInBlock) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfFields) < 0) return -1;
pReq->pFields = taosArrayInit(pReq->numOfFields, sizeof(SField));
if (pReq->pFields == NULL) {
@ -2625,6 +2627,35 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
return 0;
}
int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->topic) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->cgroup) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->topic) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->cgroup) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
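
A round-trip sketch for the new SMDropCgroupReq codec, assuming the struct exposes topic, cgroup and igNotExists exactly as the encoder reads them, and that the usual two-pass pattern applies (encode against a NULL buffer first to obtain the length):

  SMDropCgroupReq req = {.igNotExists = 1};
  strcpy(req.topic, "tp_1");
  strcpy(req.cgroup, "cg_1");

  int32_t len = tSerializeSMDropCgroupReq(NULL, 0, &req);  // first pass: length only
  void   *buf = taosMemoryMalloc(len);
  tSerializeSMDropCgroupReq(buf, len, &req);

  SMDropCgroupReq out = {0};
  tDeserializeSMDropCgroupReq(buf, len, &out);
  taosMemoryFree(buf);
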
int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
int32_t sqlLen = 0;
int32_t astLen = 0;
@ -4087,12 +4118,11 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
if (tEncodeI32(pEncoder, pBlock->code) < 0) return -1;
if (tEncodeI8(pEncoder, pBlock->hashMeta) < 0) return -1;
if (pBlock->hashMeta) {
if (tEncodeI64(pEncoder, pBlock->uid) < 0) return -1;
if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
}
if (tEncodeI64(pEncoder, pBlock->uid) < 0) return -1;
if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1;
if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1;
tEndEncode(pEncoder);
return 0;
@ -4103,14 +4133,13 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
if (tDecodeI8(pDecoder, &pBlock->hashMeta) < 0) return -1;
if (pBlock->hashMeta) {
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
if (NULL == pBlock->tblFName) return -1;
if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
}
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
if (NULL == pBlock->tblFName) return -1;
if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
tEndDecode(pDecoder);
return 0;

@ -17,94 +17,46 @@
#include "tmsgcb.h"
#include "taoserror.h"
static SMsgCb tsDefaultMsgCb;
static SMsgCb defaultMsgCb;
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb) {
// if (tsDefaultMsgCb.pWrapper == NULL) {
tsDefaultMsgCb = *pMsgCb;
//}
void tmsgSetDefault(const SMsgCb* msgcb) { defaultMsgCb = *msgcb; }
int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg) {
PutToQueueFp fp = msgcb->queueFps[qtype];
return (*fp)(msgcb->mgmt, pMsg);
}
int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pReq) {
PutToQueueFp fp = pMsgCb->queueFps[qtype];
if (fp != NULL) {
return (*fp)(pMsgCb->pMgmt, pReq);
} else {
terrno = TSDB_CODE_INVALID_PTR;
return -1;
}
int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype) {
GetQueueSizeFp fp = msgcb->qsizeFp;
return (*fp)(msgcb->mgmt, vgId, qtype);
}
int32_t tmsgGetQueueSize(const SMsgCb* pMsgCb, int32_t vgId, EQueueType qtype) {
GetQueueSizeFp fp = pMsgCb->qsizeFp;
if (fp != NULL) {
return (*fp)(pMsgCb->pMgmt, vgId, qtype);
} else {
terrno = TSDB_CODE_INVALID_PTR;
return -1;
}
int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) {
SendReqFp fp = defaultMsgCb.sendReqFp;
return (*fp)(epSet, pMsg);
}
int32_t tmsgSendReq(const SMsgCb* pMsgCb, const SEpSet* epSet, SRpcMsg* pReq) {
SendReqFp fp = pMsgCb->sendReqFp;
if (fp != NULL) {
return (*fp)(pMsgCb->pWrapper, epSet, pReq);
} else {
terrno = TSDB_CODE_INVALID_PTR;
return -1;
}
void tmsgSendRsp(SRpcMsg* pMsg) {
SendRspFp fp = defaultMsgCb.sendRspFp;
return (*fp)(pMsg);
}
void tmsgSendRsp(SRpcMsg* pRsp) {
SendRspFp fp = tsDefaultMsgCb.sendRspFp;
if (fp != NULL) {
return (*fp)(tsDefaultMsgCb.pWrapper, pRsp);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet) {
SendRedirectRspFp fp = defaultMsgCb.sendRedirectRspFp;
(*fp)(pMsg, pNewEpSet);
}
void tmsgSendRedirectRsp(SRpcMsg* pRsp, const SEpSet* pNewEpSet) {
SendRedirectRspFp fp = tsDefaultMsgCb.sendRedirectRspFp;
if (fp != NULL) {
(*fp)(tsDefaultMsgCb.pWrapper, pRsp, pNewEpSet);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg) {
RegisterBrokenLinkArgFp fp = defaultMsgCb.registerBrokenLinkArgFp;
(*fp)(pMsg);
}
void tmsgSendMnodeRecv(SRpcMsg* pReq, SRpcMsg* pRsp) {
SendMnodeRecvFp fp = tsDefaultMsgCb.sendMnodeRecvFp;
if (fp != NULL) {
(*fp)(tsDefaultMsgCb.pWrapper, pReq, pRsp);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
}
void tmsgRegisterBrokenLinkArg(const SMsgCb* pMsgCb, SRpcMsg* pMsg) {
RegisterBrokenLinkArgFp fp = pMsgCb->registerBrokenLinkArgFp;
if (fp != NULL) {
(*fp)(pMsgCb->pWrapper, pMsg);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
}
void tmsgReleaseHandle(void* handle, int8_t type) {
ReleaseHandleFp fp = tsDefaultMsgCb.releaseHandleFp;
if (fp != NULL) {
(*fp)(tsDefaultMsgCb.pWrapper, handle, type);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type) {
ReleaseHandleFp fp = defaultMsgCb.releaseHandleFp;
(*fp)(pHandle, type);
}
void tmsgReportStartup(const char* name, const char* desc) {
ReportStartup fp = tsDefaultMsgCb.reportStartupFp;
if (fp != NULL && tsDefaultMsgCb.pWrapper != NULL) {
(*fp)(tsDefaultMsgCb.pWrapper, name, desc);
} else {
terrno = TSDB_CODE_INVALID_PTR;
}
ReportStartup fp = defaultMsgCb.reportStartupFp;
(*fp)(name, desc);
}
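
A minimal wiring sketch, assuming SMsgCb exposes the function-pointer fields dereferenced above (sendRspFp, reportStartupFp, ...) and that tmsgSetDefault simply copies the struct into defaultMsgCb:

  static void mySendRsp(SRpcMsg *pMsg) { /* hand the response back to the RPC layer */ }
  static void myReportStartup(const char *name, const char *desc) { /* log or record startup progress */ }

  SMsgCb cb = {0};
  cb.sendRspFp = mySendRsp;
  cb.reportStartupFp = myReportStartup;
  tmsgSetDefault(&cb);

  tmsgReportStartup("vnode-init", "opening vnodes");  // dispatches through the registered callback

Note that the reworked wrappers no longer NULL-check the callbacks the way the old versions did, so tmsgSetDefault must run before any of them is invoked.
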

@ -250,7 +250,7 @@ int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
return -1;
}
dst->acctId = strtoll(str, NULL, 10);
dst->acctId = taosStr2Int32(str, NULL, 10);
}
if ((type & T_NAME_DB) == T_NAME_DB) {

@ -1063,7 +1063,7 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl
int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode) {
if (isAllRowsNone(pCol)) {
pVal->valType = TD_VTYPE_NULL;
pVal->valType = TD_VTYPE_NONE;
#ifdef TD_SUPPORT_READ2
pVal->val = (void *)getNullValue(pCol->type);
#else

@ -374,39 +374,135 @@ char getPrecisionUnit(int32_t precision) {
}
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI ||
fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
assert(toPrecision == TSDB_TIME_PRECISION_MILLI || toPrecision == TSDB_TIME_PRECISION_MICRO ||
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
static double factors[3][3] = {{1., 1000., 1000000.}, {1.0 / 1000, 1., 1000.}, {1.0 / 1000000, 1.0 / 1000, 1.}};
return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
double tempResult = (double)time;
switch(fromPrecision) {
case TSDB_TIME_PRECISION_MILLI: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
return time;
case TSDB_TIME_PRECISION_MICRO:
tempResult *= 1000;
time *= 1000;
goto end_;
case TSDB_TIME_PRECISION_NANO:
tempResult *= 1000000;
time *= 1000000;
goto end_;
}
} // end from milli
case TSDB_TIME_PRECISION_MICRO: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
return time / 1000;
case TSDB_TIME_PRECISION_MICRO:
return time;
case TSDB_TIME_PRECISION_NANO:
tempResult *= 1000;
time *= 1000;
goto end_;
}
} //end from micro
case TSDB_TIME_PRECISION_NANO: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
return time / 1000000;
case TSDB_TIME_PRECISION_MICRO:
return time / 1000;
case TSDB_TIME_PRECISION_NANO:
return time;
}
} //end from nano
default: {
assert(0);
return time; // only to pass windows compilation
}
} //end switch fromPrecision
end_:
if (tempResult >= (double)INT64_MAX) return INT64_MAX;
if (tempResult <= (double)INT64_MIN) return INT64_MIN; // INT64_MIN means NULL
return time;
}
// !!!!notice: there are precision problems, double loses precision if time is too large, for example: 1626006833631000000*1.0 = double = 1626006833631000064
//int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
// assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
// fromPrecision == TSDB_TIME_PRECISION_NANO);
// assert(toPrecision == TSDB_TIME_PRECISION_MILLI || toPrecision == TSDB_TIME_PRECISION_MICRO ||
// toPrecision == TSDB_TIME_PRECISION_NANO);
// static double factors[3][3] = {{1., 1000., 1000000.}, {1.0 / 1000, 1., 1000.}, {1.0 / 1000000, 1.0 / 1000, 1.}};
// ((double)time * factors[fromPrecision][toPrecision]);
//}
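
The precision problem called out above is easy to reproduce in isolation: 1626006833631000000 exceeds 2^53, so it has no exact double representation and the multiply-by-factor approach drifts by tens of nanoseconds.

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    int64_t ns = 1626006833631000000LL;
    double  d = (double)ns * 1.0;
    printf("%lld -> %.0f\n", (long long)ns, d);  // prints 1626006833631000064, not ...000
    return 0;
  }
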
// !!!!notice: double loses precision if time is too large, for example: 1626006833631000000*1.0 = double = 1626006833631000064
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
static double factors[3] = {1000000., 1000., 1.};
int64_t factors[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1};
double tmp = time;
switch (toUnit) {
case 's':
return time * factors[fromPrecision] / NANOSECOND_PER_SEC;
case 's':{
tmp /= (NANOSECOND_PER_SEC/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_SEC/factors[fromPrecision]);
break;
}
case 'm':
return time * factors[fromPrecision] / NANOSECOND_PER_MINUTE;
tmp /= (NANOSECOND_PER_MINUTE/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_MINUTE/factors[fromPrecision]);
break;
case 'h':
return time * factors[fromPrecision] / NANOSECOND_PER_HOUR;
tmp /= (NANOSECOND_PER_HOUR/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_HOUR/factors[fromPrecision]);
break;
case 'd':
return time * factors[fromPrecision] / NANOSECOND_PER_DAY;
tmp /= (NANOSECOND_PER_DAY/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_DAY/factors[fromPrecision]);
break;
case 'w':
return time * factors[fromPrecision] / NANOSECOND_PER_WEEK;
tmp /= (NANOSECOND_PER_WEEK/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_WEEK/factors[fromPrecision]);
break;
case 'a':
return time * factors[fromPrecision] / NANOSECOND_PER_MSEC;
tmp /= (NANOSECOND_PER_MSEC/factors[fromPrecision]); // the result of division is an integer
time /= (NANOSECOND_PER_MSEC/factors[fromPrecision]);
break;
case 'u':
return time * factors[fromPrecision] / NANOSECOND_PER_USEC;
// the result of (NANOSECOND_PER_USEC / (double)factors[fromPrecision]) may be a double
switch (fromPrecision) {
case TSDB_TIME_PRECISION_MILLI:{
tmp *= 1000;
time *= 1000;
break;
}
case TSDB_TIME_PRECISION_MICRO:{
tmp /= 1;
time /= 1;
break;
}
case TSDB_TIME_PRECISION_NANO:{
tmp /= 1000;
time /= 1000;
break;
}
}
break;
case 'b':
return time * factors[fromPrecision];
tmp *= factors[fromPrecision];
time *= factors[fromPrecision];
break;
default: {
return -1;
}
}
if (tmp >= (double)INT64_MAX) return INT64_MAX;
if (tmp <= (double)INT64_MIN) return INT64_MIN;
return time;
}
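
A small usage sketch of the reworked unit conversion, assuming TSDB_TIME_PRECISION_MILLI/MICRO/NANO index factors[] as 0/1/2 (which the asserts above imply):

  int64_t ms   = 90000;  // 90 seconds expressed in millisecond precision
  int64_t secs = convertTimeFromPrecisionToUnit(ms, TSDB_TIME_PRECISION_MILLI, 's');  // 90
  int64_t mins = convertTimeFromPrecisionToUnit(ms, TSDB_TIME_PRECISION_MILLI, 'm');  // 1, integer division
  int64_t bad  = convertTimeFromPrecisionToUnit(ms, TSDB_TIME_PRECISION_MILLI, 'x');  // -1, unknown unit
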
int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, int64_t *timeVal) {
@ -494,7 +590,7 @@ int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* dura
char* endPtr = NULL;
/* get the basic numeric value */
int64_t timestamp = strtoll(token, &endPtr, 10);
int64_t timestamp = taosStr2Int64(token, &endPtr, 10);
if (errno != 0) {
return -1;
}
@ -512,7 +608,7 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
errno = 0;
/* get the basic numeric value */
*duration = strtoll(token, NULL, 10);
*duration = taosStr2Int64(token, NULL, 10);
if (errno != 0) {
return -1;
}

@ -39,7 +39,7 @@ int32_t toInteger(const char *z, int32_t n, int32_t base, int64_t *value) {
errno = 0;
char *endPtr = NULL;
*value = strtoll(z, &endPtr, base);
*value = taosStr2Int64(z, &endPtr, base);
if (errno == ERANGE || errno == EINVAL || endPtr - z != n) {
errno = 0;
return -1;
@ -58,7 +58,7 @@ int32_t toUInteger(const char *z, int32_t n, int32_t base, uint64_t *value) {
return -1;
}
*value = strtoull(z, &endPtr, base);
*value = taosStr2UInt64(z, &endPtr, base);
if (errno == ERANGE || errno == EINVAL || endPtr - z != n) {
errno = 0;
return -1;
@ -434,7 +434,7 @@ static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *val
// return -1;
// }
//
// *value = strtod(pStr, NULL);
// *value = taosStr2Double(pStr, NULL);
return 0;
}
@ -911,7 +911,7 @@ int32_t taosVariantTypeSetType(SVariant *pVariant, char type) {
case TSDB_DATA_TYPE_DOUBLE: {
if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
errno = 0;
double v = strtod(pVariant->pz, NULL);
double v = taosStr2Double(pVariant->pz, NULL);
if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) {
taosMemoryFree(pVariant->pz);
return -1;

@ -17,6 +17,15 @@ TARGET_INCLUDE_DIRECTORIES(
PRIVATE "${TD_SOURCE_DIR}/source/libs/common/inc"
)
# dataformatTest.cpp
add_executable(dataformatTest "")
target_sources(
dataformatTest
PRIVATE
"dataformatTest.cpp"
)
target_link_libraries(dataformatTest gtest gtest_main util)
# tmsg test
# add_executable(tmsgTest "")
# target_sources(tmsgTest

@ -0,0 +1 @@
#include "gtest/gtest.h"

@ -1,23 +0,0 @@
#include <gtest/gtest.h>
#include "trow.h"
TEST(td_row_test, build_row_to_target) {
#if 0
char dst[1024];
SRow* pRow = (SRow*)dst;
int ncols = 10;
col_id_t cid;
void* pData;
SRowBuilder rb = trbInit(TD_OR_ROW_BUILDER, NULL, 0, pRow, 1024);
trbSetRowInfo(&rb, false, 0);
trbSetRowTS(&rb, 1637550210000);
for (int c = 0; c < ncols; c++) {
cid = c;
if (trbWriteCol(&rb, pData, cid) < 0) {
// TODO
}
}
#endif
}

@ -1,14 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

@ -1,14 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
