Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/3.0_mhli

This commit is contained in:
Minghao Li 2022-03-08 20:43:38 +08:00
commit 4118ddb9eb
1406 changed files with 324039 additions and 587 deletions

.gitignore
View File

@@ -24,7 +24,6 @@ mac/
*.orig
src/connector/nodejs/node_modules/
src/connector/nodejs/out/
tests/test/
tests/taoshebei/
tests/taoscsv/
tests/taosdalipu/

.gitmodules
View File

@@ -10,10 +10,6 @@
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git
[submodule "tests"]
path = tests
url = https://github.com/taosdata/tests
branch = 3.0
[submodule "examples/rust"]
path = examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git

View File

@@ -74,37 +74,9 @@ def pre_test(){
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git submodule update --init --recursive --remote
git submodule update --init --recursive
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKCT}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKCT}
git checkout 2.0
'''
}
else if(env.CHANGE_TARGET == '3.0'){
sh '''
cd ${WKCT}
git checkout 3.0
'''
}
else{
sh '''
cd ${WKCT}
git checkout develop
'''
}
}
sh'''
cd ${WKCT}
git pull >/dev/null
cd ${WKC}
export TZ=Asia/Harbin
date
@@ -123,7 +95,6 @@ pipeline {
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDengine'
WKCT= '/var/lib/jenkins/workspace/TDengine/tests'
}
stages {
stage('pre_build'){

View File

@@ -135,7 +135,7 @@ static FORCE_INLINE void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock)
return (void*)buf;
}
static FORCE_INLINE int32_t tEncodeSMqConsumeRsp(void** buf, const SMqConsumeRsp* pRsp) {
static FORCE_INLINE int32_t tEncodeSMqPollRsp(void** buf, const SMqPollRsp* pRsp) {
int32_t tlen = 0;
int32_t sz = 0;
tlen += taosEncodeFixedI64(buf, pRsp->consumerId);
@@ -156,7 +156,7 @@ static FORCE_INLINE int32_t tEncodeSMqConsumeRsp(void** buf, const SMqConsumeRsp
return tlen;
}
static FORCE_INLINE void* tDecodeSMqConsumeRsp(void* buf, SMqConsumeRsp* pRsp) {
static FORCE_INLINE void* tDecodeSMqPollRsp(void* buf, SMqPollRsp* pRsp) {
int32_t sz;
buf = taosDecodeFixedI64(buf, &pRsp->consumerId);
buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
@@ -194,7 +194,7 @@ static FORCE_INLINE void tDeleteSSDataBlock(SSDataBlock* pBlock) {
// tfree(pBlock);
}
static FORCE_INLINE void tDeleteSMqConsumeRsp(SMqConsumeRsp* pRsp) {
static FORCE_INLINE void tDeleteSMqConsumeRsp(SMqPollRsp* pRsp) {
if (pRsp->schemas) {
if (pRsp->schemas->nCols) {
tfree(pRsp->schemas->pSchema);

View File

@@ -795,7 +795,7 @@ typedef struct SVgroupInfo {
int32_t vgId;
uint32_t hashBegin;
uint32_t hashEnd;
SEpSet epset;
SEpSet epSet;
} SVgroupInfo;
typedef struct {
@@ -1871,15 +1871,27 @@ typedef struct {
} STSma; // Time-range-wise SMA
typedef struct {
int8_t msgType; // 0 create, 1 recreate
STSma tSma;
STimeWindow window;
} SCreateTSmaMsg;
int64_t ver; // use a general definition
STSma tSma;
} SVCreateTSmaReq;
typedef struct {
STimeWindow window;
char indexName[TSDB_INDEX_NAME_LEN + 1];
} SDropTSmaMsg;
int8_t type; // 0 status report, 1 update data
char indexName[TSDB_INDEX_NAME_LEN + 1]; //
STimeWindow windows;
} STSmaMsg;
typedef struct {
int64_t ver; // use a general definition
char indexName[TSDB_INDEX_NAME_LEN + 1];
} SVDropTSmaReq;
typedef struct {
} SVCreateTSmaRsp, SVDropTSmaRsp;
int32_t tSerializeSVCreateTSmaReq(void** buf, SVCreateTSmaReq* pReq);
void* tDeserializeSVCreateTSmaReq(void* buf, SVCreateTSmaReq* pReq);
int32_t tSerializeSVDropTSmaReq(void** buf, SVDropTSmaReq* pReq);
void* tDeserializeSVDropTSmaReq(void* buf, SVDropTSmaReq* pReq);
typedef struct {
STimeWindow tsWindow; // [skey, ekey]
@@ -1901,22 +1913,18 @@ static FORCE_INLINE void tdDestroySmaData(STSmaData* pSmaData) {
}
}
// RSma: Time-range-wise Rollup SMA
// TODO: refactor when rSma grammar defined finally =>
// RSma: Rollup SMA
typedef struct {
int64_t interval;
int32_t retention; // unit: day
uint16_t days; // unit: day
int8_t intervalUnit;
} SSmaParams;
// TODO: refactor when rSma grammar defined finally <=
typedef struct {
// TODO: refactor to use the real schema =>
STSma tsma;
float xFilesFactor;
SArray* smaParams; // SSmaParams
// TODO: refactor to use the real schema <=
} SRSma;
typedef struct {
@@ -2065,7 +2073,7 @@ typedef struct {
int32_t skipLogNum;
int32_t numOfTopics;
SArray* pBlockData; // SArray<SSDataBlock>
} SMqConsumeRsp;
} SMqPollRsp;
// one req for one vg+topic
typedef struct {
@@ -2078,7 +2086,7 @@ typedef struct {
int64_t currentOffset;
char topic[TSDB_TOPIC_FNAME_LEN];
} SMqConsumeReq;
} SMqPollReq;
typedef struct {
int32_t vgId;
@@ -2100,7 +2108,7 @@ typedef struct {
struct tmq_message_t {
SMqRspHead head;
union {
SMqConsumeRsp consumeRsp;
SMqPollRsp consumeRsp;
SMqCMGetSubEpRsp getEpRsp;
};
void* extra;

View File

@@ -44,10 +44,10 @@ enum {
};
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
uint8_t precision; // timestamp precision of the schema
int16_t numOfColumns; // the number of columns
int32_t rowSize; // row size of the schema
uint8_t numOfTags; // the number of tags in schema
uint8_t precision; // timestamp precision of the schema
int16_t numOfColumns; // the number of columns
int32_t rowSize; // row size of the schema
} STableComInfo;
/*
@@ -56,49 +56,45 @@ typedef struct STableComInfo {
* The cached child table meta info. For each child table, 24 bytes are required to keep the essential table info.
*/
typedef struct SCTableMeta {
int32_t vgId:24;
int32_t vgId : 24;
int8_t tableType;
uint64_t uid;
uint64_t suid;
} SCTableMeta;
/*
* Note that the first 24 bytes of STableMeta are identical to SCTableMeta, it is safe to cast a STableMeta to be a SCTableMeta.
* Note that the first 24 bytes of STableMeta are identical to SCTableMeta, it is safe to cast a STableMeta to be a
* SCTableMeta.
*/
typedef struct STableMeta {
//BEGIN: KEEP THIS PART SAME WITH SCTableMeta
int32_t vgId:24;
int8_t tableType;
uint64_t uid;
uint64_t suid;
//END: KEEP THIS PART SAME WITH SCTableMeta
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta info
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[];
// BEGIN: KEEP THIS PART SAME WITH SCTableMeta
int32_t vgId : 24;
int8_t tableType;
uint64_t uid;
uint64_t suid;
// END: KEEP THIS PART SAME WITH SCTableMeta
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
// info
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[];
} STableMeta;
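The two comment blocks above lean on STableMeta and SCTableMeta sharing an identical 24-byte prefix. As an aside (not part of this commit), that layout claim can be pinned down at compile time; the structs below are trimmed illustrative copies of the definitions in this hunk, and the checks assume a typical 64-bit ABI:

#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */
#include <stdint.h>

/* trimmed copies of the two layouts shown above (names are illustrative) */
typedef struct { int32_t vgId : 24; int8_t tableType; uint64_t uid; uint64_t suid; } CTableMetaSketch;
typedef struct { int32_t vgId : 24; int8_t tableType; uint64_t uid; uint64_t suid; int16_t sversion; } TableMetaSketch;

/* casting TableMetaSketch* to CTableMetaSketch* is safe only if the shared members line up */
static_assert(offsetof(TableMetaSketch, uid) == offsetof(CTableMetaSketch, uid), "uid offset must match");
static_assert(offsetof(TableMetaSketch, suid) == offsetof(CTableMetaSketch, suid), "suid offset must match");
static_assert(sizeof(CTableMetaSketch) == 24, "the prefix is documented as 24 bytes");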
typedef struct SDBVgInfo {
int32_t vgVersion;
int32_t vgVersion;
int8_t hashMethod;
SHashObj *vgHash; //key:vgId, value:SVgroupInfo
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
} SDBVgInfo;
typedef struct SUseDbOutput {
char db[TSDB_DB_FNAME_LEN];
uint64_t dbId;
SDBVgInfo *dbVgroup;
char db[TSDB_DB_FNAME_LEN];
uint64_t dbId;
SDBVgInfo* dbVgroup;
} SUseDbOutput;
enum {
META_TYPE_NULL_TABLE = 1,
META_TYPE_CTABLE,
META_TYPE_TABLE,
META_TYPE_BOTH_TABLE
};
enum { META_TYPE_NULL_TABLE = 1, META_TYPE_CTABLE, META_TYPE_TABLE, META_TYPE_BOTH_TABLE };
typedef struct STableMetaOutput {
int32_t metaType;
@@ -107,30 +103,30 @@ typedef struct STableMetaOutput {
char ctbName[TSDB_TABLE_NAME_LEN];
char tbName[TSDB_TABLE_NAME_LEN];
SCTableMeta ctbMeta;
STableMeta *tbMeta;
STableMeta* tbMeta;
} STableMetaOutput;
typedef struct SDataBuf {
void *pData;
uint32_t len;
void *handle;
void* pData;
uint32_t len;
void* handle;
} SDataBuf;
typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code);
typedef int32_t (*__async_exec_fn_t)(void* param);
typedef struct SMsgSendInfo {
__async_send_cb_fn_t fp; //async callback function
void *param;
uint64_t requestId;
uint64_t requestObjRefId;
int32_t msgType;
SDataBuf msgInfo;
__async_send_cb_fn_t fp; // async callback function
void* param;
uint64_t requestId;
uint64_t requestObjRefId;
int32_t msgType;
SDataBuf msgInfo;
} SMsgSendInfo;
typedef struct SQueryNodeAddr {
int32_t nodeId; // vgId or qnodeId
SEpSet epset;
SEpSet epSet;
} SQueryNodeAddr;
int32_t initTaskQueue();
@@ -154,32 +150,67 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code);
* @param pInfo
* @return
*/
int32_t asyncSendMsgToServer(void *pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo);
int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo);
int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp);
int32_t queryBuildUseDbOutput(SUseDbOutput* pOut, SUseDbRsp* usedbRsp);
void initQueryModuleMsgHandle();
const SSchema* tGetTbnameColumnSchema();
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta **pMeta);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char **msg, int32_t msgSize, int32_t *msgLen);
extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char *msg, int32_t msgSize);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen);
extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize);
#define SET_META_TYPE_NULL(t) (t) = META_TYPE_NULL_TABLE
#define SET_META_TYPE_CTABLE(t) (t) = META_TYPE_CTABLE
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
#define SET_META_TYPE_NULL(t) (t) = META_TYPE_NULL_TABLE
#define SET_META_TYPE_CTABLE(t) (t) = META_TYPE_CTABLE
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); }} while(0)
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); }} while(0)
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); }} while(0)
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); }} while(0)
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebugL(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); }} while(0)
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \
taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qError(...) \
do { \
if (qDebugFlag & DEBUG_ERROR) { \
taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qWarn(...) \
do { \
if (qDebugFlag & DEBUG_WARN) { \
taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qInfo(...) \
do { \
if (qDebugFlag & DEBUG_INFO) { \
taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
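The reformatted wrappers above keep their original semantics: the arguments are evaluated and taosPrintLog runs only when the matching bit is set in qDebugFlag, and the do { ... } while (0) shell makes each macro behave like a single statement. A hypothetical call site (the message and code value are made up for illustration):

int32_t code = -1;
qError("failed to process fetch rsp, code:%d", code);  /* logs only if DEBUG_ERROR is enabled in qDebugFlag */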
#ifdef __cplusplus
}

View File

@@ -34,16 +34,10 @@
#include <sys/epoll.h>
#endif
#ifdef USE_UV
#include <uv.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifndef USE_UV
#define TAOS_EPOLL_WAIT_TIME 500
typedef int32_t SOCKET;
typedef SOCKET EpollFd;
@@ -63,8 +57,6 @@ int32_t taosCloseSocket(SocketFd fd);
void taosShutDownSocketRD(SOCKET fd);
void taosShutDownSocketWR(SOCKET fd);
int32_t taosSetNonblocking(SOCKET sock, int32_t on);
void taosIgnSIGPIPE();
void taosSetMaskSIGPIPE();
int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen);
int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t *optlen);
@@ -94,14 +86,13 @@ SOCKET taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp);
SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port);
int32_t taosKeepTcpAlive(SOCKET sockFd);
int32_t taosGetFqdn(char *);
void taosBlockSIGPIPE();
uint32_t taosGetIpv4FromFqdn(const char *);
int32_t taosGetFqdn(char *);
void tinet_ntoa(char *ipstr, uint32_t ip);
uint32_t ip2uint(const char *const ip_addr);
#endif
void taosBlockSIGPIPE();
void taosIgnSIGPIPE();
void taosSetMaskSIGPIPE();
#ifdef __cplusplus
}

View File

@@ -517,7 +517,7 @@ void* doFetchRow(SRequestObj* pRequest) {
SShowReqInfo* pShowReqInfo = &pRequest->body.showInfo;
SVgroupInfo* pVgroupInfo = taosArrayGet(pShowReqInfo->pArray, pShowReqInfo->currentIndex);
epSet = pVgroupInfo->epset;
epSet = pVgroupInfo->epSet;
} else if (pRequest->type == TDMT_VND_SHOW_TABLES_FETCH) {
pRequest->type = TDMT_VND_SHOW_TABLES;
SShowReqInfo* pShowReqInfo = &pRequest->body.showInfo;
@@ -534,7 +534,7 @@ void* doFetchRow(SRequestObj* pRequest) {
pRequest->body.requestMsg.pData = pShowReq;
SMsgSendInfo* body = buildMsgInfoImpl(pRequest);
epSet = pVgroupInfo->epset;
epSet = pVgroupInfo->epSet;
int64_t transporterId = 0;
STscObj* pTscObj = pRequest->pTscObj;

View File

@@ -13,8 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "clientInt.h"
#include "clientLog.h"
#include "parser.h"
@@ -606,17 +604,17 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
int32_t tmqGetSkipLogNum(tmq_message_t* tmq_message) {
if (tmq_message == NULL) return 0;
SMqConsumeRsp* pRsp = &tmq_message->consumeRsp;
SMqPollRsp* pRsp = &tmq_message->consumeRsp;
return pRsp->skipLogNum;
}
void tmqShowMsg(tmq_message_t* tmq_message) {
if (tmq_message == NULL) return;
static bool noPrintSchema;
char pBuf[128];
SMqConsumeRsp* pRsp = &tmq_message->consumeRsp;
int32_t colNum = pRsp->schemas->nCols;
static bool noPrintSchema;
char pBuf[128];
SMqPollRsp* pRsp = &tmq_message->consumeRsp;
int32_t colNum = pRsp->schemas->nCols;
if (!noPrintSchema) {
printf("|");
for (int32_t i = 0; i < colNum; i++) {
@@ -703,7 +701,7 @@ int32_t tmqPollCb(void* param, const SDataBuf* pMsg, int32_t code) {
goto WRITE_QUEUE_FAIL;
}
memcpy(pRsp, pMsg->pData, sizeof(SMqRspHead));
tDecodeSMqConsumeRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRsp->consumeRsp);
tDecodeSMqPollRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRsp->consumeRsp);
/*printf("rsp commit off:%ld rsp off:%ld has data:%d\n", pRsp->committedOffset, pRsp->rspOffset, pRsp->numOfTopics);*/
if (pRsp->consumeRsp.numOfTopics == 0) {
/*printf("no data\n");*/
@@ -874,7 +872,7 @@ tmq_resp_err_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
return TMQ_RESP_ERR__FAIL;
}
SMqConsumeReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t blockingTime, SMqClientTopic* pTopic, SMqClientVg* pVg) {
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t blockingTime, SMqClientTopic* pTopic, SMqClientVg* pVg) {
int64_t reqOffset;
if (pVg->currentOffset >= 0) {
reqOffset = pVg->currentOffset;
@@ -886,7 +884,7 @@ SMqConsumeReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t blockingTime, SMqClien
reqOffset = tmq->resetOffsetCfg;
}
SMqConsumeReq* pReq = malloc(sizeof(SMqConsumeReq));
SMqPollReq* pReq = malloc(sizeof(SMqPollReq));
if (pReq == NULL) {
return NULL;
}
@@ -900,7 +898,7 @@ SMqConsumeReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t blockingTime, SMqClien
pReq->currentOffset = reqOffset;
pReq->head.vgId = htonl(pVg->vgId);
pReq->head.contLen = htonl(sizeof(SMqConsumeReq));
pReq->head.contLen = htonl(sizeof(SMqPollReq));
return pReq;
}
@@ -914,7 +912,7 @@ tmq_message_t* tmqSyncPollImpl(tmq_t* tmq, int64_t blockingTime) {
/*if (vgStatus != TMQ_VG_STATUS__IDLE) {*/
/*continue;*/
/*}*/
SMqConsumeReq* pReq = tmqBuildConsumeReqImpl(tmq, blockingTime, pTopic, pVg);
SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, blockingTime, pTopic, pVg);
if (pReq == NULL) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
// TODO: out of mem
@@ -941,7 +939,7 @@ tmq_message_t* tmqSyncPollImpl(tmq_t* tmq, int64_t blockingTime) {
sendInfo->msgInfo = (SDataBuf){
.pData = pReq,
.len = sizeof(SMqConsumeReq),
.len = sizeof(SMqPollReq),
.handle = NULL,
};
sendInfo->requestId = generateRequestId();
@@ -982,7 +980,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
if (vgStatus != TMQ_VG_STATUS__IDLE) {
continue;
}
SMqConsumeReq* pReq = tmqBuildConsumeReqImpl(tmq, blockingTime, pTopic, pVg);
SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, blockingTime, pTopic, pVg);
if (pReq == NULL) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
@@ -1011,7 +1009,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
sendInfo->msgInfo = (SDataBuf){
.pData = pReq,
.len = sizeof(SMqConsumeReq),
.len = sizeof(SMqPollReq),
.handle = NULL,
};
sendInfo->requestId = generateRequestId();
@@ -1271,7 +1269,7 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* tmq_topic_v
void tmq_message_destroy(tmq_message_t* tmq_message) {
if (tmq_message == NULL) return;
SMqConsumeRsp* pRsp = &tmq_message->consumeRsp;
SMqPollRsp* pRsp = &tmq_message->consumeRsp;
tDeleteSMqConsumeRsp(pRsp);
/*free(tmq_message);*/
taosFreeQitem(tmq_message);

View File

@@ -1481,7 +1481,7 @@ static int32_t tSerializeSUseDbRspImp(SCoder *pEncoder, SUseDbRsp *pRsp) {
if (tEncodeI32(pEncoder, pVgInfo->vgId) < 0) return -1;
if (tEncodeU32(pEncoder, pVgInfo->hashBegin) < 0) return -1;
if (tEncodeU32(pEncoder, pVgInfo->hashEnd) < 0) return -1;
if (tEncodeSEpSet(pEncoder, &pVgInfo->epset) < 0) return -1;
if (tEncodeSEpSet(pEncoder, &pVgInfo->epSet) < 0) return -1;
}
return 0;
@@ -1541,7 +1541,7 @@ int32_t tDeserializeSUseDbRspImp(SCoder *pDecoder, SUseDbRsp *pRsp) {
if (tDecodeI32(pDecoder, &vgInfo.vgId) < 0) return -1;
if (tDecodeU32(pDecoder, &vgInfo.hashBegin) < 0) return -1;
if (tDecodeU32(pDecoder, &vgInfo.hashEnd) < 0) return -1;
if (tDecodeSEpSet(pDecoder, &vgInfo.epset) < 0) return -1;
if (tDecodeSEpSet(pDecoder, &vgInfo.epSet) < 0) return -1;
taosArrayPush(pRsp->pVgroupInfos, &vgInfo);
}
@@ -2390,3 +2390,36 @@ int32_t tDecodeSMqCMCommitOffsetReq(SCoder *decoder, SMqCMCommitOffsetReq *pReq)
tEndDecode(decoder);
return 0;
}
int32_t tSerializeSVCreateTSmaReq(void **buf, SVCreateTSmaReq *pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->ver);
tlen += tEncodeTSma(buf, &pReq->tSma);
return tlen;
}
void *tDeserializeSVCreateTSmaReq(void *buf, SVCreateTSmaReq *pReq) {
buf = taosDecodeFixedI64(buf, &(pReq->ver));
if ((buf = tDecodeTSma(buf, &pReq->tSma)) == NULL) {
tdDestroyTSma(&pReq->tSma);
}
return buf;
}
int32_t tSerializeSVDropTSmaReq(void **buf, SVDropTSmaReq *pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->ver);
tlen += taosEncodeString(buf, pReq->indexName);
return tlen;
}
void *tDeserializeSVDropTSmaReq(void *buf, SVDropTSmaReq *pReq) {
buf = taosDecodeFixedI64(buf, &(pReq->ver));
buf = taosDecodeStringTo(buf, pReq->indexName);
return buf;
}
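These new (de)serializers follow the two-pass encode convention visible elsewhere in this commit (compare tEncodeSMqPollRsp(NULL, &rsp) in the tq code further down): passing NULL measures the encoded size, passing a real buffer writes into it and advances the cursor. A hypothetical round trip, assuming the caller has populated the request:

#include <stdlib.h>  /* malloc, free */

SVCreateTSmaReq req = {.ver = 1};  /* .tSma assumed to be filled in by the caller */
int32_t tlen = tSerializeSVCreateTSmaReq(NULL, &req);  /* pass 1: measure */
void *buf = malloc(tlen);
void *p = buf;  /* the encoder advances p, so keep buf for decoding and freeing */
tSerializeSVCreateTSmaReq(&p, &req);  /* pass 2: write */

SVCreateTSmaReq out = {0};
tDeserializeSVCreateTSmaReq(buf, &out);  /* decode from the head of the buffer */
free(buf);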

View File

@@ -900,10 +900,10 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) {
vgInfo.vgId = pVgroup->vgId;
vgInfo.hashBegin = pVgroup->hashBegin;
vgInfo.hashEnd = pVgroup->hashEnd;
vgInfo.epset.numOfEps = pVgroup->replica;
vgInfo.epSet.numOfEps = pVgroup->replica;
for (int32_t gid = 0; gid < pVgroup->replica; ++gid) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[gid];
SEp *pEp = &vgInfo.epset.eps[gid];
SEp *pEp = &vgInfo.epSet.eps[gid];
SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
if (pDnode != NULL) {
memcpy(pEp->fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
@@ -911,7 +911,7 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) {
}
mndReleaseDnode(pMnode, pDnode);
if (pVgid->role == TAOS_SYNC_STATE_LEADER) {
vgInfo.epset.inUse = gid;
vgInfo.epSet.inUse = gid;
}
}
vindex++;

View File

@@ -33,23 +33,29 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
SSdb* pSdb = pMnode->pSdb;
SVgObj* pVgroup = NULL;
SQueryDag* pDag = qStringToDag(pTopic->physicalPlan);
SArray* pAray = NULL;
SArray* unassignedVg = pSub->unassignedVg;
if (pDag == NULL) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
ASSERT(pSub->vgNum == 0);
int32_t levelNum = taosArrayGetSize(pDag->pSubplans);
if (levelNum != 1) {
qDestroyQueryDag(pDag);
terrno = TSDB_CODE_MND_UNSUPPORTED_TOPIC;
return -1;
}
SArray* inner = taosArrayGet(pDag->pSubplans, 0);
SArray* plans = taosArrayGetP(pDag->pSubplans, 0);
int32_t opNum = taosArrayGetSize(inner);
int32_t opNum = taosArrayGetSize(plans);
if (opNum != 1) {
qDestroyQueryDag(pDag);
terrno = TSDB_CODE_MND_UNSUPPORTED_TOPIC;
return -1;
}
SSubplan* plan = taosArrayGetP(inner, 0);
SSubplan* plan = taosArrayGetP(plans, 0);
void* pIter = NULL;
while (1) {
@@ -62,17 +68,24 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
pSub->vgNum++;
plan->execNode.nodeId = pVgroup->vgId;
plan->execNode.epset = mndGetVgroupEpset(pMnode, pVgroup);
plan->execNode.epSet = mndGetVgroupEpset(pMnode, pVgroup);
SMqConsumerEp consumerEp = {0};
consumerEp.status = 0;
consumerEp.consumerId = -1;
consumerEp.epSet = plan->execNode.epset;
consumerEp.epSet = plan->execNode.epSet;
consumerEp.vgId = plan->execNode.nodeId;
int32_t msgLen;
int32_t code = qSubPlanToString(plan, &consumerEp.qmsg, &msgLen);
taosArrayPush(unassignedVg, &consumerEp);
if (qSubPlanToString(plan, &consumerEp.qmsg, &msgLen) < 0) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryDag(pDag);
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
taosArrayPush(pSub->unassignedVg, &consumerEp);
}
qDestroyQueryDag(pDag);
return 0;
}

View File

@@ -93,7 +93,6 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
strcpy(pSub->key, key);
if (mndSchedInitSubEp(pMnode, pTopic, pSub) < 0) {
terrno = TSDB_CODE_MND_UNSUPPORTED_TOPIC;
tDeleteSMqSubscribeObj(pSub);
free(pSub);
return NULL;
@@ -295,7 +294,11 @@ static int32_t mndProcessGetSubEpReq(SMnodeMsg *pMsg) {
for (int32_t k = 0; k < vgsz; k++) {
char offsetKey[TSDB_PARTITION_KEY_LEN];
SMqConsumerEp *pConsumerEp = taosArrayGet(pSubConsumer->vgInfo, k);
SMqSubVgEp vgEp = {.epSet = pConsumerEp->epSet, .vgId = pConsumerEp->vgId, .offset = -1};
SMqSubVgEp vgEp = {
.epSet = pConsumerEp->epSet,
.vgId = pConsumerEp->vgId,
.offset = -1,
};
mndMakePartitionKey(offsetKey, pConsumer->cgroup, topicName, pConsumerEp->vgId);
SMqOffsetObj *pOffsetObj = mndAcquireOffset(pMnode, offsetKey);
if (pOffsetObj != NULL) {
@@ -345,7 +348,7 @@ static SMqRebSubscribe *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) {
if (pRebSub == NULL) {
pRebSub = tNewSMqRebSubscribe(key);
if (pRebSub == NULL) {
// TODO
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
taosHashPut(pHash, key, strlen(key), pRebSub, sizeof(SMqRebSubscribe));
@@ -412,7 +415,11 @@ static int32_t mndProcessMqTimerMsg(SMnodeMsg *pMsg) {
}
if (taosHashGetSize(pRebMsg->rebSubHash) != 0) {
mInfo("mq rebalance will be triggered");
SRpcMsg rpcMsg = {.msgType = TDMT_MND_MQ_DO_REBALANCE, .pCont = pRebMsg, .contLen = sizeof(SMqDoRebalanceMsg)};
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_MQ_DO_REBALANCE,
.pCont = pRebMsg,
.contLen = sizeof(SMqDoRebalanceMsg),
};
pMnode->putReqToMWriteQFp(pMnode->pDnode, &rpcMsg);
} else {
taosHashCleanup(pRebMsg->rebSubHash);

View File

@@ -96,7 +96,11 @@ static void mndCalMqRebalance(void *param, void *tmrId) {
if (mndIsMaster(pMnode)) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
SRpcMsg rpcMsg = {.msgType = TDMT_MND_MQ_TIMER, .pCont = pReq, .contLen = contLen};
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_MQ_TIMER,
.pCont = pReq,
.contLen = contLen,
};
pMnode->putReqToMReadQFp(pMnode->pDnode, &rpcMsg);
}
@@ -631,4 +635,4 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
}
return 0;
}
}

View File

@@ -292,9 +292,9 @@ TEST_F(MndTestDb, 03_Create_Use_Restart_Use_Db) {
EXPECT_GT(pInfo->vgId, 0);
EXPECT_EQ(pInfo->hashBegin, 0);
EXPECT_EQ(pInfo->hashEnd, UINT32_MAX / 2 - 1);
EXPECT_EQ(pInfo->epset.inUse, 0);
EXPECT_EQ(pInfo->epset.numOfEps, 1);
SEp* pAddr = &pInfo->epset.eps[0];
EXPECT_EQ(pInfo->epSet.inUse, 0);
EXPECT_EQ(pInfo->epSet.numOfEps, 1);
SEp* pAddr = &pInfo->epSet.eps[0];
EXPECT_EQ(pAddr->port, 9030);
EXPECT_STREQ(pAddr->fqdn, "localhost");
}
@@ -307,9 +307,9 @@ TEST_F(MndTestDb, 03_Create_Use_Restart_Use_Db) {
EXPECT_GT(pInfo->vgId, 0);
EXPECT_EQ(pInfo->hashBegin, UINT32_MAX / 2);
EXPECT_EQ(pInfo->hashEnd, UINT32_MAX);
EXPECT_EQ(pInfo->epset.inUse, 0);
EXPECT_EQ(pInfo->epset.numOfEps, 1);
SEp* pAddr = &pInfo->epset.eps[0];
EXPECT_EQ(pInfo->epSet.inUse, 0);
EXPECT_EQ(pInfo->epSet.numOfEps, 1);
SEp* pAddr = &pInfo->epSet.eps[0];
EXPECT_EQ(pAddr->port, 9030);
EXPECT_STREQ(pAddr->fqdn, "localhost");
}

View File

@@ -40,24 +40,27 @@ typedef struct SMTbCursor SMTbCursor;
typedef struct SMCtbCursor SMCtbCursor;
typedef struct SMSmaCursor SMSmaCursor;
typedef SVCreateTbReq STbCfg;
typedef STSma SSmaCfg;
typedef SVCreateTbReq STbCfg;
typedef SVCreateTSmaReq SSmaCfg;
// SMeta operations
SMeta *metaOpen(const char *path, const SMetaCfg *pMetaCfg, SMemAllocatorFactory *pMAF);
void metaClose(SMeta *pMeta);
void metaRemove(const char *path);
int metaCreateTable(SMeta *pMeta, STbCfg *pTbCfg);
int metaDropTable(SMeta *pMeta, tb_uid_t uid);
int metaCommit(SMeta *pMeta);
SMeta * metaOpen(const char *path, const SMetaCfg *pMetaCfg, SMemAllocatorFactory *pMAF);
void metaClose(SMeta *pMeta);
void metaRemove(const char *path);
int metaCreateTable(SMeta *pMeta, STbCfg *pTbCfg);
int metaDropTable(SMeta *pMeta, tb_uid_t uid);
int metaCommit(SMeta *pMeta);
int32_t metaCreateTSma(SMeta *pMeta, SSmaCfg *pCfg);
int32_t metaDropTSma(SMeta *pMeta, char *indexName);
// For Query
STbCfg * metaGetTbInfoByUid(SMeta *pMeta, tb_uid_t uid);
STbCfg * metaGetTbInfoByName(SMeta *pMeta, char *tbname, tb_uid_t *uid);
SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline);
STSchema * metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver);
SSmaCfg * metaGetSmaInfoByName(SMeta *pMeta, const char *indexName);
STSma * metaGetSmaInfoByName(SMeta *pMeta, const char *indexName);
STSmaWrapper * metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid);
SArray * metaGetSmaTbUids(SMeta *pMeta, bool isDup);
SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
void metaCloseTbCursor(SMTbCursor *pTbCur);

View File

@@ -13,16 +13,14 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_TQ_H_
#define _TD_TQ_H_
#ifndef _TQ_H_
#define _TQ_H_
#include "tcommon.h"
#include "executor.h"
#include "tmallocator.h"
#include "meta.h"
#include "scheduler.h"
#include "taoserror.h"
#include "tlist.h"
#include "tcommon.h"
#include "tmallocator.h"
#include "tmsg.h"
#include "trpc.h"
#include "ttimer.h"
@@ -54,7 +52,7 @@ void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, tmsg_t msgType, int64_t version);
int tqCommit(STQ*);
int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessSetConnReq(STQ* pTq, char* msg);
int32_t tqProcessRebReq(STQ* pTq, char* msg);
@@ -62,4 +60,4 @@ int32_t tqProcessRebReq(STQ* pTq, char* msg);
}
#endif
#endif /*_TD_TQ_H_*/
#endif /*_TQ_H_*/

View File

@@ -33,7 +33,7 @@ int metaOpenDB(SMeta* pMeta);
void metaCloseDB(SMeta* pMeta);
int metaSaveTableToDB(SMeta* pMeta, STbCfg* pTbCfg);
int metaRemoveTableFromDb(SMeta* pMeta, tb_uid_t uid);
int metaSaveSmaToDB(SMeta* pMeta, SSmaCfg* pTbCfg);
int metaSaveSmaToDB(SMeta* pMeta, STSma* pTbCfg);
int metaRemoveSmaFromDb(SMeta* pMeta, const char* indexName);
// SMetaCache

View File

@@ -19,7 +19,7 @@
#include "meta.h"
#include "tlog.h"
#include "tq.h"
#include "trpc.h"
#include "tqPush.h"
#ifdef __cplusplus
extern "C" {
@@ -31,30 +31,35 @@ extern "C" {
taosPrintLog("TQ FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); \
} \
}
#define tqError(...) \
{ \
if (tqDebugFlag & DEBUG_ERROR) { \
taosPrintLog("TQ ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); \
} \
}
#define tqWarn(...) \
{ \
if (tqDebugFlag & DEBUG_WARN) { \
taosPrintLog("TQ WARN ", DEBUG_WARN, 255, __VA_ARGS__); \
} \
}
#define tqInfo(...) \
{ \
if (tqDebugFlag & DEBUG_INFO) { \
taosPrintLog("TQ ", DEBUG_INFO, 255, __VA_ARGS__); \
} \
}
#define tqDebug(...) \
{ \
if (tqDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); \
} \
}
#define tqTrace(...) \
{ \
if (tqDebugFlag & DEBUG_TRACE) { \
@@ -138,9 +143,7 @@ typedef struct {
// topics that are not connected
STqMetaList* unconnectTopic;
// TODO: temporary use, to be replaced by unified tfile
TdFilePtr pFile;
// TODO: temporary use, to be replaced by unified tfile
TdFilePtr pIdxFile;
char* dirPath;
@ -157,6 +160,7 @@ struct STQ {
STqCfg* tqConfig;
STqMemRef tqMemRef;
STqMetaStore* tqMeta;
STqPushMgr* tqPushMgr;
SWal* pWal;
SMeta* pVnodeMeta;
};

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TQ_PUSH_H_
#define _TQ_PUSH_H_
#include "thash.h"
#include "trpc.h"
#include "ttimer.h"
#ifdef __cplusplus
extern "C" {
#endif
enum {
TQ_PUSHER_TYPE__CLIENT = 1,
TQ_PUSHER_TYPE__STREAM,
};
typedef struct {
int8_t type;
int8_t reserved[3];
int32_t ttl;
int64_t consumerId;
SRpcMsg* pMsg;
// SMqPollRsp* rsp;
} STqClientPusher;
typedef struct {
int8_t type;
int8_t nodeType;
int8_t reserved[6];
int64_t streamId;
SEpSet epSet;
} STqStreamPusher;
typedef struct {
int8_t type; // mq or stream
} STqPusher;
typedef struct {
SHashObj* pHash; // <id, STqPush*>
} STqPushMgr;
typedef struct {
int8_t inited;
tmr_h timer;
} STqPushMgmt;
static STqPushMgmt tqPushMgmt;
int32_t tqPushMgrInit();
void tqPushMgrCleanUp();
STqPushMgr* tqPushMgrOpen();
void tqPushMgrClose(STqPushMgr* pushMgr);
STqClientPusher* tqAddClientPusher(STqPushMgr* pushMgr, SRpcMsg* pMsg, int64_t consumerId, int64_t ttl);
STqStreamPusher* tqAddStreamPusher(STqPushMgr* pushMgr, int64_t streamId, SEpSet* pEpSet);
#ifdef __cplusplus
}
#endif
#endif /*_TQ_PUSH_H_*/
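For orientation, a minimal usage sketch of this new API (not from the commit; the consumer id and ttl are made-up values), mirroring the implementation added in tqPush.c further down:

STqPushMgr* mgr = tqPushMgrOpen();
if (mgr != NULL) {
  SRpcMsg* pHeldMsg = NULL;  /* in practice, a poll request held open by the poll handler */
  tqAddClientPusher(mgr, pHeldMsg, /*consumerId*/ 1001, /*ttl*/ 5000);
  tqPushMgrClose(mgr);       /* note: frees the hash itself, not the pushers it still holds */
}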

View File

@@ -41,55 +41,4 @@ static FORCE_INLINE int32_t tsdbEncodeTSmaKey(uint64_t tableUid, col_id_t colId,
return len;
}
#if 0
typedef struct {
int minFid;
int midFid;
int maxFid;
TSKEY minKey;
} SRtn;
typedef struct {
uint64_t uid;
int64_t offset;
int64_t size;
} SKVRecord;
void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn);
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
return (int)((key + 1) / tsTickPerDay[precision] / days - 1);
} else {
return (int)((key / tsTickPerDay[precision] / days));
}
}
static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) {
if (fid >= pRtn->maxFid) {
return 0;
} else if (fid >= pRtn->midFid) {
return 1;
} else if (fid >= pRtn->minFid) {
return 2;
} else {
return -1;
}
}
#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5)
int tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord);
void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord);
void *tsdbCommitData(STsdbRepo *pRepo);
int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx);
int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock,
bool isLast, bool isSuper, void **ppBuf, void **ppCBuf);
int tsdbApplyRtn(STsdbRepo *pRepo);
#endif
#endif /* _TD_TSDB_SMA_H_ */

View File

@@ -226,7 +226,7 @@ int metaRemoveTableFromDb(SMeta *pMeta, tb_uid_t uid) {
return 0;
}
int metaSaveSmaToDB(SMeta *pMeta, SSmaCfg *pSmaCfg) {
int metaSaveSmaToDB(SMeta *pMeta, STSma *pSmaCfg) {
char buf[512] = {0}; // TODO: may overflow
void *pBuf = NULL;
DBT key1 = {0}, value1 = {0};
@@ -485,7 +485,7 @@ static int metaCtbIdxCb(DB *pIdx, const DBT *pKey, const DBT *pValue, DBT *pSKey
}
static int metaSmaIdxCb(DB *pIdx, const DBT *pKey, const DBT *pValue, DBT *pSKey) {
SSmaCfg *pSmaCfg = (SSmaCfg *)(pValue->app_data);
STSma *pSmaCfg = (STSma *)(pValue->app_data);
memset(pSKey, 0, sizeof(*pSKey));
pSKey->data = &(pSmaCfg->tableUid);
@@ -609,8 +609,8 @@ STbCfg *metaGetTbInfoByName(SMeta *pMeta, char *tbname, tb_uid_t *uid) {
return pTbCfg;
}
SSmaCfg *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
SSmaCfg *pCfg = NULL;
STSma *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
STSma * pCfg = NULL;
SMetaDB *pDB = pMeta->pDB;
DBT key = {0};
DBT value = {0};
@@ -629,7 +629,7 @@ SSmaCfg *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
}
// Decode
pCfg = (SSmaCfg *)malloc(sizeof(SSmaCfg));
pCfg = (STSma *)malloc(sizeof(STSma));
if (pCfg == NULL) {
return NULL;
}
@@ -885,8 +885,8 @@ STSmaWrapper *metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid) {
return NULL;
}
DBT skey = {.data = &(pCur->uid)};
DBT pval = {.size = sizeof(pCur->uid)};
DBT skey = {.data = &(pCur->uid), .size = sizeof(pCur->uid)};
DBT pval = {0};
void *pBuf = NULL;
while (true) {
@@ -914,10 +914,49 @@ STSmaWrapper *metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid) {
}
metaCloseSmaCurosr(pCur);
return pSW;
}
SArray *metaGetSmaTbUids(SMeta *pMeta, bool isDup) {
SArray * pUids = NULL;
SMetaDB *pDB = pMeta->pDB;
DBC * pCur = NULL;
DBT pkey = {0}, pval = {0};
int ret;
pUids = taosArrayInit(16, sizeof(tb_uid_t));
if (!pUids) {
return NULL;
}
// TODO: lock?
ret = pDB->pSmaIdx->cursor(pDB->pSmaIdx, NULL, &pCur, 0);
if (ret != 0) {
taosArrayDestroy(pUids);
return NULL;
}
void *pBuf = NULL;
// TODO: lock?
while (true) {
ret = pCur->get(pCur, &pkey, &pval, isDup ? DB_NEXT_DUP : DB_NEXT_NODUP);
if(ret == 0) {
taosArrayPush(pUids, pkey.data);
continue;
}
break;
}
if (pCur) {
pCur->close(pCur);
}
return pUids;
}
static void metaDBWLock(SMetaDB *pDB) {
#if IMPL_WITH_LOCK
pthread_rwlock_wrlock(&(pDB->rwlock));

View File

@@ -107,19 +107,27 @@ int metaRemoveTableFromIdx(SMeta *pMeta, tb_uid_t uid) {
return 0;
}
int metaCreateSma(SMeta *pMeta, SSmaCfg *pSmaCfg) {
// Validate the tbOptions
// if (metaValidateTbCfg(pMeta, pTbCfg) < 0) {
// // TODO: handle error
// return -1;
// }
int32_t metaCreateTSma(SMeta *pMeta, SSmaCfg *pCfg) {
// TODO: Validate the cfg
// The table uid should exists and be super table or common table.
// Check other cfg value
// TODO: add atomicity
if (metaSaveSmaToDB(pMeta, pSmaCfg) < 0) {
if (metaSaveSmaToDB(pMeta, &pCfg->tSma) < 0) {
// TODO: handle error
return -1;
}
return 0;
return TSDB_CODE_SUCCESS;
}
int32_t metaDropTSma(SMeta *pMeta, char* indexName) {
// TODO: Validate the cfg
// TODO: add atomicity
if (metaRemoveSmaFromDb(pMeta, indexName) < 0) {
// TODO: handle error
return -1;
}
return TSDB_CODE_SUCCESS;
}

View File

@@ -50,3 +50,4 @@ int metaDropTable(SMeta *pMeta, tb_uid_t uid) {
return 0;
}

View File

@@ -12,28 +12,16 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "tcompare.h"
#include "tqInt.h"
#include "tqMetaStore.h"
int tqInit() {
int8_t old = atomic_val_compare_exchange_8(&tqMgmt.inited, 0, 1);
if (old == 1) return 0;
int32_t tqInit() { return tqPushMgrInit(); }
tqMgmt.timer = taosTmrInit(0, 0, 0, "TQ");
return 0;
}
void tqCleanUp() { tqPushMgrCleanUp(); }
void tqCleanUp() {
int8_t old = atomic_val_compare_exchange_8(&tqMgmt.inited, 1, 0);
if (old == 0) return;
taosTmrStop(tqMgmt.timer);
taosTmrCleanUp(tqMgmt.timer);
}
STQ* tqOpen(const char* path, SWal* pWal, SMeta* pMeta, STqCfg* tqConfig, SMemAllocatorFactory* allocFac) {
STQ* tqOpen(const char* path, SWal* pWal, SMeta* pVnodeMeta, STqCfg* tqConfig, SMemAllocatorFactory* allocFac) {
STQ* pTq = malloc(sizeof(STQ));
if (pTq == NULL) {
terrno = TSDB_CODE_TQ_OUT_OF_MEMORY;
@@ -42,7 +30,7 @@ STQ* tqOpen(const char* path, SWal* pWal, SMeta* pMeta, STqCfg* tqConfig, SMemAl
pTq->path = strdup(path);
pTq->tqConfig = tqConfig;
pTq->pWal = pWal;
pTq->pVnodeMeta = pMeta;
pTq->pVnodeMeta = pVnodeMeta;
#if 0
pTq->tqMemRef.pAllocatorFactory = allocFac;
pTq->tqMemRef.pAllocator = allocFac->create(allocFac);
@@ -60,6 +48,13 @@ STQ* tqOpen(const char* path, SWal* pWal, SMeta* pMeta, STqCfg* tqConfig, SMemAl
return NULL;
}
pTq->tqPushMgr = tqPushMgrOpen();
if (pTq->tqPushMgr == NULL) {
// free store
free(pTq);
return NULL;
}
return pTq;
}
@@ -72,6 +67,8 @@ void tqClose(STQ* pTq) {
}
int tqPushMsg(STQ* pTq, void* msg, tmsg_t msgType, int64_t version) {
// iterate hash
// process all msg
// if waiting
// memcpy and send msg to fetch thread
// TODO: add reference
@@ -199,7 +196,10 @@ int32_t tqDeserializeConsumer(STQ* pTq, const STqSerializedHead* pHead, STqConsu
for (int j = 0; j < TQ_BUFFER_SIZE; j++) {
pTopic->buffer.output[j].status = 0;
STqReadHandle* pReadHandle = tqInitSubmitMsgScanner(pTq->pVnodeMeta);
SReadHandle handle = {.reader = pReadHandle, .meta = pTq->pVnodeMeta};
SReadHandle handle = {
.reader = pReadHandle,
.meta = pTq->pVnodeMeta,
};
pTopic->buffer.output[j].pReadHandle = pReadHandle;
pTopic->buffer.output[j].task = qCreateStreamExecTaskInfo(pTopic->qmsg, &handle);
}
@@ -208,11 +208,11 @@ int32_t tqDeserializeConsumer(STQ* pTq, const STqSerializedHead* pHead, STqConsu
return 0;
}
int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
SMqConsumeReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
int64_t fetchOffset;
int64_t blockingTime = pReq->blockingTime;
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
int64_t fetchOffset;
int64_t blockingTime = pReq->blockingTime;
if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) {
fetchOffset = 0;
@@ -222,7 +222,7 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
fetchOffset = pReq->currentOffset + 1;
}
SMqConsumeRsp rsp = {
SMqPollRsp rsp = {
.consumerId = consumerId,
.numOfTopics = 0,
.pBlockData = NULL,
@@ -236,6 +236,7 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
rpcSendResponse(pMsg);
return 0;
}
int sz = taosArrayGetSize(pConsumer->topics);
ASSERT(sz == 1);
STqTopic* pTopic = taosArrayGet(pConsumer->topics, 0);
@@ -247,13 +248,14 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
SWalHead* pHead;
while (1) {
int8_t pos = fetchOffset % TQ_BUFFER_SIZE;
/*if (fetchOffset > walGetLastVer(pTq->pWal) || walReadWithHandle(pTopic->pReadhandle, fetchOffset) < 0) {*/
if (walReadWithHandle(pTopic->pReadhandle, fetchOffset) < 0) {
// TODO: no more log, set timer to wait blocking time
// if data inserted during waiting, launch query and
// response to user
break;
}
int8_t pos = fetchOffset % TQ_BUFFER_SIZE;
pHead = pTopic->pReadhandle->pHead;
if (pHead->head.msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->head.body;
@@ -280,7 +282,7 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
rsp.numOfTopics = 1;
rsp.pBlockData = pRes;
int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqConsumeRsp(NULL, &rsp);
int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqPollRsp(NULL, &rsp);
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
pMsg->code = -1;
@@ -290,7 +292,7 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
((SMqRspHead*)buf)->epoch = pReq->epoch;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqConsumeRsp(&abuf, &rsp);
tEncodeSMqPollRsp(&abuf, &rsp);
taosArrayDestroyEx(rsp.pBlockData, (void (*)(void*))tDeleteSSDataBlock);
pMsg->pCont = buf;
pMsg->contLen = tlen;
@@ -304,7 +306,10 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
}
}
int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqConsumeRsp(NULL, &rsp);
/*if (blockingTime != 0) {*/
/*tqAddClientPusher(pTq->tqPushMgr, pMsg, consumerId, blockingTime);*/
/*} else {*/
int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqPollRsp(NULL, &rsp);
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
pMsg->code = -1;
@@ -314,12 +319,14 @@ int32_t tqProcessConsumeReq(STQ* pTq, SRpcMsg* pMsg) {
((SMqRspHead*)buf)->epoch = pReq->epoch;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqConsumeRsp(&abuf, &rsp);
tEncodeSMqPollRsp(&abuf, &rsp);
rsp.pBlockData = NULL;
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
rpcSendResponse(pMsg);
/*}*/
return 0;
}

View File

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tqPush.h"
int32_t tqPushMgrInit() {
//
int8_t old = atomic_val_compare_exchange_8(&tqPushMgmt.inited, 0, 1);
if (old == 1) return 0;
tqPushMgmt.timer = taosTmrInit(0, 0, 0, "TQ");
return 0;
}
void tqPushMgrCleanUp() {
int8_t old = atomic_val_compare_exchange_8(&tqPushMgmt.inited, 1, 0);
if (old == 0) return;
taosTmrStop(tqPushMgmt.timer);
taosTmrCleanUp(tqPushMgmt.timer);
}
STqPushMgr* tqPushMgrOpen() {
STqPushMgr* mgr = malloc(sizeof(STqPushMgr));
if (mgr == NULL) {
return NULL;
}
mgr->pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
return mgr;
}
void tqPushMgrClose(STqPushMgr* pushMgr) {
taosHashCleanup(pushMgr->pHash);
free(pushMgr);
}
STqClientPusher* tqAddClientPusher(STqPushMgr* pushMgr, SRpcMsg* pMsg, int64_t consumerId, int64_t ttl) {
STqClientPusher* clientPusher = malloc(sizeof(STqClientPusher));
if (clientPusher == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
clientPusher->type = TQ_PUSHER_TYPE__CLIENT;
clientPusher->pMsg = pMsg;
clientPusher->consumerId = consumerId;
clientPusher->ttl = ttl;
if (taosHashPut(pushMgr->pHash, &consumerId, sizeof(int64_t), &clientPusher, sizeof(void*)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
free(clientPusher);
// TODO send rsp back
return NULL;
}
return clientPusher;
}
STqStreamPusher* tqAddStreamPusher(STqPushMgr* pushMgr, int64_t streamId, SEpSet* pEpSet) {
STqStreamPusher* streamPusher = malloc(sizeof(STqStreamPusher));
if (streamPusher == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
streamPusher->type = TQ_PUSHER_TYPE__STREAM;
streamPusher->nodeType = 0;
streamPusher->streamId = streamId;
memcpy(&streamPusher->epSet, pEpSet, sizeof(SEpSet));
if (taosHashPut(pushMgr->pHash, &streamId, sizeof(int64_t), &streamPusher, sizeof(void*)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
free(streamPusher);
return NULL;
}
return streamPusher;
}

View File

@@ -29,7 +29,7 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
SReadHandle handle = {.reader = pVnode->pTsdb, .meta = pVnode->pMeta};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:{
case TDMT_VND_QUERY: {
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);
}
case TDMT_VND_QUERY_CONTINUE:
@@ -63,7 +63,7 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg) {
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessConsumeReq(pVnode->pTq, pMsg);
return tqProcessPollReq(pVnode->pTq, pMsg);
default:
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -71,8 +71,8 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg) {
}
static int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
STbCfg * pTbCfg = NULL;
STbCfg * pStbCfg = NULL;
STbCfg *pTbCfg = NULL;
STbCfg *pStbCfg = NULL;
tb_uid_t uid;
int32_t nCols;
int32_t nTagCols;
@@ -204,9 +204,9 @@ static void freeItemHelper(void *pItem) {
*/
static int32_t vnodeGetTableList(SVnode *pVnode, SRpcMsg *pMsg) {
SMTbCursor *pCur = metaOpenTbCursor(pVnode->pMeta);
SArray * pArray = taosArrayInit(10, POINTER_BYTES);
SArray *pArray = taosArrayInit(10, POINTER_BYTES);
char * name = NULL;
char *name = NULL;
int32_t totalLen = 0;
int32_t numOfTables = 0;
while ((name = metaTbCursorNext(pCur)) != NULL) {

View File

@@ -69,7 +69,7 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
// TODO: handle error
}
// TODO: maybe need to clear the requst struct
// TODO: maybe need to clear the request struct
free(vCreateTbReq.stbCfg.pSchema);
free(vCreateTbReq.stbCfg.pTagSchema);
free(vCreateTbReq.name);
@@ -133,13 +133,44 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
}
} break;
case TDMT_VND_CREATE_SMA: { // timeRangeSMA
// 1. tdCreateSmaMeta(pVnode->pMeta,...);
// 2. tdCreateSmaDataInit();
// 3. tdCreateSmaData
SSmaCfg vCreateSmaReq = {0};
if (tDeserializeSVCreateTSmaReq(POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), &vCreateSmaReq) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (metaCreateTSma(pVnode->pMeta, &vCreateSmaReq) < 0) {
// TODO: handle error
tdDestroyTSma(&vCreateSmaReq.tSma);
return -1;
}
// TODO: send msg to stream computing to create tSma
// if ((send msg to stream computing) < 0) {
// tdDestroyTSma(&vCreateSmaReq);
// return -1;
// }
tdDestroyTSma(&vCreateSmaReq.tSma);
// TODO: return directly, or continue with the following steps?
} break;
case TDMT_VND_CANCEL_SMA: { // timeRangeSMA
} break;
case TDMT_VND_DROP_SMA: { // timeRangeSMA
SVDropTSmaReq vDropSmaReq = {0};
if (tDeserializeSVDropTSmaReq(POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), &vDropSmaReq) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (metaDropTSma(pVnode->pMeta, vDropSmaReq.indexName) < 0) {
// TODO: handle error
return -1;
}
// TODO: send msg to stream computing to drop tSma
// if ((send msg to stream computing) < 0) {
// tdDestroyTSma(&vCreateSmaReq);
// return -1;
// }
// TODO: return directly, or continue with the following steps?
} break;
default:
ASSERT(0);

View File

@@ -103,6 +103,7 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
const char *smaIndexName2 = "sma_index_test_2";
const char *smaTestDir = "./smaTest";
const uint64_t tbUid = 1234567890;
const uint32_t nCntTSma = 2;
// encode
STSma tSma = {0};
tSma.version = 0;
@@ -125,7 +126,7 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
}
SMeta * pMeta = NULL;
SSmaCfg * pSmaCfg = &tSma;
STSma * pSmaCfg = &tSma;
const SMetaCfg *pMetaCfg = &defaultMetaOptions;
taosRemoveDir(smaTestDir);
@@ -146,14 +147,14 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
metaSaveSmaToDB(pMeta, pSmaCfg);
// get value by indexName
SSmaCfg *qSmaCfg = NULL;
STSma *qSmaCfg = NULL;
qSmaCfg = metaGetSmaInfoByName(pMeta, smaIndexName1);
assert(qSmaCfg != NULL);
printf("name1 = %s\n", qSmaCfg->indexName);
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1);
EXPECT_EQ(qSmaCfg->tableUid, tSma.tableUid);
tdDestroyTSma(qSmaCfg);
free(qSmaCfg);
tfree(qSmaCfg);
qSmaCfg = metaGetSmaInfoByName(pMeta, smaIndexName2);
assert(qSmaCfg != NULL);
@@ -161,7 +162,7 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2);
EXPECT_EQ(qSmaCfg->interval, tSma.interval);
tdDestroyTSma(qSmaCfg);
free(qSmaCfg);
tfree(qSmaCfg);
// get index name by table uid
SMSmaCursor *pSmaCur = metaOpenSmaCursor(pMeta, tbUid);
@@ -175,17 +176,30 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
printf("indexName = %s\n", indexName);
++indexCnt;
}
EXPECT_EQ(indexCnt, 2);
EXPECT_EQ(indexCnt, nCntTSma);
metaCloseSmaCurosr(pSmaCur);
// get wrapper by table uid
STSmaWrapper *pSW = metaGetSmaInfoByUid(pMeta, tbUid);
assert(pSW != NULL);
EXPECT_EQ(pSW->number, 2);
EXPECT_EQ(pSW->number, nCntTSma);
EXPECT_STRCASEEQ(pSW->tSma->indexName, smaIndexName1);
EXPECT_EQ(pSW->tSma->tableUid, tSma.tableUid);
EXPECT_STRCASEEQ((pSW->tSma + 1)->indexName, smaIndexName2);
EXPECT_EQ((pSW->tSma + 1)->tableUid, tSma.tableUid);
tdDestroyTSmaWrapper(pSW);
tfree(pSW);
// get all sma table uids
SArray *pUids = metaGetSmaTbUids(pMeta, false);
assert(pUids != NULL);
for (uint32_t i = 0; i < taosArrayGetSize(pUids); ++i) {
printf("metaGetSmaTbUids: uid[%" PRIu32 "] = %" PRIi64 "\n", i, *(tb_uid_t *)taosArrayGet(pUids, i));
// printf("metaGetSmaTbUids: index[%" PRIu32 "] = %s", i, (char *)taosArrayGet(pUids, i));
}
EXPECT_EQ(taosArrayGetSize(pUids), 1);
taosArrayDestroy(pUids);
// resource release
metaRemoveSmaFromDb(pMeta, smaIndexName1);

View File

@@ -845,7 +845,7 @@ int32_t ctgGetTableMetaFromVnode(SCatalog* pCtg, void *pTrans, const SEpSet* pMg
};
SRpcMsg rpcRsp = {0};
rpcSendRecv(pTrans, &vgroupInfo->epset, &rpcMsg, &rpcRsp);
rpcSendRecv(pTrans, &vgroupInfo->epSet, &rpcMsg, &rpcRsp);
if (TSDB_CODE_SUCCESS != rpcRsp.code) {
if (CTG_TABLE_NOT_EXIST(rpcRsp.code)) {

View File

@@ -17,7 +17,7 @@ TARGET_INCLUDE_DIRECTORIES(
PRIVATE "${CMAKE_SOURCE_DIR}/source/libs/catalog/inc"
)
add_test(
NAME catalogTest
COMMAND catalogTest
)
# add_test(
# NAME catalogTest
# COMMAND catalogTest
# )

View File

@@ -228,10 +228,10 @@ void ctgTestBuildDBVgroup(SDBVgInfo **pdbVgroup) {
vgInfo.vgId = i + 1;
vgInfo.hashBegin = i * hashUnit;
vgInfo.hashEnd = hashUnit * (i + 1) - 1;
vgInfo.epset.numOfEps = i % TSDB_MAX_REPLICA + 1;
vgInfo.epset.inUse = i % vgInfo.epset.numOfEps;
for (int32_t n = 0; n < vgInfo.epset.numOfEps; ++n) {
SEp *addr = &vgInfo.epset.eps[n];
vgInfo.epSet.numOfEps = i % TSDB_MAX_REPLICA + 1;
vgInfo.epSet.inUse = i % vgInfo.epSet.numOfEps;
for (int32_t n = 0; n < vgInfo.epSet.numOfEps; ++n) {
SEp *addr = &vgInfo.epSet.eps[n];
strcpy(addr->fqdn, "a0");
addr->port = n + 22;
}
@@ -301,10 +301,10 @@ void ctgTestRspDbVgroups(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *
vg.hashEnd = htonl(UINT32_MAX);
}
vg.epset.numOfEps = i % TSDB_MAX_REPLICA + 1;
vg.epset.inUse = i % vg.epset.numOfEps;
for (int32_t n = 0; n < vg.epset.numOfEps; ++n) {
SEp *addr = &vg.epset.eps[n];
vg.epSet.numOfEps = i % TSDB_MAX_REPLICA + 1;
vg.epSet.inUse = i % vg.epSet.numOfEps;
for (int32_t n = 0; n < vg.epSet.numOfEps; ++n) {
SEp *addr = &vg.epSet.eps[n];
strcpy(addr->fqdn, "a0");
addr->port = n + 22;
}
@@ -877,7 +877,7 @@ TEST(tableMeta, normalTable) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM)) {
usleep(50000);
@@ -1384,7 +1384,7 @@ TEST(refreshGetMeta, normal2normal) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1463,7 +1463,7 @@ TEST(refreshGetMeta, normal2notexist) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1537,7 +1537,7 @@ TEST(refreshGetMeta, normal2child) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1621,7 +1621,7 @@ TEST(refreshGetMeta, stable2child) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1706,7 +1706,7 @@ TEST(refreshGetMeta, stable2stable) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1794,7 +1794,7 @@ TEST(refreshGetMeta, child2stable) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (true) {
uint64_t n = 0;
@@ -1879,7 +1879,7 @@ TEST(tableDistVgroup, normalTable) {
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), 1);
vgInfo = (SVgroupInfo *)taosArrayGet(vgList, 0);
ASSERT_EQ(vgInfo->vgId, 8);
ASSERT_EQ(vgInfo->epset.numOfEps, 3);
ASSERT_EQ(vgInfo->epSet.numOfEps, 3);
catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
@@ -1921,7 +1921,7 @@ TEST(tableDistVgroup, childTableCase) {
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), 1);
vgInfo = (SVgroupInfo *)taosArrayGet(vgList, 0);
ASSERT_EQ(vgInfo->vgId, 9);
ASSERT_EQ(vgInfo->epset.numOfEps, 4);
ASSERT_EQ(vgInfo->epSet.numOfEps, 4);
catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
@@ -1964,13 +1964,13 @@ TEST(tableDistVgroup, superTableCase) {
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), 10);
vgInfo = (SVgroupInfo *)taosArrayGet(vgList, 0);
ASSERT_EQ(vgInfo->vgId, 1);
ASSERT_EQ(vgInfo->epset.numOfEps, 1);
ASSERT_EQ(vgInfo->epSet.numOfEps, 1);
vgInfo = (SVgroupInfo *)taosArrayGet(vgList, 1);
ASSERT_EQ(vgInfo->vgId, 2);
ASSERT_EQ(vgInfo->epset.numOfEps, 2);
ASSERT_EQ(vgInfo->epSet.numOfEps, 2);
vgInfo = (SVgroupInfo *)taosArrayGet(vgList, 2);
ASSERT_EQ(vgInfo->vgId, 3);
ASSERT_EQ(vgInfo->epset.numOfEps, 3);
ASSERT_EQ(vgInfo->epSet.numOfEps, 3);
catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
@@ -2025,14 +2025,14 @@ TEST(dbVgroup, getSetDbVgroupCase) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epset.numOfEps, 3);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
code = catalogGetTableDistVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgList);
ASSERT_EQ(code, 0);
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), 1);
pvgInfo = (SVgroupInfo *)taosArrayGet(vgList, 0);
ASSERT_EQ(pvgInfo->vgId, 8);
ASSERT_EQ(pvgInfo->epset.numOfEps, 3);
ASSERT_EQ(pvgInfo->epSet.numOfEps, 3);
taosArrayDestroy(vgList);
ctgTestBuildDBVgroup(&dbVgroup);
@@ -2053,14 +2053,14 @@ TEST(dbVgroup, getSetDbVgroupCase) {
code = catalogGetTableHashVgroup(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgInfo);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 7);
ASSERT_EQ(vgInfo.epset.numOfEps, 2);
ASSERT_EQ(vgInfo.epSet.numOfEps, 2);
code = catalogGetTableDistVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &vgList);
ASSERT_EQ(code, 0);
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), 1);
pvgInfo = (SVgroupInfo *)taosArrayGet(vgList, 0);
ASSERT_EQ(pvgInfo->vgId, 8);
ASSERT_EQ(pvgInfo->epset.numOfEps, 3);
ASSERT_EQ(pvgInfo->epSet.numOfEps, 3);
taosArrayDestroy(vgList);
catalogDestroy();

View File

@@ -4977,7 +4977,7 @@ static int32_t doSendFetchDataRequest(SExchangeInfo *pExchangeInfo, SExecTaskInf
SSourceDataInfo *pDataInfo = taosArrayGet(pExchangeInfo->pSourceDataInfo, sourceIndex);
qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", %d/%" PRIzu,
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epset.eps[0].fqdn, pSource->taskId, sourceIndex, totalSources);
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId, sourceIndex, totalSources);
pMsg->header.vgId = htonl(pSource->addr.nodeId);
pMsg->sId = htobe64(pSource->schedId);
@@ -5000,7 +5000,7 @@ static int32_t doSendFetchDataRequest(SExchangeInfo *pExchangeInfo, SExecTaskInf
pMsgSendInfo->fp = loadRemoteDataCallback;
int64_t transporterId = 0;
int32_t code = asyncSendMsgToServer(pExchangeInfo->pTransporter, &pSource->addr.epset, &transporterId, pMsgSendInfo);
int32_t code = asyncSendMsgToServer(pExchangeInfo->pTransporter, &pSource->addr.epSet, &transporterId, pMsgSendInfo);
return TSDB_CODE_SUCCESS;
}

View File

@@ -6,7 +6,7 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(monitor os util common)
target_link_libraries(monitor os util common transport)
if(${BUILD_TEST})
add_subdirectory(test)

View File

@ -19,7 +19,6 @@
#include "monitor.h"
#include "tarray.h"
#include "tlockfree.h"
#include "tjson.h"
typedef struct {

View File

@ -58,7 +58,7 @@ static int32_t setShowInfo(SShowInfo* pShowInfo, SParseContext* pCtx, void** out
SVgroupInfo* info = taosArrayGet(array, 0);
pShowReq->head.vgId = htonl(info->vgId);
*pEpSet = info->epset;
*pEpSet = info->epSet;
*outputLen = sizeof(SVShowTablesReq);
*output = pShowReq;
@ -902,4 +902,4 @@ SVnodeModifOpStmtInfo* qParserValidateCreateTbSqlNode(SSqlInfo* pInfo, SParseCon
}
return pModifSqlStmt;
}
}

View File

@ -43,11 +43,11 @@ public:
SVgroupInfo vgroup = {.vgId = vgid, .hashBegin = 0, .hashEnd = 0, };
vgroup.epset.eps[0] = (SEp){"dnode_1", 6030};
vgroup.epset.eps[1] = (SEp){"dnode_2", 6030};
vgroup.epset.eps[2] = (SEp){"dnode_3", 6030};
vgroup.epset.inUse = 0;
vgroup.epset.numOfEps = 3;
vgroup.epSet.eps[0] = (SEp){"dnode_1", 6030};
vgroup.epSet.eps[1] = (SEp){"dnode_2", 6030};
vgroup.epSet.eps[2] = (SEp){"dnode_3", 6030};
vgroup.epSet.inUse = 0;
vgroup.epSet.numOfEps = 3;
meta_->vgs.emplace_back(vgroup);
return *this;
@ -122,7 +122,7 @@ public:
int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const {
// todo
vgInfo->vgId = 1;
addEpIntoEpSet(&vgInfo->epset, "node1", 6030);
addEpIntoEpSet(&vgInfo->epSet, "node1", 6030);
return 0;
}
@ -143,10 +143,10 @@ public:
meta_[db][tbname]->schema->uid = id_++;
SVgroupInfo vgroup = {.vgId = vgid, .hashBegin = 0, .hashEnd = 0,};
addEpIntoEpSet(&vgroup.epset, "dnode_1", 6030);
addEpIntoEpSet(&vgroup.epset, "dnode_2", 6030);
addEpIntoEpSet(&vgroup.epset, "dnode_3", 6030);
vgroup.epset.inUse = 0;
addEpIntoEpSet(&vgroup.epSet, "dnode_1", 6030);
addEpIntoEpSet(&vgroup.epSet, "dnode_2", 6030);
addEpIntoEpSet(&vgroup.epSet, "dnode_3", 6030);
vgroup.epSet.inUse = 0;
meta_[db][tbname]->vgs.emplace_back(vgroup);
// super table
@ -313,4 +313,4 @@ int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableM
int32_t MockCatalogService::catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const {
return impl_->catalogGetTableHashVgroup(pTableName, vgInfo);
}
}

View File

@ -254,7 +254,7 @@ static SSubplan* initSubplan(SPlanContext* pCxt, int32_t type) {
static void vgroupInfoToNodeAddr(const SVgroupInfo* vg, SQueryNodeAddr* pNodeAddr) {
pNodeAddr->nodeId = vg->vgId;
pNodeAddr->epset = vg->epset;
pNodeAddr->epSet = vg->epSet;
}
static uint64_t splitSubplanByTable(SPlanContext* pCxt, SQueryPlanNode* pPlanNode, SQueryTableInfo* pTableInfo) {
@ -363,7 +363,7 @@ static void splitModificationOpSubPlan(SPlanContext* pCxt, SQueryPlanNode* pPlan
SSubplan* subplan = initSubplan(pCxt, QUERY_TYPE_MODIFY);
SVgDataBlocks* blocks = (SVgDataBlocks*)taosArrayGetP(pPayload->payload, i);
subplan->execNode.epset = blocks->vg.epset;
subplan->execNode.epSet = blocks->vg.epSet;
subplan->pDataSink = createDataInserter(pCxt, blocks, NULL);
subplan->pNode = NULL;
subplan->type = QUERY_TYPE_MODIFY;

View File

@ -13,12 +13,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "plannerInt.h"
#include "parser.h"
#include "cJSON.h"
#include "parser.h"
#include "plannerInt.h"
typedef bool (*FToJson)(const void* obj, cJSON* json);
typedef bool (*FFromJson)(const cJSON* json, void* obj);
typedef bool (*FToJson)(const void* obj, cJSON* json);
typedef bool (*FFromJson)(const cJSON* json, void* obj);
static char* getString(const cJSON* json, const char* name) {
char* p = cJSON_GetStringValue(cJSON_GetObjectItem(json, name));
@ -30,7 +30,7 @@ static void copyString(const cJSON* json, const char* name, char* dst) {
}
static uint64_t getBigintFromString(const cJSON* json, const char* name) {
char* val = getString(json, name);
char* val = getString(json, name);
uint64_t intVal = strtoul(val, NULL, 10);
tfree(val);
@ -39,7 +39,7 @@ static uint64_t getBigintFromString(const cJSON* json, const char* name) {
static int64_t getNumber(const cJSON* json, const char* name) {
double d = cJSON_GetNumberValue(cJSON_GetObjectItem(json, name));
return (int64_t) d;
return (int64_t)d;
}
static bool addObject(cJSON* json, const char* name, FToJson func, const void* obj) {
@ -72,7 +72,8 @@ static bool fromObject(const cJSON* json, const char* name, FFromJson func, void
return func(jObj, obj);
}
static bool fromObjectWithAlloc(const cJSON* json, const char* name, FFromJson func, void** obj, int32_t size, bool required) {
static bool fromObjectWithAlloc(const cJSON* json, const char* name, FFromJson func, void** obj, int32_t size,
bool required) {
cJSON* jObj = cJSON_GetObjectItem(json, name);
if (NULL == jObj) {
return !required;
@ -85,7 +86,7 @@ static bool fromObjectWithAlloc(const cJSON* json, const char* name, FFromJson f
}
static const char* jkPnodeType = "Type";
static int32_t getPnodeTypeSize(cJSON* json) {
static int32_t getPnodeTypeSize(cJSON* json) {
switch (getNumber(json, jkPnodeType)) {
case OP_StreamScan:
case OP_TableScan:
@ -119,7 +120,7 @@ static bool fromPnode(const cJSON* json, const char* name, FFromJson func, void*
static bool fromPnodeArray(const cJSON* json, const char* name, FFromJson func, SArray** array) {
const cJSON* jArray = cJSON_GetObjectItem(json, name);
int32_t size = (NULL == jArray ? 0 : cJSON_GetArraySize(jArray));
int32_t size = (NULL == jArray ? 0 : cJSON_GetArraySize(jArray));
if (size > 0) {
*array = taosArrayInit(size, POINTER_BYTES);
if (NULL == *array) {
@ -128,7 +129,7 @@ static bool fromPnodeArray(const cJSON* json, const char* name, FFromJson func,
}
for (int32_t i = 0; i < size; ++i) {
cJSON* jItem = cJSON_GetArrayItem(jArray, i);
void* item = calloc(1, getPnodeTypeSize(jItem));
void* item = calloc(1, getPnodeTypeSize(jItem));
if (NULL == item || !func(jItem, item)) {
return false;
}
@ -161,9 +162,10 @@ static bool addArray(cJSON* json, const char* name, FToJson func, const SArray*
return addTarray(json, name, func, array, true);
}
static bool fromTarray(const cJSON* json, const char* name, FFromJson func, SArray** array, int32_t itemSize, bool isPoint) {
static bool fromTarray(const cJSON* json, const char* name, FFromJson func, SArray** array, int32_t itemSize,
bool isPoint) {
const cJSON* jArray = cJSON_GetObjectItem(json, name);
int32_t size = (NULL == jArray ? 0 : cJSON_GetArraySize(jArray));
int32_t size = (NULL == jArray ? 0 : cJSON_GetArraySize(jArray));
if (size > 0) {
*array = taosArrayInit(size, isPoint ? POINTER_BYTES : itemSize);
if (NULL == *array) {
@ -188,7 +190,8 @@ static bool fromArray(const cJSON* json, const char* name, FFromJson func, SArra
return fromTarray(json, name, func, array, itemSize, true);
}
static bool addRawArray(cJSON* json, const char* name, FToJson func, const void* array, int32_t itemSize, int32_t size) {
static bool addRawArray(cJSON* json, const char* name, FToJson func, const void* array, int32_t itemSize,
int32_t size) {
if (size > 0) {
cJSON* jArray = cJSON_AddArrayToObject(json, name);
if (NULL == jArray) {
@ -218,7 +221,8 @@ static bool fromItem(const cJSON* jArray, FFromJson func, void* array, int32_t i
return true;
}
static bool fromRawArrayWithAlloc(const cJSON* json, const char* name, FFromJson func, void** array, int32_t itemSize, int32_t* size) {
static bool fromRawArrayWithAlloc(const cJSON* json, const char* name, FFromJson func, void** array, int32_t itemSize,
int32_t* size) {
const cJSON* jArray = getArray(json, name, size);
if (*size > 0) {
*array = calloc(1, itemSize * (*size));
@ -229,7 +233,8 @@ static bool fromRawArrayWithAlloc(const cJSON* json, const char* name, FFromJson
return fromItem(jArray, func, *array, itemSize, *size);
}
static bool fromRawArray(const cJSON* json, const char* name, FFromJson func, void* array, int32_t itemSize, int32_t* size) {
static bool fromRawArray(const cJSON* json, const char* name, FFromJson func, void* array, int32_t itemSize,
int32_t* size) {
const cJSON* jArray = getArray(json, name, size);
return fromItem(jArray, func, array, itemSize, *size);
}
@ -240,7 +245,7 @@ static const char* jkSchemaBytes = "Bytes";
// The 'name' field do not need to be serialized.
static bool schemaToJson(const void* obj, cJSON* jSchema) {
const SSlotSchema* schema = (const SSlotSchema*)obj;
bool res = cJSON_AddNumberToObject(jSchema, jkSchemaType, schema->type);
bool res = cJSON_AddNumberToObject(jSchema, jkSchemaType, schema->type);
if (res) {
res = cJSON_AddNumberToObject(jSchema, jkSchemaColId, schema->colId);
}
@ -264,7 +269,8 @@ static const char* jkDataBlockSchemaPrecision = "Precision";
static bool dataBlockSchemaToJson(const void* obj, cJSON* json) {
const SDataBlockSchema* schema = (const SDataBlockSchema*)obj;
bool res = addRawArray(json, jkDataBlockSchemaSlotSchema, schemaToJson, schema->pSchema, sizeof(SSlotSchema), schema->numOfCols);
bool res = addRawArray(json, jkDataBlockSchemaSlotSchema, schemaToJson, schema->pSchema, sizeof(SSlotSchema),
schema->numOfCols);
if (res) {
res = cJSON_AddNumberToObject(json, jkDataBlockSchemaResultRowSize, schema->resultRowSize);
}
@ -279,7 +285,8 @@ static bool dataBlockSchemaFromJson(const cJSON* json, void* obj) {
schema->resultRowSize = getNumber(json, jkDataBlockSchemaResultRowSize);
schema->precision = getNumber(json, jkDataBlockSchemaPrecision);
return fromRawArrayWithAlloc(json, jkDataBlockSchemaSlotSchema, schemaFromJson, (void**)&(schema->pSchema), sizeof(SSlotSchema), &schema->numOfCols);
return fromRawArrayWithAlloc(json, jkDataBlockSchemaSlotSchema, schemaFromJson, (void**)&(schema->pSchema),
sizeof(SSlotSchema), &schema->numOfCols);
}
static const char* jkColumnFilterInfoLowerRelOptr = "LowerRelOptr";
@ -290,7 +297,7 @@ static const char* jkColumnFilterInfoUpperBnd = "UpperBnd";
static bool columnFilterInfoToJson(const void* obj, cJSON* jFilter) {
const SColumnFilterInfo* filter = (const SColumnFilterInfo*)obj;
bool res = cJSON_AddNumberToObject(jFilter, jkColumnFilterInfoLowerRelOptr, filter->lowerRelOptr);
bool res = cJSON_AddNumberToObject(jFilter, jkColumnFilterInfoLowerRelOptr, filter->lowerRelOptr);
if (res) {
res = cJSON_AddNumberToObject(jFilter, jkColumnFilterInfoUpperRelOptr, filter->upperRelOptr);
}
@ -323,7 +330,7 @@ static const char* jkColumnInfoFilterList = "FilterList";
static bool columnInfoToJson(const void* obj, cJSON* jCol) {
const SColumnInfo* col = (const SColumnInfo*)obj;
bool res = cJSON_AddNumberToObject(jCol, jkColumnInfoColId, col->colId);
bool res = cJSON_AddNumberToObject(jCol, jkColumnInfoColId, col->colId);
if (res) {
res = cJSON_AddNumberToObject(jCol, jkColumnInfoType, col->type);
}
@ -331,8 +338,9 @@ static bool columnInfoToJson(const void* obj, cJSON* jCol) {
res = cJSON_AddNumberToObject(jCol, jkColumnInfoBytes, col->bytes);
}
if (res) { // TODO: temporarily disable it
// res = addRawArray(jCol, jkColumnInfoFilterList, columnFilterInfoToJson, col->flist.filterInfo, sizeof(SColumnFilterInfo), col->flist.numOfFilters);
if (res) { // TODO: temporarily disable it
// res = addRawArray(jCol, jkColumnInfoFilterList, columnFilterInfoToJson, col->flist.filterInfo,
// sizeof(SColumnFilterInfo), col->flist.numOfFilters);
}
return res;
@ -341,10 +349,11 @@ static bool columnInfoToJson(const void* obj, cJSON* jCol) {
static bool columnInfoFromJson(const cJSON* json, void* obj) {
SColumnInfo* col = (SColumnInfo*)obj;
col->colId = getNumber(json, jkColumnInfoColId);
col->type = getNumber(json, jkColumnInfoType);
col->type = getNumber(json, jkColumnInfoType);
col->bytes = getNumber(json, jkColumnInfoBytes);
int32_t size = 0;
bool res = fromRawArrayWithAlloc(json, jkColumnInfoFilterList, columnFilterInfoFromJson, (void**)&col->flist.filterInfo, sizeof(SColumnFilterInfo), &size);
bool res = fromRawArrayWithAlloc(json, jkColumnInfoFilterList, columnFilterInfoFromJson,
(void**)&col->flist.filterInfo, sizeof(SColumnFilterInfo), &size);
col->flist.numOfFilters = size;
return res;
}
@ -355,7 +364,7 @@ static const char* jkColumnInfo = "Info";
static bool columnToJson(const void* obj, cJSON* jCol) {
const SColumn* col = (const SColumn*)obj;
bool res = cJSON_AddNumberToObject(jCol, jkColumnTableId, col->uid);
bool res = cJSON_AddNumberToObject(jCol, jkColumnTableId, col->uid);
if (res) {
res = cJSON_AddNumberToObject(jCol, jkColumnFlag, col->flag);
}
@ -381,7 +390,7 @@ static const char* jkExprNodeRight = "Right";
static bool operatorToJson(const void* obj, cJSON* jOper) {
const tExprNode* exprInfo = (const tExprNode*)obj;
bool res = cJSON_AddNumberToObject(jOper, jkExprNodeOper, exprInfo->_node.optr);
bool res = cJSON_AddNumberToObject(jOper, jkExprNodeOper, exprInfo->_node.optr);
if (res) {
res = addObject(jOper, jkExprNodeLeft, exprNodeToJson, exprInfo->_node.pLeft);
}
@ -406,9 +415,10 @@ static const char* jkFunctionChild = "Child";
static bool functionToJson(const void* obj, cJSON* jFunc) {
const tExprNode* exprInfo = (const tExprNode*)obj;
bool res = cJSON_AddStringToObject(jFunc, jkFunctionName, exprInfo->_function.functionName);
bool res = cJSON_AddStringToObject(jFunc, jkFunctionName, exprInfo->_function.functionName);
if (res && NULL != exprInfo->_function.pChild) {
res = addRawArray(jFunc, jkFunctionChild, exprNodeToJson, exprInfo->_function.pChild, sizeof(tExprNode*), exprInfo->_function.num);
res = addRawArray(jFunc, jkFunctionChild, exprNodeToJson, exprInfo->_function.pChild, sizeof(tExprNode*),
exprInfo->_function.num);
}
return res;
}
@ -420,7 +430,8 @@ static bool functionFromJson(const cJSON* json, void* obj) {
if (NULL == exprInfo->_function.pChild) {
return false;
}
return fromRawArrayWithAlloc(json, jkFunctionChild, exprNodeFromJson, (void**)exprInfo->_function.pChild, sizeof(tExprNode*), &exprInfo->_function.num);
return fromRawArrayWithAlloc(json, jkFunctionChild, exprNodeFromJson, (void**)exprInfo->_function.pChild,
sizeof(tExprNode*), &exprInfo->_function.num);
}
static const char* jkVariantType = "Type";
@ -430,12 +441,12 @@ static const char* jkVariantValue = "Value";
static bool variantToJson(const void* obj, cJSON* jVar) {
const SVariant* var = (const SVariant*)obj;
bool res = cJSON_AddNumberToObject(jVar, jkVariantType, var->nType);
bool res = cJSON_AddNumberToObject(jVar, jkVariantType, var->nType);
if (res) {
res = cJSON_AddNumberToObject(jVar, jkVariantLen, var->nLen);
}
if (res) {
if (0/* in */) {
if (0 /* in */) {
res = addArray(jVar, jkVariantvalues, variantToJson, var->arr);
} else if (IS_NUMERIC_TYPE(var->nType)) {
res = cJSON_AddNumberToObject(jVar, jkVariantValue, var->d);
@ -450,7 +461,7 @@ static bool variantFromJson(const cJSON* json, void* obj) {
SVariant* var = (SVariant*)obj;
var->nType = getNumber(json, jkVariantType);
var->nLen = getNumber(json, jkVariantLen);
if (0/* in */) {
if (0 /* in */) {
return fromArray(json, jkVariantvalues, variantFromJson, &var->arr, sizeof(SVariant));
} else if (IS_NUMERIC_TYPE(var->nType)) {
var->d = getNumber(json, jkVariantValue);
@ -468,7 +479,7 @@ static const char* jkExprNodeValue = "Value";
static bool exprNodeToJson(const void* obj, cJSON* jExprInfo) {
const tExprNode* exprInfo = *(const tExprNode**)obj;
bool res = cJSON_AddNumberToObject(jExprInfo, jkExprNodeType, exprInfo->nodeType);
bool res = cJSON_AddNumberToObject(jExprInfo, jkExprNodeType, exprInfo->nodeType);
if (res) {
switch (exprInfo->nodeType) {
case TEXPR_BINARYEXPR_NODE:
@ -502,7 +513,8 @@ static bool exprNodeFromJson(const cJSON* json, void* obj) {
case TEXPR_FUNCTION_NODE:
return fromObject(json, jkExprNodeFunction, functionFromJson, exprInfo, false);
case TEXPR_COL_NODE:
return fromObjectWithAlloc(json, jkExprNodeColumn, schemaFromJson, (void**)&exprInfo->pSchema, sizeof(SSchema), false);
return fromObjectWithAlloc(json, jkExprNodeColumn, schemaFromJson, (void**)&exprInfo->pSchema, sizeof(SSchema),
false);
case TEXPR_VALUE_NODE:
return fromObject(json, jkExprNodeValue, variantFromJson, exprInfo->pVal, false);
default:
@ -518,7 +530,7 @@ static const char* jkSqlExprParams = "Params";
// token does not need to be serialized.
static bool sqlExprToJson(const void* obj, cJSON* jExpr) {
const SSqlExpr* expr = (const SSqlExpr*)obj;
bool res = addObject(jExpr, jkSqlExprSchema, schemaToJson, &expr->resSchema);
bool res = addObject(jExpr, jkSqlExprSchema, schemaToJson, &expr->resSchema);
if (res) {
res = addRawArray(jExpr, jkSqlExprColumns, columnToJson, expr->pColumns, sizeof(SColumn), expr->numOfCols);
}
@ -533,9 +545,10 @@ static bool sqlExprToJson(const void* obj, cJSON* jExpr) {
static bool sqlExprFromJson(const cJSON* json, void* obj) {
SSqlExpr* expr = (SSqlExpr*)obj;
bool res = fromObject(json, jkSqlExprSchema, schemaFromJson, &expr->resSchema, false);
bool res = fromObject(json, jkSqlExprSchema, schemaFromJson, &expr->resSchema, false);
if (res) {
res = fromRawArrayWithAlloc(json, jkSqlExprColumns, columnFromJson, (void**)&expr->pColumns, sizeof(SColumn), &expr->numOfCols);
res = fromRawArrayWithAlloc(json, jkSqlExprColumns, columnFromJson, (void**)&expr->pColumns, sizeof(SColumn),
&expr->numOfCols);
}
if (res) {
expr->interBytes = getNumber(json, jkSqlExprInterBytes);
@ -553,7 +566,7 @@ static const char* jkExprInfoExpr = "Expr";
static bool exprInfoToJson(const void* obj, cJSON* jExprInfo) {
const SExprInfo* exprInfo = (const SExprInfo*)obj;
bool res = addObject(jExprInfo, jkExprInfoBase, sqlExprToJson, &exprInfo->base);
bool res = addObject(jExprInfo, jkExprInfoBase, sqlExprToJson, &exprInfo->base);
if (res) {
res = addObject(jExprInfo, jkExprInfoExpr, exprNodeToJson, &exprInfo->pExpr);
}
@ -562,9 +575,10 @@ static bool exprInfoToJson(const void* obj, cJSON* jExprInfo) {
static bool exprInfoFromJson(const cJSON* json, void* obj) {
SExprInfo* exprInfo = (SExprInfo*)obj;
bool res = fromObject(json, jkExprInfoBase, sqlExprFromJson, &exprInfo->base, true);
bool res = fromObject(json, jkExprInfoBase, sqlExprFromJson, &exprInfo->base, true);
if (res) {
res = fromObjectWithAlloc(json, jkExprInfoExpr, exprNodeFromJson, (void**)&exprInfo->pExpr, sizeof(tExprNode), true);
res =
fromObjectWithAlloc(json, jkExprInfoExpr, exprNodeFromJson, (void**)&exprInfo->pExpr, sizeof(tExprNode), true);
}
return res;
}
@ -576,12 +590,12 @@ static bool timeWindowToJson(const void* obj, cJSON* json) {
const STimeWindow* win = (const STimeWindow*)obj;
char tmp[40] = {0};
snprintf(tmp, tListLen(tmp),"%"PRId64, win->skey);
snprintf(tmp, tListLen(tmp), "%" PRId64, win->skey);
bool res = cJSON_AddStringToObject(json, jkTimeWindowStartKey, tmp);
if (res) {
memset(tmp, 0, tListLen(tmp));
snprintf(tmp, tListLen(tmp),"%"PRId64, win->ekey);
snprintf(tmp, tListLen(tmp), "%" PRId64, win->ekey);
res = cJSON_AddStringToObject(json, jkTimeWindowEndKey, tmp);
}
return res;
@ -604,7 +618,7 @@ static bool scanNodeToJson(const void* obj, cJSON* json) {
const SScanPhyNode* pNode = (const SScanPhyNode*)obj;
char uid[40] = {0};
snprintf(uid, tListLen(uid), "%"PRIu64, pNode->uid);
snprintf(uid, tListLen(uid), "%" PRIu64, pNode->uid);
bool res = cJSON_AddStringToObject(json, jkScanNodeTableId, uid);
if (res) {
@ -629,11 +643,11 @@ static bool scanNodeToJson(const void* obj, cJSON* json) {
static bool scanNodeFromJson(const cJSON* json, void* obj) {
SScanPhyNode* pNode = (SScanPhyNode*)obj;
pNode->uid = getBigintFromString(json, jkScanNodeTableId);
pNode->uid = getBigintFromString(json, jkScanNodeTableId);
pNode->tableType = getNumber(json, jkScanNodeTableType);
pNode->count = getNumber(json, jkScanNodeTableCount);
pNode->order = getNumber(json, jkScanNodeTableOrder);
pNode->reverse = getNumber(json, jkScanNodeTableRevCount);
pNode->count = getNumber(json, jkScanNodeTableCount);
pNode->order = getNumber(json, jkScanNodeTableOrder);
pNode->reverse = getNumber(json, jkScanNodeTableRevCount);
return true;
}
@ -644,7 +658,7 @@ static const char* jkColIndexName = "Name";
static bool colIndexToJson(const void* obj, cJSON* json) {
const SColIndex* col = (const SColIndex*)obj;
bool res = cJSON_AddNumberToObject(json, jkColIndexColId, col->colId);
bool res = cJSON_AddNumberToObject(json, jkColIndexColId, col->colId);
if (res) {
res = cJSON_AddNumberToObject(json, jkColIndexColIndex, col->colIndex);
}
@ -673,7 +687,7 @@ static const char* jkAggNodeGroupByList = "GroupByList";
static bool aggNodeToJson(const void* obj, cJSON* json) {
const SAggPhyNode* agg = (const SAggPhyNode*)obj;
bool res = cJSON_AddNumberToObject(json, jkAggNodeAggAlgo, agg->aggAlgo);
bool res = cJSON_AddNumberToObject(json, jkAggNodeAggAlgo, agg->aggAlgo);
if (res) {
res = cJSON_AddNumberToObject(json, jkAggNodeAggSplit, agg->aggSplit);
}
@ -703,7 +717,7 @@ static const char* jkTableScanNodeTagsConditions = "TagsConditions";
static bool tableScanNodeToJson(const void* obj, cJSON* json) {
const STableScanPhyNode* scan = (const STableScanPhyNode*)obj;
bool res = scanNodeToJson(obj, json);
bool res = scanNodeToJson(obj, json);
if (res) {
res = cJSON_AddNumberToObject(json, jkTableScanNodeFlag, scan->scanFlag);
}
@ -718,7 +732,7 @@ static bool tableScanNodeToJson(const void* obj, cJSON* json) {
static bool tableScanNodeFromJson(const cJSON* json, void* obj) {
STableScanPhyNode* scan = (STableScanPhyNode*)obj;
bool res = scanNodeFromJson(json, obj);
bool res = scanNodeFromJson(json, obj);
if (res) {
scan->scanFlag = getNumber(json, jkTableScanNodeFlag);
}
@ -736,7 +750,7 @@ static const char* jkEpAddrPort = "Port";
static bool epAddrToJson(const void* obj, cJSON* json) {
const SEp* ep = (const SEp*)obj;
bool res = cJSON_AddStringToObject(json, jkEpAddrFqdn, ep->fqdn);
bool res = cJSON_AddStringToObject(json, jkEpAddrFqdn, ep->fqdn);
if (res) {
res = cJSON_AddNumberToObject(json, jkEpAddrPort, ep->port);
}
@ -750,46 +764,46 @@ static bool epAddrFromJson(const cJSON* json, void* obj) {
return true;
}
static const char* jkNodeAddrId = "NodeId";
static const char* jkNodeAddrInUse = "InUse";
static const char* jkNodeAddrId = "NodeId";
static const char* jkNodeAddrInUse = "InUse";
static const char* jkNodeAddrEpAddrs = "Ep";
static const char* jkNodeAddr = "NodeAddr";
static const char* jkNodeTaskId = "TaskId";
static const char* jkNodeAddr = "NodeAddr";
static const char* jkNodeTaskId = "TaskId";
static const char* jkNodeTaskSchedId = "SchedId";
static bool queryNodeAddrToJson(const void* obj, cJSON* json) {
const SQueryNodeAddr* pAddr = (const SQueryNodeAddr*) obj;
bool res = cJSON_AddNumberToObject(json, jkNodeAddrId, pAddr->nodeId);
const SQueryNodeAddr* pAddr = (const SQueryNodeAddr*)obj;
bool res = cJSON_AddNumberToObject(json, jkNodeAddrId, pAddr->nodeId);
if (res) {
res = cJSON_AddNumberToObject(json, jkNodeAddrInUse, pAddr->epset.inUse);
res = cJSON_AddNumberToObject(json, jkNodeAddrInUse, pAddr->epSet.inUse);
}
if (res) {
res = addRawArray(json, jkNodeAddrEpAddrs, epAddrToJson, pAddr->epset.eps, sizeof(SEp), pAddr->epset.numOfEps);
res = addRawArray(json, jkNodeAddrEpAddrs, epAddrToJson, pAddr->epSet.eps, sizeof(SEp), pAddr->epSet.numOfEps);
}
return res;
}
static bool queryNodeAddrFromJson(const cJSON* json, void* obj) {
SQueryNodeAddr* pAddr = (SQueryNodeAddr*) obj;
SQueryNodeAddr* pAddr = (SQueryNodeAddr*)obj;
pAddr->nodeId = getNumber(json, jkNodeAddrId);
pAddr->epset.inUse = getNumber(json, jkNodeAddrInUse);
pAddr->epSet.inUse = getNumber(json, jkNodeAddrInUse);
int32_t numOfEps = 0;
bool res = fromRawArray(json, jkNodeAddrEpAddrs, epAddrFromJson, pAddr->epset.eps, sizeof(SEp), &numOfEps);
pAddr->epset.numOfEps = numOfEps;
bool res = fromRawArray(json, jkNodeAddrEpAddrs, epAddrFromJson, pAddr->epSet.eps, sizeof(SEp), &numOfEps);
pAddr->epSet.numOfEps = numOfEps;
return res;
}
static bool nodeAddrToJson(const void* obj, cJSON* json) {
const SDownstreamSource* pSource = (const SDownstreamSource*) obj;
bool res = cJSON_AddNumberToObject(json, jkNodeTaskId, pSource->taskId);
const SDownstreamSource* pSource = (const SDownstreamSource*)obj;
bool res = cJSON_AddNumberToObject(json, jkNodeTaskId, pSource->taskId);
if (res) {
char t[30] = {0};
snprintf(t, tListLen(t), "%"PRIu64, pSource->schedId);
snprintf(t, tListLen(t), "%" PRIu64, pSource->schedId);
res = cJSON_AddStringToObject(json, jkNodeTaskSchedId, t);
}
@ -813,9 +827,10 @@ static const char* jkExchangeNodeSrcEndPoints = "SrcAddrs";
static bool exchangeNodeToJson(const void* obj, cJSON* json) {
const SExchangePhyNode* exchange = (const SExchangePhyNode*)obj;
bool res = cJSON_AddNumberToObject(json, jkExchangeNodeSrcTemplateId, exchange->srcTemplateId);
bool res = cJSON_AddNumberToObject(json, jkExchangeNodeSrcTemplateId, exchange->srcTemplateId);
if (res) {
res = addRawArray(json, jkExchangeNodeSrcEndPoints, nodeAddrToJson, exchange->pSrcEndPoints->pData, sizeof(SDownstreamSource), taosArrayGetSize(exchange->pSrcEndPoints));
res = addRawArray(json, jkExchangeNodeSrcEndPoints, nodeAddrToJson, exchange->pSrcEndPoints->pData,
sizeof(SDownstreamSource), taosArrayGetSize(exchange->pSrcEndPoints));
}
return res;
}
@ -823,7 +838,8 @@ static bool exchangeNodeToJson(const void* obj, cJSON* json) {
static bool exchangeNodeFromJson(const cJSON* json, void* obj) {
SExchangePhyNode* exchange = (SExchangePhyNode*)obj;
exchange->srcTemplateId = getNumber(json, jkExchangeNodeSrcTemplateId);
return fromInlineArray(json, jkExchangeNodeSrcEndPoints, nodeAddrFromJson, &exchange->pSrcEndPoints, sizeof(SDownstreamSource));
return fromInlineArray(json, jkExchangeNodeSrcEndPoints, nodeAddrFromJson, &exchange->pSrcEndPoints,
sizeof(SDownstreamSource));
}
static bool specificPhyNodeToJson(const void* obj, cJSON* json) {
@ -855,7 +871,7 @@ static bool specificPhyNodeToJson(const void* obj, cJSON* json) {
case OP_AllTimeWindow:
case OP_AllMultiTableTimeInterval:
case OP_Order:
break; // todo
break; // todo
case OP_Exchange:
return exchangeNodeToJson(obj, json);
default:
@ -893,7 +909,7 @@ static bool specificPhyNodeFromJson(const cJSON* json, void* obj) {
case OP_AllTimeWindow:
case OP_AllMultiTableTimeInterval:
case OP_Order:
break; // todo
break; // todo
case OP_Exchange:
return exchangeNodeFromJson(json, obj);
default:
@ -910,7 +926,7 @@ static const char* jkPnodeChildren = "Children";
// The 'pParent' field do not need to be serialized.
static bool phyNodeToJson(const void* obj, cJSON* jNode) {
const SPhyNode* phyNode = (const SPhyNode*)obj;
bool res = cJSON_AddNumberToObject(jNode, jkPnodeType, phyNode->info.type);
bool res = cJSON_AddNumberToObject(jNode, jkPnodeType, phyNode->info.type);
if (res) {
res = cJSON_AddStringToObject(jNode, jkPnodeName, phyNode->info.name);
}
@ -933,7 +949,7 @@ static bool phyNodeToJson(const void* obj, cJSON* jNode) {
}
static bool phyNodeFromJson(const cJSON* json, void* obj) {
SPhyNode* node = (SPhyNode*) obj;
SPhyNode* node = (SPhyNode*)obj;
node->info.type = getNumber(json, jkPnodeType);
node->info.name = opTypeToOpName(node->info.type);
@ -959,7 +975,7 @@ static const char* jkInserterDataSize = "DataSize";
static bool inserterToJson(const void* obj, cJSON* json) {
const SDataInserter* inserter = (const SDataInserter*)obj;
bool res = cJSON_AddNumberToObject(json, jkInserterNumOfTables, inserter->numOfTables);
bool res = cJSON_AddNumberToObject(json, jkInserterNumOfTables, inserter->numOfTables);
if (res) {
res = cJSON_AddNumberToObject(json, jkInserterDataSize, inserter->size);
}
@ -1005,7 +1021,7 @@ static const char* jkDataSinkSchema = "Schema";
static bool dataSinkToJson(const void* obj, cJSON* json) {
const SDataSink* dsink = (const SDataSink*)obj;
bool res = cJSON_AddStringToObject(json, jkDataSinkName, dsink->info.name);
bool res = cJSON_AddStringToObject(json, jkDataSinkName, dsink->info.name);
if (res) {
res = addObject(json, dsink->info.name, specificDataSinkToJson, dsink);
}
@ -1034,7 +1050,7 @@ static bool subplanIdToJson(const void* obj, cJSON* jId) {
const SSubplanId* id = (const SSubplanId*)obj;
char ids[40] = {0};
snprintf(ids, tListLen(ids), "%"PRIu64, id->queryId);
snprintf(ids, tListLen(ids), "%" PRIu64, id->queryId);
bool res = cJSON_AddStringToObject(jId, jkIdQueryId, ids);
if (res) {
@ -1049,9 +1065,9 @@ static bool subplanIdToJson(const void* obj, cJSON* jId) {
static bool subplanIdFromJson(const cJSON* json, void* obj) {
SSubplanId* id = (SSubplanId*)obj;
id->queryId = getBigintFromString(json, jkIdQueryId);
id->queryId = getBigintFromString(json, jkIdQueryId);
id->templateId = getNumber(json, jkIdTemplateId);
id->subplanId = getNumber(json, jkIdSubplanId);
id->subplanId = getNumber(json, jkIdSubplanId);
return true;
}
@ -1094,9 +1110,10 @@ static SSubplan* subplanFromJson(const cJSON* json) {
}
if (res) {
res = fromObjectWithAlloc(json, jkSubplanDataSink, dataSinkFromJson, (void**)&subplan->pDataSink, sizeof(SDataSink), false);
res = fromObjectWithAlloc(json, jkSubplanDataSink, dataSinkFromJson, (void**)&subplan->pDataSink, sizeof(SDataSink),
false);
}
if (!res) {
qDestroySubplan(subplan);
return NULL;
@ -1137,15 +1154,15 @@ int32_t stringToSubplan(const char* str, SSubplan** subplan) {
cJSON* qDagToJson(const SQueryDag* pDag) {
cJSON* pRoot = cJSON_CreateObject();
if(pRoot == NULL) {
if (pRoot == NULL) {
return NULL;
}
cJSON_AddNumberToObject(pRoot, "Number", pDag->numOfSubplans);
cJSON_AddNumberToObject(pRoot, "QueryId", pDag->queryId);
cJSON *pLevels = cJSON_CreateArray();
if(pLevels == NULL) {
cJSON* pLevels = cJSON_CreateArray();
if (pLevels == NULL) {
cJSON_Delete(pRoot);
return NULL;
}
@ -1153,19 +1170,19 @@ cJSON* qDagToJson(const SQueryDag* pDag) {
cJSON_AddItemToObject(pRoot, "Subplans", pLevels);
size_t level = taosArrayGetSize(pDag->pSubplans);
for(size_t i = 0; i < level; i++) {
for (size_t i = 0; i < level; i++) {
const SArray* pSubplans = (const SArray*)taosArrayGetP(pDag->pSubplans, i);
size_t num = taosArrayGetSize(pSubplans);
cJSON* plansOneLevel = cJSON_CreateArray();
if(plansOneLevel == NULL) {
size_t num = taosArrayGetSize(pSubplans);
cJSON* plansOneLevel = cJSON_CreateArray();
if (plansOneLevel == NULL) {
cJSON_Delete(pRoot);
return NULL;
}
cJSON_AddItemToArray(pLevels, plansOneLevel);
for(size_t j = 0; j < num; j++) {
for (size_t j = 0; j < num; j++) {
cJSON* pSubplan = subplanToJson((const SSubplan*)taosArrayGetP(pSubplans, j));
if(pSubplan == NULL) {
if (pSubplan == NULL) {
cJSON_Delete(pRoot);
return NULL;
}
@ -1183,22 +1200,22 @@ char* qDagToString(const SQueryDag* pDag) {
SQueryDag* qJsonToDag(const cJSON* pRoot) {
SQueryDag* pDag = malloc(sizeof(SQueryDag));
if(pDag == NULL) {
if (pDag == NULL) {
return NULL;
}
pDag->numOfSubplans = cJSON_GetNumberValue(cJSON_GetObjectItem(pRoot, "Number"));
pDag->queryId = cJSON_GetNumberValue(cJSON_GetObjectItem(pRoot, "QueryId"));
pDag->pSubplans = taosArrayInit(0, sizeof(SArray));
pDag->pSubplans = taosArrayInit(0, sizeof(void*));
if (pDag->pSubplans == NULL) {
free(pDag);
return NULL;
}
cJSON* pLevels = cJSON_GetObjectItem(pRoot, "Subplans");
int level = cJSON_GetArraySize(pLevels);
for(int i = 0; i < level; i++) {
int level = cJSON_GetArraySize(pLevels);
for (int i = 0; i < level; i++) {
SArray* plansOneLevel = taosArrayInit(0, sizeof(void*));
if(plansOneLevel == NULL) {
for(int j = 0; j < i; j++) {
if (plansOneLevel == NULL) {
for (int j = 0; j < i; j++) {
taosArrayDestroy(taosArrayGetP(pDag->pSubplans, j));
}
taosArrayDestroy(pDag->pSubplans);
@ -1206,13 +1223,13 @@ SQueryDag* qJsonToDag(const cJSON* pRoot) {
return NULL;
}
cJSON* pItem = cJSON_GetArrayItem(pLevels, i);
int sz = cJSON_GetArraySize(pItem);
for(int j = 0; j < sz; j++) {
cJSON* pSubplanJson = cJSON_GetArrayItem(pItem, j);
int sz = cJSON_GetArraySize(pItem);
for (int j = 0; j < sz; j++) {
cJSON* pSubplanJson = cJSON_GetArrayItem(pItem, j);
SSubplan* pSubplan = subplanFromJson(pSubplanJson);
taosArrayPush(plansOneLevel, &pSubplan);
}
taosArrayPush(pDag->pSubplans, plansOneLevel);
taosArrayPush(pDag->pSubplans, &plansOneLevel);
}
return pDag;
}

View File

@ -423,13 +423,13 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pTask->plan->execNode.epset.numOfEps > 0) {
if (pTask->plan->execNode.epSet.numOfEps > 0) {
if (NULL == taosArrayPush(pTask->candidateAddrs, &pTask->plan->execNode)) {
SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCH_TASK_DLOG("use execNode from plan as candidate addr, numOfEps:%d", pTask->plan->execNode.epset.numOfEps);
SCH_TASK_DLOG("use execNode from plan as candidate addr, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps);
return TSDB_CODE_SUCCESS;
}
@ -1061,7 +1061,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
isCandidateAddr = true;
}
SEpSet epSet = addr->epset;
SEpSet epSet = addr->epSet;
switch (msgType) {
case TDMT_VND_CREATE_TABLE:

View File

@ -104,8 +104,8 @@ void schtBuildQueryDag(SQueryDag *dag) {
scanPlan->type = QUERY_TYPE_SCAN;
scanPlan->execNode.nodeId = 1;
scanPlan->execNode.epset.inUse = 0;
addEpIntoEpSet(&scanPlan->execNode.epset, "ep0", 6030);
scanPlan->execNode.epSet.inUse = 0;
addEpIntoEpSet(&scanPlan->execNode.epSet, "ep0", 6030);
scanPlan->pChildren = NULL;
scanPlan->level = 1;
@ -118,7 +118,7 @@ void schtBuildQueryDag(SQueryDag *dag) {
mergePlan->id.subplanId = 0x5555555555;
mergePlan->type = QUERY_TYPE_MERGE;
mergePlan->level = 0;
mergePlan->execNode.epset.numOfEps = 0;
mergePlan->execNode.epSet.numOfEps = 0;
mergePlan->pChildren = taosArrayInit(1, POINTER_BYTES);
mergePlan->pParents = NULL;
@ -157,8 +157,8 @@ void schtBuildInsertDag(SQueryDag *dag) {
insertPlan[0].level = 0;
insertPlan[0].execNode.nodeId = 1;
insertPlan[0].execNode.epset.inUse = 0;
addEpIntoEpSet(&insertPlan[0].execNode.epset, "ep0", 6030);
insertPlan[0].execNode.epSet.inUse = 0;
addEpIntoEpSet(&insertPlan[0].execNode.epSet, "ep0", 6030);
insertPlan[0].pChildren = NULL;
insertPlan[0].pParents = NULL;
@ -173,8 +173,8 @@ void schtBuildInsertDag(SQueryDag *dag) {
insertPlan[1].level = 0;
insertPlan[1].execNode.nodeId = 1;
insertPlan[1].execNode.epset.inUse = 0;
addEpIntoEpSet(&insertPlan[1].execNode.epset, "ep0", 6030);
insertPlan[1].execNode.epSet.inUse = 0;
addEpIntoEpSet(&insertPlan[1].execNode.epSet, "ep0", 6030);
insertPlan[1].pChildren = NULL;
insertPlan[1].pParents = NULL;

View File

@ -12,6 +12,7 @@ target_link_libraries(
PUBLIC os
PUBLIC util
PUBLIC common
PUBLIC zlib
)
if (${BUILD_WITH_UV_TRANS})
if (${BUILD_WITH_UV})

View File

@ -111,24 +111,22 @@ _OVER:
}
#ifdef USE_UV
#include <uv.h>
static void clientConnCb(uv_connect_t* req, int32_t status) {
if (status < 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("Connection error %s\n", uv_strerror(status));
uv_close((uv_handle_t*)req->handle, NULL);
return;
}
// impl later
uv_buf_t* wb = req->data;
if (wb == NULL) {
uv_close((uv_handle_t*)req->handle, NULL);
}
assert(wb != NULL);
uv_write_t write_req;
uv_write(&write_req, req->handle, wb, 2, NULL);
uv_close((uv_handle_t*)req->handle, NULL);
}
int32_t taosSendHttpReport(const char* server, uint16_t port, const char* pCont, int32_t contLen, EHttpCompFlag flag) {
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
uint32_t ipv4 = taosGetIpv4FromFqdn(server);
if (ipv4 == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);

View File

@ -7,4 +7,4 @@ target_include_directories(
)
target_link_libraries(
os pthread dl rt m
)
)

View File

@ -542,7 +542,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) {
}
if (pFile->fp != NULL) return fflush(pFile->fp);
if (pFile->fp >= 0) return fsync(pFile->fd);
if (pFile->fd >= 0) return fsync(pFile->fd);
return 0;
#endif

View File

@ -34,8 +34,6 @@
#include <unistd.h>
#endif
#ifndef USE_UV
// typedef struct TdSocketServer {
// #if SOCKET_WITH_LOCK
// pthread_rwlock_t rwlock;
@ -131,18 +129,8 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
return 0;
}
void taosIgnSIGPIPE() { signal(SIGPIPE, SIG_IGN); }
void taosSetMaskSIGPIPE() {
sigset_t signal_mask;
sigemptyset(&signal_mask);
sigaddset(&signal_mask, SIGPIPE);
int32_t rc = pthread_sigmask(SIG_SETMASK, &signal_mask, NULL);
if (rc != 0) {
//printf("failed to setmask SIGPIPE");
}
}
#endif
@ -223,9 +211,6 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
return 0;
}
void taosIgnSIGPIPE() {}
void taosSetMaskSIGPIPE() {}
int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen) {
if (level == SOL_SOCKET && optname == TCP_KEEPCNT) {
return 0;
@ -282,98 +267,6 @@ uint64_t htonll(uint64_t val) { return (((uint64_t)htonl(val)) << 32) + htonl(va
#define TCP_CONN_TIMEOUT 3000 // conn timeout
int32_t taosGetFqdn(char *fqdn) {
char hostname[1024];
hostname[1023] = '\0';
if (gethostname(hostname, 1023) == -1) {
//printf("failed to get hostname, reason:%s", strerror(errno));
return -1;
}
struct addrinfo hints = {0};
struct addrinfo *result = NULL;
#ifdef __APPLE__
// on macosx, hostname -f has the form of xxx.local
// which will block getaddrinfo for a few seconds if AI_CANONNAME is set
// thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return
// immediately
hints.ai_family = AF_INET;
#else // __APPLE__
hints.ai_flags = AI_CANONNAME;
#endif // __APPLE__
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
if (!result) {
//printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
return -1;
}
#ifdef __APPLE__
// refer to comments above
strcpy(fqdn, hostname);
#else // __APPLE__
strcpy(fqdn, result->ai_canonname);
#endif // __APPLE__
freeaddrinfo(result);
return 0;
}
uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
struct addrinfo hints = {0};
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
struct addrinfo *result = NULL;
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
if (result) {
struct sockaddr * sa = result->ai_addr;
struct sockaddr_in *si = (struct sockaddr_in *)sa;
struct in_addr ia = si->sin_addr;
uint32_t ip = ia.s_addr;
freeaddrinfo(result);
return ip;
} else {
#ifdef EAI_SYSTEM
if (ret == EAI_SYSTEM) {
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
} else {
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
}
#else
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
#endif
return 0xFFFFFFFF;
}
}
// Function converting an IP address string to an uint32_t.
uint32_t ip2uint(const char *const ip_addr) {
char ip_addr_cpy[20];
char ip[5];
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
char *s_start, *s_end;
s_start = ip_addr_cpy;
s_end = ip_addr_cpy;
int32_t k;
for (k = 0; *s_start != '\0'; s_start = s_end) {
for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) {
}
if (*s_end == '.') {
*s_end = '\0';
s_end++;
}
ip[k++] = (char)atoi(s_start);
}
ip[k] = '\0';
return *((uint32_t *)ip);
}
int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) {
int32_t nleft, nwritten;
char * ptr = (char *)buf;
@ -754,10 +647,6 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
return sockFd;
}
void tinet_ntoa(char *ipstr, uint32_t ip) {
sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
}
#define COPY_SIZE 32768
// sendfile shall be used
@ -795,12 +684,9 @@ int64_t taosCopyFds(SOCKET sfd, int32_t dfd, int64_t len) {
return len;
}
#endif
#if !(defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32))
void taosBlockSIGPIPE() {
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
#else
sigset_t signal_mask;
sigemptyset(&signal_mask);
sigaddset(&signal_mask, SIGPIPE);
@ -808,7 +694,122 @@ void taosBlockSIGPIPE() {
if (rc != 0) {
//printf("failed to block SIGPIPE");
}
#endif
}
uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
struct addrinfo hints = {0};
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
struct addrinfo *result = NULL;
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
if (result) {
struct sockaddr * sa = result->ai_addr;
struct sockaddr_in *si = (struct sockaddr_in *)sa;
struct in_addr ia = si->sin_addr;
uint32_t ip = ia.s_addr;
freeaddrinfo(result);
return ip;
} else {
#ifdef EAI_SYSTEM
if (ret == EAI_SYSTEM) {
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
} else {
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
}
#else
void taosBlockSIGPIPE() {}
#endif
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
#endif
return 0xFFFFFFFF;
}
}
int32_t taosGetFqdn(char *fqdn) {
char hostname[1024];
hostname[1023] = '\0';
if (gethostname(hostname, 1023) == -1) {
//printf("failed to get hostname, reason:%s", strerror(errno));
return -1;
}
struct addrinfo hints = {0};
struct addrinfo *result = NULL;
#ifdef __APPLE__
// on macosx, hostname -f has the form of xxx.local
// which will block getaddrinfo for a few seconds if AI_CANONNAME is set
// thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return
// immediately
hints.ai_family = AF_INET;
#else // __APPLE__
hints.ai_flags = AI_CANONNAME;
#endif // __APPLE__
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
if (!result) {
//printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
return -1;
}
#ifdef __APPLE__
// refer to comments above
strcpy(fqdn, hostname);
#else // __APPLE__
strcpy(fqdn, result->ai_canonname);
#endif // __APPLE__
freeaddrinfo(result);
return 0;
}
// Function converting an IP address string to an uint32_t.
uint32_t ip2uint(const char *const ip_addr) {
char ip_addr_cpy[20];
char ip[5];
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
char *s_start, *s_end;
s_start = ip_addr_cpy;
s_end = ip_addr_cpy;
int32_t k;
for (k = 0; *s_start != '\0'; s_start = s_end) {
for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) {
}
if (*s_end == '.') {
*s_end = '\0';
s_end++;
}
ip[k++] = (char)atoi(s_start);
}
ip[k] = '\0';
return *((uint32_t *)ip);
}
void tinet_ntoa(char *ipstr, uint32_t ip) {
sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
}
void taosIgnSIGPIPE() {
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
#else
signal(SIGPIPE, SIG_IGN);
#endif
}
void taosSetMaskSIGPIPE() {
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
#else
sigset_t signal_mask;
sigemptyset(&signal_mask);
sigaddset(&signal_mask, SIGPIPE);
int32_t rc = pthread_sigmask(SIG_SETMASK, &signal_mask, NULL);
if (rc != 0) {
//printf("failed to setmask SIGPIPE");
}
#endif
}

View File

@ -10,7 +10,7 @@ target_link_libraries(
util
PRIVATE os
PUBLIC lz4_static
PUBLIC api cjson zlib
PUBLIC api cjson
)
if(${BUILD_TEST})

1
tests

@ -1 +0,0 @@
Subproject commit 904e6f0e152e8fe61edfe0a0a9ae497cfde2a72c

4
tests/CMakeLists.txt Normal file
View File

@ -0,0 +1,4 @@
#ADD_SUBDIRECTORY(examples/c)
ADD_SUBDIRECTORY(tsim)
ADD_SUBDIRECTORY(test/c)
#ADD_SUBDIRECTORY(comparisonTest/tdengine)

View File

@ -0,0 +1,243 @@
### Prepare development environment
1. sudo apt install build-essential cmake net-tools python-pip python-setuptools python3-pip python3-setuptools valgrind psmisc curl
2. git clone <https://github.com/taosdata/TDengine>; cd TDengine
3. mkdir debug; cd debug; cmake ..; make; sudo make install
4. pip install ../src/connector/python; pip3 install ../src/connector/python
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 has not been officially supported by the Python
> Software Foundation since January 1, 2020, new test case development should
> be guaranteed to run correctly on Python3; keep cases compatible with
> Python2 only where it adds no extra burden.
>
> If you use a newer Linux distribution such as Ubuntu 20.04, which no longer
> includes Python2, please do not install the Python2-related packages.
>
> <https://nakedsecurity.sophos.com/2020/01/03/python-is-dead-long-live-python/>
### How to run Python test suite
1. cd \<TDengine\>/tests/pytest
2. ./smoketest.sh \# for smoke test
3. ./smoketest.sh -g \# for memory leak detection test with valgrind
4. ./fulltest.sh \# for full test
> Note1: The TDengine daemon's configuration and data files are stored in the
> \<TDengine\>/sim directory. For historical reasons, this is the same place
> the TSIM scripts use. So once a TSIM script has run with sudo privileges,
> the directory belongs to TSIM and a normal user running the Python scripts
> can no longer write to it. Remove the directory completely before running
> the Python test cases. We should consider using two different locations for
> TSIM and for the Python scripts.

> Note2: If you need to debug a crash with a core dump, manually edit
> smoketest.sh or fulltest.sh to add "ulimit -c unlimited" before the script
> line. You can then look for the core file in \<TDengine\>/tests/pytest after
> the program crashes.
### How to add a new test case
**1. TSIM test cases:**
TSIM is the testing framework that has been used internally. As a legacy
system it is still used to run the test cases we developed in the past, but
we are switching to Python for new test cases and gradually abandoning TSIM.
**2. Python test cases:**
**2.1 Please refer to \<TDengine\>/tests/pytest/insert/basic.py to add a new
test case.** A new test case must implement 3 functions: self.init() and
self.stop(), which can simply be copied from insert/basic.py, and self.run(),
which contains the actual test logic. You can refer to the code in the util
directory for more information; a minimal sketch follows below.
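
The following is only an orientation sketch of such a case: the import paths,
the init() signature, and tdSql.close() are assumptions here, so copy the real
boilerplate from insert/basic.py; the tdLog/tdSql helpers it calls are the
ones described in sections 2.4 and 2.5 below.

    # A hypothetical minimal test case, modeled on the description above.
    # Copy self.init() and self.stop() from insert/basic.py; only run() is new.
    from util.log import tdLog
    from util.sql import tdSql

    class TDTestCase:
        def init(self, conn, logSql):
            tdLog.debug("start to execute %s" % __file__)
            tdSql.init(conn.cursor(), logSql)

        def run(self):
            tdSql.prepare()  # recreate the test database db (see util/sql.py)
            tdSql.execute("create table t1 (ts timestamp, v int)")
            tdSql.execute("insert into t1 values (now, 1)")
            tdSql.query("select * from t1")
            tdSql.checkRows(1)  # exactly one row was inserted above

        def stop(self):
            tdSql.close()  # assumed cleanup helper; copy the real stop()
            tdLog.success("%s successfully executed" % __file__)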
**2.2 Edit smoketest.sh to add the path and filename of the new test case**
Note: The Python test framework may continue to be improved in the future,
hopefully to provide more functionality and make test cases easier to write.
The way of writing test cases described above may change accordingly.
**2.3 What test.py does in detail:**
test.py is the entry program for test case execution and monitoring. It
accepts the following options:

\-f --file, specifies the test case file to be executed
\-p --path, specifies the deployment path
\-m --master, specifies the master server IP for cluster deployment
\-c --cluster, tests the cluster functionality
\-s --stop, terminates all running nodes
\-g --valgrind, loads valgrind for the memory leak detection test
\-h --help, displays help
**2.4 What util/log.py does in detail:**
log.py is quite simple; the main thing is that it can print output in
different colors as needed. Call success() when a test case executes
successfully; it prints green text. Call exit() on a test failure; it prints
red text and exits the program.
**util/log.py**
...
    def info(self, info):
        printf("%s %s" % (datetime.datetime.now(), info))
 
    def sleep(self, sec):
        printf("%s sleep %d seconds" % (datetime.datetime.now(), sec))
        time.sleep(sec)
 
    def debug(self, err):
        printf("\\033[1;36m%s %s\\033[0m" % (datetime.datetime.now(), err))
 
    def success(self, info):
        printf("\\033[1;32m%s %s\\033[0m" % (datetime.datetime.now(), info))
 
    def notice(self, err):
        printf("\\033[1;33m%s %s\\033[0m" % (datetime.datetime.now(), err))
 
    def exit(self, err):
        printf("\\033[1;31m%s %s\\033[0m" % (datetime.datetime.now(), err))
        sys.exit(1)
 
    def printNoPrefix(self, info):
        printf("\\033[1;36m%s\\033[0m" % (info))
...
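
A short usage sketch, based only on the methods excerpted above; the log
messages themselves are illustrative:

    # Hypothetical calls from a test case, using only the methods shown above.
    tdLog.info("preparing database: db")  # plain, uncolored output
    tdLog.sleep(2)                        # logs, then sleeps for 2 seconds
    tdLog.debug("dumping state")          # cyan text
    tdLog.notice("retrying query once")   # yellow text
    tdLog.success("case passed")          # green text; call when a case passes
    # tdLog.exit("unexpected result")     # red text, then sys.exit(1); on failure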
**2.5 What util/sql.py does in detail:**
sql.py is mainly used to execute SQL statements that manipulate the database.
The code is excerpted and commented as follows:
**util/sql.py**
\# prepare() is mainly used to set up the tables and data for testing, and to
create the database db used by the tests. Do not call prepare() if you need to
test database operation commands themselves.
def prepare(self):
tdLog.info("prepare database:db")
self.cursor.execute('reset query cache')
self.cursor.execute('drop database if exists db')
self.cursor.execute('create database db')
self.cursor.execute('use db')
...
\# query() is mainly used to execute select statements with valid syntax
def query(self, sql):
...
\# error() is mainly used to execute a select statement with invalid syntax;
the resulting error is caught and treated as the expected behavior. If no
error is raised, the test has failed.
def error()
...
\# checkRows() checks the number of rows returned after calling
query(select ...)
def checkRows(self, expectRows):
...
\# checkData() checks the result data returned after calling query(select ...);
a mismatch with the expected value is treated as a test failure
def checkData(self, row, col, data):
...
\# getData() returns the result data after calling query(select ...)
def getData(self, row, col):
...
\# execute() executes a sql statement and returns the number of affected rows
def execute(self, sql):
...
\# executeTimes() executes the same sql statement multiple times
def executeTimes(self, sql, times):
...
\# checkAffectedRows() checks whether the number of affected rows is as expected
def checkAffectedRows(self, expectAffectedRows):
...
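
Putting the helpers together, a hypothetical run() body might look like the
sketch below; the table name and SQL statements are illustrative only.

    # Hypothetical run() body exercising the helpers documented above.
    tdSql.prepare()                          # reset query cache, recreate db
    tdSql.execute("create table t1 (ts timestamp, v int)")
    tdSql.execute("insert into t1 values (now, 1)")
    tdSql.execute("insert into t1 values (now+1s, 2)")
    tdSql.query("select * from t1")
    tdSql.checkRows(2)                       # two rows were inserted above
    tdSql.checkData(0, 1, 1)                 # row 0, column 1 should equal 1
    v = tdSql.getData(1, 1)                  # fetch the value 2
    tdSql.error("selct * from t1")           # invalid syntax must raise an error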
### CI submission acceptance principles
- Every commit / PR must compile. Warnings are currently treated as
errors, so warnings must also be resolved.
- Test cases that already exist must pass.
- Because CI is essential to the build and automated test procedure, a
test case must be tested manually before it is added, with as many
iterations as possible, to ensure that it produces stable and reliable
results once added.
> Note: In the future, stress testing, performance testing, code style checks,
> and other features will be added on top of functional testing, according to
> requirements and test development progress.

328
tests/Jenkinsfile vendored Normal file
View File

@ -0,0 +1,328 @@
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python
'''
return 1
}
pipeline {
agent none
environment{
WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'slad1'}
steps {
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
stage('test_b1') {
agent{label 'slad2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
}
}
stage('test_crash_gen') {
agent{label "slad3"}
steps {
pre_test()
sh '''
cd ${WKC}/tests/pytest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh'''
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package >/dev/null
java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
cd ${JENKINS_HOME}/workspace/nodejs
node nodejsChecker.js host=localhost
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
sh '''
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
stage('test_valgrind') {
agent{label "slad4"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
nohup taosd >/dev/null &
sleep 10
python3 concurrent_inquiry.py -c 1
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}

336
tests/mas/Jenkinsfile vendored Normal file
View File

@ -0,0 +1,336 @@
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
pipeline {
agent none
environment{
WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'slam1'}
steps {
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
stage('test_b1') {
agent{label 'slam2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
}
}
stage('test_crash_gen') {
agent{label "slam3"}
steps {
pre_test()
sh '''
cd ${WKC}/tests/pytest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh'''
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single -DskipTests >/dev/null
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true >/dev/null
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
cd ${JENKINS_HOME}/workspace/nodejs
node nodejsChecker.js host=localhost
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
sh '''
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
stage('test_valgrind') {
agent{label "slam4"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
nohup taosd >/dev/null &
sleep 10
python3 concurrent_inquiry.py -c 1
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh > mem-error-out.log 2>&1
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}

200
tests/parallel_test/Jenkinsfile vendored Normal file
View File

@ -0,0 +1,200 @@
import hudson.model.Result
import hudson.model.*;
import jenkins.model.CauseOfInterruption
node {
}
def skipbuild=0
def win_stop=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
def jobs = Jenkins.instance.getItemByFullName(currentJobName)
def builds = jobs.getBuilds()
for (build in builds) {
if (!build.isBuilding()) {
continue;
}
if (currentBuildNumber == build.getNumber().toInteger()) {
continue;
}
build.doKill() // force-kill the stale build; doTerm() would request a normal termination instead
}
}
// abort previous build
abortPreviousBuilds()
def abort_previous(){
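// Jenkins milestone semantics: when a newer build passes milestone N, older builds that passed an earlier milestone are aborted, so only the latest PR build proceeds.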
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
def pre_test(){
sh'hostname'
sh '''
sudo rmtaos || echo "taosd has not installed"
'''
sh '''
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WKC}
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
git checkout 2.0
'''
}
else if(env.CHANGE_TARGET == '3.0'){
sh '''
cd ${WKC}
git checkout 3.0
'''
}
else{
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
export TZ=Asia/Harbin
date
rm -rf debug
mkdir debug
cd debug
cmake .. > /dev/null
make -j4 > /dev/null
'''
return 1
}
pipeline {
agent none
options { skipDefaultCheckout() }
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDengine'
}
stages {
stage('pre_build'){
agent{label 'slave3_0'}
options { skipDefaultCheckout() }
when {
changeRequest()
}
steps {
script{
abort_previous()
abortPreviousBuilds()
}
timeout(time: 45, unit: 'MINUTES'){
pre_test()
sh'''
cd ${WKC}/tests
./test-all.sh b1fq
'''
sh'''
cd ${WKC}/debug
ctest
'''
}
}
}
}
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>""",
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>""",
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
}
}

View File

@ -0,0 +1,176 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import datetime
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""  # avoid an unbound name when the binary is not found
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
tdSql.execute("create database timezone")
tdSql.execute("use timezone")
tdSql.execute("create stable st (ts timestamp, id int ) tags (index int)")
tdSql.execute("insert into tb0 using st tags (1) values ('2021-07-01 00:00:00.000',0)")
tdSql.query("select ts from tb0")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb1 using st tags (1) values ('2021-07-01T00:00:00.000+07:50',1)")
tdSql.query("select ts from tb1")
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
tdSql.execute("insert into tb2 using st tags (1) values ('2021-07-01T00:00:00.000+08:00',2)")
tdSql.query("select ts from tb2")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb3 using st tags (1) values ('2021-07-01T00:00:00.000Z',3)")
tdSql.query("select ts from tb3")
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
tdSql.execute("insert into tb4 using st tags (1) values ('2021-07-01 00:00:00.000+07:50',4)")
tdSql.query("select ts from tb4")
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
tdSql.execute("insert into tb5 using st tags (1) values ('2021-07-01 00:00:00.000Z',5)")
tdSql.query("select ts from tb5")
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
tdSql.execute("insert into tb6 using st tags (1) values ('2021-07-01T00:00:00.000+0800',6)")
tdSql.query("select ts from tb6")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb7 using st tags (1) values ('2021-07-01 00:00:00.000+0800',7)")
tdSql.query("select ts from tb7")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb8 using st tags (1) values ('2021-07-0100:00:00.000',8)")
tdSql.query("select ts from tb8")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb9 using st tags (1) values ('2021-07-0100:00:00.000+0800',9)")
tdSql.query("select ts from tb9")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb10 using st tags (1) values ('2021-07-0100:00:00.000+08:00',10)")
tdSql.query("select ts from tb10")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
tdSql.execute("insert into tb11 using st tags (1) values ('2021-07-0100:00:00.000+07:00',11)")
tdSql.query("select ts from tb11")
tdSql.checkData(0, 0, "2021-07-01 01:00:00.000")
tdSql.execute("insert into tb12 using st tags (1) values ('2021-07-0100:00:00.000+0700',12)")
tdSql.query("select ts from tb12")
tdSql.checkData(0, 0, "2021-07-01 01:00:00.000")
tdSql.execute("insert into tb13 using st tags (1) values ('2021-07-0100:00:00.000+07:12',13)")
tdSql.query("select ts from tb13")
tdSql.checkData(0, 0, "2021-07-01 00:48:00.000")
tdSql.execute("insert into tb14 using st tags (1) values ('2021-07-0100:00:00.000+712',14)")
tdSql.query("select ts from tb14")
tdSql.checkData(0, 0, "2021-06-28 08:58:00.000")
tdSql.execute("insert into tb15 using st tags (1) values ('2021-07-0100:00:00.000Z',15)")
tdSql.query("select ts from tb15")
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
tdSql.execute("insert into tb16 using st tags (1) values ('2021-7-1 00:00:00.000Z',16)")
tdSql.query("select ts from tb16")
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
tdSql.execute("insert into tb17 using st tags (1) values ('2021-07-0100:00:00.000+0750',17)")
tdSql.query("select ts from tb17")
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
tdSql.execute("insert into tb18 using st tags (1) values ('2021-07-0100:00:00.000+0752',18)")
tdSql.query("select ts from tb18")
tdSql.checkData(0, 0, "2021-07-01 00:08:00.000")
tdSql.execute("insert into tb19 using st tags (1) values ('2021-07-0100:00:00.000+075',19)")
tdSql.query("select ts from tb19")
tdSql.checkData(0, 0, "2021-07-01 00:55:00.000")
tdSql.execute("insert into tb20 using st tags (1) values ('2021-07-0100:00:00.000+75',20)")
tdSql.query("select ts from tb20")
tdSql.checkData(0, 0, "2021-06-28 05:00:00.000")
tdSql.execute("insert into tb21 using st tags (1) values ('2021-7-1 1:1:1.234+075',21)")
tdSql.query("select ts from tb21")
tdSql.checkData(0, 0, "2021-07-01 01:56:01.234")
tdSql.execute("insert into tb22 using st tags (1) values ('2021-7-1T1:1:1.234+075',22)")
tdSql.query("select ts from tb22")
tdSql.checkData(0, 0, "2021-07-01 01:56:01.234")
tdSql.execute("insert into tb23 using st tags (1) values ('2021-7-131:1:1.234+075',22)")
tdSql.query("select ts from tb23")
tdSql.checkData(0, 0, "2021-07-13 01:56:01.234")
tdSql.error("insert into tberror using st tags (1) values ('20210701 00:00:00.000+0800',0)")
tdSql.error("insert into tberror using st tags (1) values ('2021070100:00:00.000+0800',0)")
tdSql.error("insert into tberror using st tags (1) values ('202171 00:00:00.000+0800',0)")
tdSql.error("insert into tberror using st tags (1) values ('2021 07 01 00:00:00.000+0800',0)")
tdSql.error("insert into tberror using st tags (1) values ('2021 -07-0100:00:00.000+0800',0)")
tdSql.error("insert into tberror using st tags (1) values ('2021-7-11:1:1.234+075',0)")
os.system("rm -rf ./TimeZone/*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
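
A side note on the expected values: they are plain UTC-offset arithmetic, assuming the server runs at UTC+8 (the CI scripts in this commit export TZ=Asia/Harbin). A timestamp written with a +07:50 offset lands ten minutes later on a +08:00 clock; a minimal stand-alone sketch with Python's datetime, independent of TDengine:

from datetime import datetime, timedelta, timezone

src = datetime(2021, 7, 1, 0, 0, tzinfo=timezone(timedelta(hours=7, minutes=50)))
cst = timezone(timedelta(hours=8))  # assumed server zone, matching TZ=Asia/Harbin
print(src.astimezone(cst))  # 2021-07-01 00:10:00+08:00, matching the tb1 check above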

View File

@ -0,0 +1,174 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import datetime
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def checkCommunity(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
return False
else:
return True
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""  # avoid an unbound name when the binary is not found
for root, dirs, files in os.walk(projPath):
if ("taosdump" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
# clear envs
tdSql.execute(" create database ZoneTime precision 'us' ")
tdSql.execute(" use ZoneTime ")
tdSql.execute(" create stable st (ts timestamp , id int , val float) tags (tag1 timestamp ,tag2 int) ")
# standard case for Timestamp
tdSql.execute(" insert into tb1 using st tags (\"2021-07-01 00:00:00.000\" , 2) values( \"2021-07-01 00:00:00.000\" , 1 , 1.0 ) ")
case1 = (tdSql.getResult("select * from tb1"))
print(case1)
if case1 == [(datetime.datetime(2021, 7, 1, 0, 0), 1, 1.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01 00:00:00.000' ")
# RFC 3339: it allows the "T" separator to be replaced by a space
tdSql.execute(" insert into tb2 using st tags (\"2021-07-01T00:00:00.000+07:50\" , 2) values( \"2021-07-01T00:00:00.000+07:50\" , 2 , 2.0 ) ")
case2 = (tdSql.getResult("select * from tb2"))
print(case2)
if case2 == [(datetime.datetime(2021, 7, 1, 0, 10), 2, 2.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01T00:00:00.000+07:50'! ")
tdSql.execute(" insert into tb3 using st tags (\"2021-07-01T00:00:00.000+08:00\" , 3) values( \"2021-07-01T00:00:00.000+08:00\" , 3 , 3.0 ) ")
case3 = (tdSql.getResult("select * from tb3"))
print(case3)
if case3 == [(datetime.datetime(2021, 7, 1, 0, 0), 3, 3.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01T00:00:00.000+08:00'! ")
tdSql.execute(" insert into tb4 using st tags (\"2021-07-01T00:00:00.000Z\" , 4) values( \"2021-07-01T00:00:00.000Z\" , 4 , 4.0 ) ")
case4 = (tdSql.getResult("select * from tb4"))
print(case4)
if case4 == [(datetime.datetime(2021, 7, 1, 8, 0), 4, 4.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01T00:00:00.000Z'! ")
tdSql.execute(" insert into tb5 using st tags (\"2021-07-01 00:00:00.000+07:50\" , 5) values( \"2021-07-01 00:00:00.000+07:50\" , 5 , 5.0 ) ")
case5 = (tdSql.getResult("select * from tb5"))
print(case5)
if case5 == [(datetime.datetime(2021, 7, 1, 0, 10), 5, 5.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01 00:00:00.000+08:00 ")
tdSql.execute(" insert into tb6 using st tags (\"2021-07-01 00:00:00.000Z\" , 6) values( \"2021-07-01 00:00:00.000Z\" , 6 , 6.0 ) ")
case6 = (tdSql.getResult("select * from tb6"))
print(case6)
if case6 == [(datetime.datetime(2021, 7, 1, 8, 0), 6, 6.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01 00:00:00.000Z'! ")
# ISO 8601 timestamp format: the date and the time must be separated by "T"
tdSql.execute(" insert into tb7 using st tags (\"2021-07-01T00:00:00.000+0800\" , 7) values( \"2021-07-01T00:00:00.000+0800\" , 7 , 7.0 ) ")
case7 = (tdSql.getResult("select * from tb7"))
print(case7)
if case7 == [(datetime.datetime(2021, 7, 1, 0, 0), 7, 7.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01T00:00:00.000+0800'! ")
tdSql.execute(" insert into tb8 using st tags (\"2021-07-01T00:00:00.000+08\" , 8) values( \"2021-07-01T00:00:00.000+08\" , 8 , 8.0 ) ")
case8 = (tdSql.getResult("select * from tb8"))
print(case8)
if case8 == [(datetime.datetime(2021, 7, 1, 0, 0), 8, 8.0)]:
print ("check pass! ")
else:
print ("check failed about timestamp '2021-07-01T00:00:00.000+08'! ")
# Non-standard case for Timestamp
tdSql.execute(" insert into tb9 using st tags (\"2021-07-01 00:00:00.000+0800\" , 9) values( \"2021-07-01 00:00:00.000+0800\" , 9 , 9.0 ) ")
case9 = (tdSql.getResult("select * from tb9"))
print(case9)
tdSql.execute(" insert into tb10 using st tags (\"2021-07-0100:00:00.000\" , 10) values( \"2021-07-0100:00:00.000\" , 10 , 10.0 ) ")
case10 = (tdSql.getResult("select * from tb10"))
print(case10)
tdSql.execute(" insert into tb11 using st tags (\"2021-07-0100:00:00.000+0800\" , 11) values( \"2021-07-0100:00:00.000+0800\" , 11 , 11.0 ) ")
case11 = (tdSql.getResult("select * from tb11"))
print(case11)
tdSql.execute(" insert into tb12 using st tags (\"2021-07-0100:00:00.000+08:00\" , 12) values( \"2021-07-0100:00:00.000+08:00\" , 12 , 12.0 ) ")
case12 = (tdSql.getResult("select * from tb12"))
print(case12)
tdSql.execute(" insert into tb13 using st tags (\"2021-07-0100:00:00.000Z\" , 13) values( \"2021-07-0100:00:00.000Z\" , 13 , 13.0 ) ")
case13 = (tdSql.getResult("select * from tb13"))
print(case13)
tdSql.execute(" insert into tb14 using st tags (\"2021-07-0100:00:00.000Z\" , 14) values( \"2021-07-0100:00:00.000Z\" , 14 , 14.0 ) ")
case14 = (tdSql.getResult("select * from tb14"))
print(case14)
tdSql.execute(" insert into tb15 using st tags (\"2021-07-0100:00:00.000+08\" , 15) values( \"2021-07-0100:00:00.000+08\" , 15 , 15.0 ) ")
case15 = (tdSql.getResult("select * from tb15"))
print(case15)
tdSql.execute(" insert into tb16 using st tags (\"2021-07-0100:00:00.000+07:50\" , 16) values( \"2021-07-0100:00:00.000+07:50\" , 16 , 16.0 ) ")
case16 = (tdSql.getResult("select * from tb16"))
print(case16)
os.system("rm -rf *.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.query("show users")
rows = tdSql.queryRows
tdSql.execute("create user test PASS 'test' ")
tdSql.query("show users")
tdSql.checkRows(rows + 1)
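# the 24-character user name and 16-character password below exceed the length limits, so both statements must fail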
tdSql.error("create user tdenginetdenginetdengine PASS 'test' ")
tdSql.error("create user tdenginet PASS '1234512345123456' ")
try:
tdSql.execute("create account a&cc PASS 'pass123'")
except Exception as e:
print("create account a&cc PASS 'pass123' failed as expected")
return
tdLog.exit("creating an account with an invalid name should have failed.")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
print("==========step1")
print("drop built-in account")
try:
tdSql.execute("drop account root")
except Exception as e:
if len(e.args) > 0 and 'no rights' != e.args[0]:
tdLog.exit(e)
print("==========step2")
print("drop built-in user")
try:
tdSql.execute("drop user root")
except Exception as e:
if len(e.args) > 0 and 'no rights' != e.args[0]:
tdLog.exit(e)
return
tdLog.exit("drop built-in user is error.")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,67 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def genColList(self):
'''
generate column list
'''
col_list = list()
for i in range(1, 18):
col_list.append(f'c{i}')
return col_list
def genIncreaseValue(self, input_value):
'''
add ', 1' to end of value every loop
'''
value_list = list(input_value)
value_list.insert(-1, ", 1")
return ''.join(value_list)
def insertAlter(self):
'''
after each alter and insert, when execute 'select * from {tbname};' taosd will coredump
'''
tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7))
input_value = '(now, 1)'
tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
tdSql.execute(f'insert into {tbname} values {input_value};')
for col in self.genColList():
input_value = self.genIncreaseValue(input_value)
tdSql.execute(f'alter table {tbname} add column {col} int;')
tdSql.execute(f'insert into {tbname} values {input_value};')
tdSql.query(f'select * from {tbname};')
tdSql.checkRows(18)
def run(self):
tdSql.prepare()
self.insertAlter()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
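
A side note: genIncreaseValue above simply splices ", 1" in front of the closing parenthesis, so the VALUES literal grows by one column per iteration. A quick stand-alone check of the first rounds:

def gen_increase_value(v):  # mirrors genIncreaseValue above
    parts = list(v)
    parts.insert(-1, ", 1")
    return ''.join(parts)

v = gen_increase_value('(now, 1)')
print(v)                      # (now, 1, 1)
print(gen_increase_value(v))  # (now, 1, 1, 1)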

View File

@ -0,0 +1,85 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 36500")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table && insert data")
tdSql.execute("create table stbtag (ts timestamp, c1 int) TAGS(t1 int)")
tdSql.execute("create table tag1 using stbtag tags(1)")
tdLog.printNoPrefix("==========step2:alter stb add tag create new chiltable")
tdSql.execute("alter table stbtag add tag t2 int")
tdSql.execute("alter table stbtag add tag t3 tinyint")
tdSql.execute("alter table stbtag add tag t4 smallint ")
tdSql.execute("alter table stbtag add tag t5 bigint")
tdSql.execute("alter table stbtag add tag t6 float ")
tdSql.execute("alter table stbtag add tag t7 double ")
tdSql.execute("alter table stbtag add tag t8 bool ")
tdSql.execute("alter table stbtag add tag t9 binary(10) ")
tdSql.execute("alter table stbtag add tag t10 nchar(10)")
tdSql.execute("create table tag2 using stbtag tags(2, 22, 23, 24, 25, 26.1, 27.1, 1, 'binary9', 'nchar10')")
tdSql.query( "select tbname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10 from stbtag" )
tdSql.checkData(1, 0, "tag2")
tdSql.checkData(1, 1, 2)
tdSql.checkData(1, 2, 22)
tdSql.checkData(1, 3, 23)
tdSql.checkData(1, 4, 24)
tdSql.checkData(1, 5, 25)
tdSql.checkData(1, 6, 26.1)
tdSql.checkData(1, 7, 27.1)
tdSql.checkData(1, 8, 1)
tdSql.checkData(1, 9, "binary9")
tdSql.checkData(1, 10, "nchar10")
tdLog.printNoPrefix("==========step3:alter stb drop tag create new chiltable")
tdSql.execute("alter table stbtag drop tag t2 ")
tdSql.execute("alter table stbtag drop tag t3 ")
tdSql.execute("alter table stbtag drop tag t4 ")
tdSql.execute("alter table stbtag drop tag t5 ")
tdSql.execute("alter table stbtag drop tag t6 ")
tdSql.execute("alter table stbtag drop tag t7 ")
tdSql.execute("alter table stbtag drop tag t8 ")
tdSql.execute("alter table stbtag drop tag t9 ")
tdSql.execute("alter table stbtag drop tag t10 ")
tdSql.execute("create table tag3 using stbtag tags(3)")
tdSql.query("select * from stbtag where tbname like 'tag3' ")
tdSql.checkCols(3)
tdSql.query("select tbname, t1 from stbtag where tbname like 'tag3' ")
tdSql.checkData(0, 1, 3)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,73 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 36500")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table && insert data")
# timestamp list:
# 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
# -631180800000 -> "1950-01-01 00:00:00"
ts1 = 0
ts2 = -28800000
ts3 = -946800000000
ts4 = "1950-01-01 00:00:00"
tdSql.execute(
"create table stb2ts (ts timestamp, ts1 timestamp, ts2 timestamp, c1 int, ts3 timestamp) TAGS(t1 int)"
)
tdSql.execute("create table t2ts1 using stb2ts tags(1)")
tdSql.execute(f"insert into t2ts1 values ({ts1}, {ts1}, {ts1}, 1, {ts1})")
tdSql.execute(f"insert into t2ts1 values ({ts2}, {ts2}, {ts2}, 2, {ts2})")
tdSql.execute(f"insert into t2ts1 values ({ts3}, {ts3}, {ts3}, 4, {ts3})")
tdSql.execute(f"insert into t2ts1 values ('{ts4}', '{ts4}', '{ts4}', 3, '{ts4}')")
tdLog.printNoPrefix("==========step2:check inserted data")
tdSql.query("select * from stb2ts where ts1=0 and ts2='1970-01-01 08:00:00' ")
tdSql.checkRows(1)
tdSql.checkData(0, 4,'1970-01-01 08:00:00')
tdSql.query("select * from stb2ts where ts1=-28800000 and ts2='1970-01-01 00:00:00' ")
tdSql.checkRows(1)
tdSql.checkData(0, 4, '1970-01-01 00:00:00')
tdSql.query("select * from stb2ts where ts1=-946800000000 and ts2='1940-01-01 00:00:00' ")
tdSql.checkRows(1)
tdSql.checkData(0, 4, '1940-01-01 00:00:00')
tdSql.query("select * from stb2ts where ts1=-631180800000 and ts2='1950-01-01 00:00:00' ")
tdSql.checkRows(1)
tdSql.checkData(0, 4, '1950-01-01 00:00:00')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
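
A side note on the millisecond constants in the comment above: they can be verified independently, again assuming a UTC+8 session time zone. A stand-alone sketch:

from datetime import datetime, timedelta, timezone

cst = timezone(timedelta(hours=8))  # assumed server zone
for ms in (0, -28800000, -631180800000, -946800000000):
    print(ms, datetime.fromtimestamp(ms / 1000, tz=cst))
# 0 -> 1970-01-01 08:00:00, -28800000 -> 1970-01-01 00:00:00,
# -631180800000 -> 1950-01-01 00:00:00, -946800000000 -> 1940-01-01 00:00:00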

View File

@ -0,0 +1,109 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os  # needed for os.path, os.walk and os.system below
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from datetime import datetime
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""  # avoid an unbound name when the binary is not found
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
tdSql.query('show databases')
tdSql.checkData(0,15,0)
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
#write 5M rows into db, then restart to force the data to be flushed to disk
#create 500 tables
os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath)
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute('use db')
#prepare to query 500 tables last_row()
tableName = []
for i in range(500):
tableName.append(f"stb_{i}")
tdSql.execute('use db')
slow = 0  # count rounds where last_row with cachelast on is slower
for i in range(5):
#switch lastRow to off and check
tdSql.execute('alter database db cachelast 0')
tdSql.query('show databases')
tdSql.checkData(0,15,0)
#run last_row(*) query 500 times; restart the timer each round so both durations cover a single round
lastRow_Off_start = datetime.now()
for j in range(500):
tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[j]}')
lastRow_Off_end = datetime.now()
tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}')
#switch lastRow to on and check
tdSql.execute('alter database db cachelast 1')
tdSql.query('show databases')
tdSql.checkData(0,15,1)
#run last_row(*) query 500 times
tdSql.execute('use db')
lastRow_On_start = datetime.now()
for j in range(500):
tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[j]}')
lastRow_On_end = datetime.now()
tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}')
#check which one used more time
if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start):
pass
else:
slow += 1
tdLog.debug(slow)
if slow > 1: #tolerance for the first time
tdLog.exit('lastRow hot alter failed')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,91 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
#TODO: after TD-4518 and TD-4510 is resolved, add the exception test case for these situations
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
#checking string input exception for alter
tdSql.error("alter database db keep '10'")
tdSql.error('alter database db keep "10"')
tdSql.error("alter database db keep '\t'")
tdSql.error("alter database db keep \'\t\'")
tdSql.error('alter database db keep "a"')
tdSql.error('alter database db keep "1.4"')
tdSql.error("alter database db blocks '10'")
tdSql.error('alter database db comp "0"')
tdSql.execute('drop database if exists db')
#checking string input exception for create
tdSql.error("create database db comp '0'")
tdSql.error('create database db comp "1"')
tdSql.error("create database db comp '\t'")
tdSql.error("alter database db keep \'\t\'")
tdSql.error('create database db comp "a"')
tdSql.error('create database db comp "1.4"')
tdSql.error("create database db blocks '10'")
tdSql.error('create database db keep "3650"')
tdSql.error('create database db fsync "3650"')
tdSql.execute('create database db precision "us"')
tdSql.query('show databases')
tdSql.checkData(0,16,'us')
tdSql.execute('drop database if exists db')
#checking float input exception for create
tdSql.error("create database db fsync 7.3")
tdSql.error("create database db fsync 0.0")
tdSql.error("create database db fsync -5.32")
tdSql.error('create database db comp 7.2')
tdSql.error("create database db blocks 5.87")
tdSql.error('create database db keep 15.4')
#checking float input exception for insert
tdSql.execute('create database db')
tdSql.error('alter database db blocks 5.9')
tdSql.error('alter database db blocks -4.7')
tdSql.error('alter database db blocks 0.0')
tdSql.error('alter database db keep 15.4')
tdSql.error('alter database db comp 2.67')
#checking additional exception param for alter keep
tdSql.error('alter database db keep 365001')
tdSql.error('alter database db keep 364999,365000,365001')
tdSql.error('alter database db keep -10')
tdSql.error('alter database db keep 5')
tdSql.error('alter database db keep ')
tdSql.error('alter database db keep 40,a,60')
tdSql.error('alter database db keep ,,60,')
tdSql.error('alter database db keep \t')
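# a tab in front of a valid number is tolerated (next statement), while a bare tab is not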
tdSql.execute('alter database db keep \t50')
tdSql.query('show databases')
tdSql.checkData(0,7,'50,50,50')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"]
for flag in flagList:
tdSql.execute("alter local %s 131" % flag)
tdSql.execute("alter local %s 135" % flag)
tdSql.execute("alter local %s 143" % flag)
randomFlag = random.randint(100, 250)
if randomFlag != 131 and randomFlag != 135 and randomFlag != 143:
tdSql.error("alter local %s %d" % (flag, randomFlag))
tdSql.query("show dnodes")
dnodeId = tdSql.getData(0, 0)
for flag in flagList:
tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag))
tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag))
tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,208 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os  # os.path is used in run() below
from util.log import *
from util.cases import *
from util.sql import *
import time
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def alterKeepCommunity(self):
tdLog.notice('running Keep Test, Community Version')
tdLog.notice('running parameter test for keep during create')
#testing keep parameter during create
tdSql.query('show databases')
tdSql.checkData(0,7,'3650')
tdSql.execute('drop database db')
tdSql.execute('create database db keep 100')
tdSql.query('show databases')
tdSql.checkData(0,7,'100')
tdSql.execute('drop database db')
tdSql.error('create database db keep ')
tdSql.error('create database db keep 0')
tdSql.error('create database db keep 10,20')
tdSql.error('create database db keep 10,20,30')
tdSql.error('create database db keep 20,30,40,50')
#testing keep parameter during alter
tdSql.execute('create database db')
tdLog.notice('running parameter test for keep during alter')
tdSql.execute('alter database db keep 100')
tdSql.query('show databases')
tdSql.checkData(0,7,'100')
tdSql.error('alter database db keep ')
tdSql.error('alter database db keep 0')
tdSql.error('alter database db keep 10,20')
tdSql.error('alter database db keep 10,20,30')
tdSql.error('alter database db keep 20,30,40,50')
tdSql.query('show databases')
tdSql.checkData(0,7,'100')
def alterKeepEnterprise(self):
tdLog.notice('running Keep Test, Enterprise Version')
#testing keep parameter during create
tdLog.notice('running parameter test for keep during create')
tdSql.query('show databases')
tdSql.checkData(0,7,'3650,3650,3650')
tdSql.execute('drop database db')
tdSql.execute('create database db keep 100')
tdSql.query('show databases')
tdSql.checkData(0,7,'100,100,100')
tdSql.execute('drop database db')
tdSql.execute('create database db keep 20, 30')
tdSql.query('show databases')
tdSql.checkData(0,7,'20,30,30')
tdSql.execute('drop database db')
tdSql.execute('create database db keep 30,40,50')
tdSql.query('show databases')
tdSql.checkData(0,7,'30,40,50')
tdSql.execute('drop database db')
tdSql.error('create database db keep ')
tdSql.error('create database db keep 20,30,40,50')
tdSql.error('create database db keep 0')
tdSql.error('create database db keep 100,50')
tdSql.error('create database db keep 100,40,50')
tdSql.error('create database db keep 20,100,50')
tdSql.error('create database db keep 50,60,20')
#testing keep parameter during alter
tdSql.execute('create database db')
tdLog.notice('running parameter test for keep during alter')
tdSql.execute('alter database db keep 10')
tdSql.query('show databases')
tdSql.checkData(0,7,'10,10,10')
tdSql.execute('alter database db keep 20,30')
tdSql.query('show databases')
tdSql.checkData(0,7,'20,30,30')
tdSql.execute('alter database db keep 100,200,300')
tdSql.query('show databases')
tdSql.checkData(0,7,'100,200,300')
tdSql.error('alter database db keep ')
tdSql.error('alter database db keep 20,30,40,50')
tdSql.error('alter database db keep 0')
tdSql.error('alter database db keep 100,50')
tdSql.error('alter database db keep 100,40,50')
tdSql.error('alter database db keep 20,100,50')
tdSql.error('alter database db keep 50,60,20')
tdSql.query('show databases')
tdSql.checkData(0,7,'100,200,300')
def run(self):
tdSql.prepare()
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
tdLog.debug('running enterprise test')
self.alterKeepEnterprise()
else:
tdLog.debug('running community test')
self.alterKeepCommunity()
## preset the keep
tdSql.prepare()
tdLog.notice('testing if alter will cause any error')
tdSql.execute('create table tb (ts timestamp, speed int)')
tdSql.execute('alter database db keep 10,10,10')
tdSql.execute('insert into tb values (now, 10)')
tdSql.execute('insert into tb values (now + 10m, 10)')
tdSql.query('select * from tb')
tdSql.checkRows(2)
#after alter from small to large, check if the alter if functioning
#test if change through test.py is consistent with change from taos client
#test case for TD-4459 and TD-4445
tdLog.notice('testing keep will be altered changing from small to big')
tdSql.execute('alter database db keep 40,40,40')
tdSql.query('show databases')
tdSql.checkData(0,7,'40,40,40')
tdSql.error('insert into tb values (now-60d, 10)')
tdSql.execute('insert into tb values (now-30d, 10)')
tdSql.query('select * from tb')
tdSql.checkRows(3)
rowNum = 3
for i in range(30):
rowNum += 1
tdSql.execute('alter database db keep 20,20,20')
tdSql.execute('alter database db keep 40,40,40')
tdSql.query('show databases')
tdSql.checkData(0,7,'40,40,40')
tdSql.error('insert into tb values (now-60d, 10)')
tdSql.execute('insert into tb values (now-30d, 10)')
tdSql.query('select * from tb')
tdSql.checkRows(rowNum)
tdLog.notice('testing keep will be altered changing from big to small')
tdSql.execute('alter database db keep 10,10,10')
tdSql.query('show databases')
tdSql.checkData(0,7,'10,10,10')
tdSql.error('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
tdSql.checkRows(2)
rowNum = 2
tdLog.notice('testing keep will be altered if sudden change from small to big')
for i in range(30):
tdSql.execute('alter database db keep 14,14,14')
tdSql.execute('alter database db keep 16,16,16')
tdSql.execute('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
rowNum += 1
tdSql.checkRows(rowNum)
tdLog.notice('testing keep will be altered if sudden change from big to small')
tdSql.execute('alter database db keep 16,16,16')
tdSql.execute('alter database db keep 14,14,14')
tdSql.error('insert into tb values (now-15d, 10)')
tdSql.query('select * from tb')
tdSql.checkRows(2)
tdLog.notice('testing data will show up again when keep is being changed to large value')
tdSql.execute('alter database db keep 40,40,40')
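# 63 = 2 recent rows + 31 rows at now-30d + 30 rows at now-15d, all visible again inside the 40-day keep window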
tdSql.query('select * from tb')
tdSql.checkRows(63)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,129 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self):
tdLog.debug("start to execute %s" % __file__)
tdLog.info("prepare cluster")
tdDnodes.stopAll()
tdDnodes.deploy(1)
tdDnodes.start(1)
self.conn = taos.connect(config=tdDnodes.getSimCfgPath())
tdSql.init(self.conn.cursor())
tdSql.execute('reset query cache')
tdSql.execute('create dnode 192.168.0.2')
tdDnodes.deploy(2)
tdDnodes.start(2)
self.conn = taos.connect(config=tdDnodes.getSimCfgPath())
tdSql.init(self.conn.cursor())
tdSql.execute('reset query cache')
tdSql.execute('create dnode 192.168.0.3')
tdDnodes.deploy(3)
tdDnodes.start(3)
def run(self):
tdSql.execute('create database db replica 3 days 7')
tdSql.execute('use db')
for tid in range(1, 11):
tdSql.execute('create table tb%d(ts timestamp, i int)' % tid)
tdLog.sleep(10)
tdLog.info("================= step1")
startTime = 1520000010000
for rid in range(1, 11):
for tid in range(1, 11):
tdSql.execute(
'insert into tb%d values(%ld, %d)' %
(tid, startTime, rid))
startTime += 1
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.sleep(5)
tdLog.info("================= step2")
tdSql.execute('alter database db replica 2')
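# the sleep below presumably gives the vgroups time to re-sync after the replica change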
tdLog.sleep(10)
tdLog.info("================= step3")
for rid in range(1, 11):
for tid in range(1, 11):
tdSql.execute(
'insert into tb%d values(%ld, %d)' %
(tid, startTime, rid))
startTime += 1
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.sleep(5)
tdLog.info("================= step4")
tdSql.execute('alter database db replica 1')
tdLog.sleep(10)
tdLog.info("================= step5")
for rid in range(1, 11):
for tid in range(1, 11):
tdSql.execute(
'insert into tb%d values(%ld, %d)' %
(tid, startTime, rid))
startTime += 1
tdSql.query('select * from tb1')
tdSql.checkRows(30)
tdLog.sleep(5)
tdLog.info("================= step6")
tdSql.execute('alter database db replica 2')
tdLog.sleep(10)
tdLog.info("================= step7")
for rid in range(1, 11):
for tid in range(1, 11):
tdSql.execute(
'insert into tb%d values(%ld, %d)' %
(tid, startTime, rid))
startTime += 1
tdSql.query('select * from tb1')
tdSql.checkRows(40)
tdLog.sleep(5)
tdLog.info("================= step8")
tdSql.execute('alter database db replica 3')
tdLog.sleep(10)
tdLog.info("================= step9")
for rid in range(1, 11):
for tid in range(1, 11):
tdSql.execute(
'insert into tb%d values(%ld, %d)' %
(tid, startTime, rid))
startTime += 1
tdSql.query('select * from tb1')
tdSql.checkRows(50)
tdLog.sleep(5)
def stop(self):
tdSql.close()
self.conn.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addCluster(__file__, TDTestCase())

View File

@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-
import sys
import time  # time.sleep is used in run() below
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
"float",
"double",
"smallint",
"tinyint",
"int unsigned",
"bigint unsigned",
"smallint unsigned",
"tinyint unsigned",
"binary(10)",
"nchar(10)",
"timestamp"]
self.rowNum = 300
self.ts = 1537146000000
self.step = 1000
self.sqlHead = "select count(*), count(c1) "
self.sqlTail = " from stb"
def addColumnAndCount(self):
for colIdx in range(len(self.types)):
tdSql.execute(
"alter table stb add column c%d %s" %
(colIdx + 2, self.types[colIdx]))
self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2)
tdSql.query(self.sqlHead + self.sqlTail)
# count non-NULL values in each column
tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
for i in range(2, colIdx + 2):
print("check1: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
# insert more rows
for k in range(self.rowNum):
self.ts += self.step
sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
for j in range(colIdx + 1):
sql += ", %d" % (colIdx + 2)
sql += ")"
tdSql.execute(sql)
# count non-NULL values in each column
tdSql.query(self.sqlHead + self.sqlTail)
tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
for i in range(2, colIdx + 2):
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def dropColumnAndCount(self):
tdSql.query(self.sqlHead + self.sqlTail)
res = []
for i in range(len(self.types)):
res.append(tdSql.getData(0, i + 2))
print(res)
for colIdx in range(len(self.types), 0, -1):
tdSql.execute("alter table stb drop column c%d" % (colIdx + 2))
# self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2)
tdSql.query(self.sqlHead + self.sqlTail)
# count non-NULL values in each column
tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
for i in range(2, colIdx + 2):
print("check1: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
# insert more rows
for k in range(self.rowNum):
self.ts += self.step
sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
for j in range(colIdx + 1):
sql += ", %d" % (colIdx + 2)
sql += ")"
tdSql.execute(sql)
# count non-NULL values in each column
tdSql.query(self.sqlHead + self.sqlTail)
tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
for i in range(2, colIdx + 2):
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def run(self):
# Setup params
db = "db"
# Create db
tdSql.execute("drop database if exists %s" % (db))
tdSql.execute("reset query cache")
tdSql.execute("create database %s maxrows 200 maxtables 4" % (db))
tdSql.execute("use %s" % (db))
# Create a table with one column of int type and insert 300 rows
tdLog.info("Create stb and tb")
tdSql.execute("create table stb (ts timestamp, c1 int) tags (tg1 int)")
tdSql.execute("create table tb using stb tags (0)")
tdLog.info("Insert %d rows into tb" % (self.rowNum))
for k in range(1, self.rowNum + 1):
self.ts += self.step
tdSql.execute("insert into tb values (%d, 1)" % (self.ts))
# Alter stb repeatedly, adding one column of each type, then query to see if
# all added columns are NULL
self.addColumnAndCount()
tdDnodes.stop(1)
time.sleep(5)
tdDnodes.start(1)
time.sleep(5)
tdSql.query(self.sqlHead + self.sqlTail)
for i in range(2, len(self.types) + 2):
tdSql.checkData(0, i, self.rowNum * (len(self.types) + 2 - i))
self.dropColumnAndCount()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
#tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
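
A side note on the count assertions: column c_k (k >= 2) is added in round k-2, and each completed round inserts self.rowNum rows, so before round colIdx's insert c_k is non-NULL for rounds k-2 through colIdx-1. A hypothetical spot-check of the arithmetic:

row_num, col_idx = 300, 3            # say, the round that just added column c5
print(row_num * (col_idx + 1))       # 1200 rows in total before this round's insert
for k in range(2, col_idx + 2):
    # c_k holds values for rounds k-2 .. col_idx-1, i.e. col_idx-k+2 rounds
    print(f"count(c{k}) = {row_num * (col_idx - k + 2)}")
# count(c2) = 900, count(c3) = 600, count(c4) = 300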

View File

@ -0,0 +1,161 @@
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
"float",
"double",
"smallint",
"tinyint",
"int unsigned",
"bigint unsigned",
"smallint unsigned",
"tinyint unsigned",
"binary(10)",
"nchar(10)",
"timestamp"]
self.rowNum = 300
self.ts = 1537146000000
self.step = 1000
self.sqlHead = "select count(*), count(c1) "
self.sqlTail = " from tb"
def addColumnAndCount(self):
for colIdx in range(len(self.types)):
tdSql.execute(
"alter table tb add column c%d %s" %
(colIdx + 2, self.types[colIdx]))
self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2)
tdSql.query(self.sqlHead + self.sqlTail)
# count non-NULL values in each column
tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
for i in range(2, colIdx + 2):
print("check1: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
# insert more rows
for k in range(self.rowNum):
self.ts += self.step
sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
for j in range(colIdx + 1):
sql += ", %d" % (colIdx + 2)
sql += ")"
tdSql.execute(sql)
# count non-NULL values in each column
tdSql.query(self.sqlHead + self.sqlTail)
tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
for i in range(2, colIdx + 2):
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def dropColumnAndCount(self):
tdSql.query(self.sqlHead + self.sqlTail)
res = []
for i in range(len(self.types)):
res.append(tdSql.getData(0, i + 2))
print(res)
for colIdx in range(len(self.types), 0, -1):
tdSql.execute("alter table tb drop column c%d" % (colIdx + 2))
# self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2)
tdSql.query(self.sqlHead + self.sqlTail)
# count non-NULL values in each column
tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
for i in range(2, colIdx + 2):
print("check1: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
# insert more rows
for k in range(self.rowNum):
self.ts += self.step
sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
for j in range(colIdx + 1):
sql += ", %d" % (colIdx + 2)
sql += ")"
tdSql.execute(sql)
# count non-NULL values in each column
tdSql.query(self.sqlHead + self.sqlTail)
tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
for i in range(2, colIdx + 2):
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def alter_table_255_times(self): # add case for TD-6207
for i in range(255):
tdLog.info("alter table st add column cb%d int"%i)
tdSql.execute("alter table st add column cb%d int"%i)
tdSql.execute("insert into t0 (ts,c1) values(now,1)")
tdSql.execute("reset query cache")
tdSql.query("select * from st")
tdSql.execute("create table mt(ts timestamp, i int)")
tdSql.execute("insert into mt values(now,11)")
tdSql.query("select * from mt")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("describe db.st")
def run(self):
# Setup params
db = "db"
# Create db
tdSql.execute("drop database if exists %s" % (db))
tdSql.execute("reset query cache")
tdSql.execute("create database %s maxrows 200" % (db))
tdSql.execute("use %s" % (db))
# Create a table with one colunm of int type and insert 300 rows
tdLog.info("create table tb")
tdSql.execute("create table tb (ts timestamp, c1 int)")
tdLog.info("Insert %d rows into tb" % (self.rowNum))
for k in range(1, self.rowNum + 1):
self.ts += self.step
tdSql.execute("insert into tb values (%d, 1)" % (self.ts))
# Alter tb and add a column of smallint type, then query tb to see if
# all added column are NULL
self.addColumnAndCount()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query(self.sqlHead + self.sqlTail)
size = len(self.types) + 2
for i in range(2, size):
tdSql.checkData(0, i, self.rowNum * (size - i))
tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)")
tdSql.execute("create table t0 using st tags(null,1,2.3)")
tdSql.execute("alter table t0 set tag t1=2.1")
tdSql.query("show tables")
tdSql.checkRows(2)
self.alter_table_255_times()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,91 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()

        print("==============Case 1: add column, restart taosd, drop the same column, then add it back")
        tdSql.execute(
            "create table st(ts timestamp, speed int) tags(loc nchar(20))")
        tdSql.execute(
            "insert into t1 using st tags('beijing') values(now, 1)")
        tdSql.execute(
            "alter table st add column tbcol binary(20)")

        # restart taosd
        tdDnodes.forcestop(1)
        tdDnodes.start(1)

        tdSql.execute(
            "alter table st drop column tbcol")
        tdSql.execute(
            "alter table st add column tbcol binary(20)")
        tdSql.query("select * from st")
        tdSql.checkRows(1)

        print("==============Case 2: keep adding columns, restart taosd")
        tdSql.execute(
            "create table dt(ts timestamp, tbcol1 tinyint) tags(tgcol1 tinyint)")
        tdSql.execute(
            "alter table dt add column tbcol2 int")
        tdSql.execute(
            "alter table dt add column tbcol3 smallint")
        tdSql.execute(
            "alter table dt add column tbcol4 bigint")
        tdSql.execute(
            "alter table dt add column tbcol5 float")
        tdSql.execute(
            "alter table dt add column tbcol6 double")
        tdSql.execute(
            "alter table dt add column tbcol7 bool")
        tdSql.execute(
            "alter table dt add column tbcol8 nchar(20)")
        tdSql.execute(
            "alter table dt add column tbcol9 binary(20)")
        tdSql.execute(
            "alter table dt add column tbcol10 tinyint unsigned")
        tdSql.execute(
            "alter table dt add column tbcol11 int unsigned")
        tdSql.execute(
            "alter table dt add column tbcol12 smallint unsigned")
        tdSql.execute(
            "alter table dt add column tbcol13 bigint unsigned")

        # restart taosd
        tdDnodes.forcestop(1)
        tdDnodes.start(1)

        tdSql.query("select * from dt")
        tdSql.checkRows(0)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
import random
import string
import subprocess
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdLog.debug("check database")
        tdSql.prepare()

        # check the default update value
        sql = "create database if not exists db"
        tdSql.execute(sql)
        tdSql.query('show databases')
        tdSql.checkRows(1)
        tdSql.checkData(0, 16, 0)

        # check that update can be altered between 0 and 1
        sql = "alter database db update 1"
        tdSql.execute(sql)
        tdSql.query('show databases')
        tdSql.checkRows(1)
        tdSql.checkData(0, 16, 1)

        sql = "alter database db update 0"
        tdSql.execute(sql)
        tdSql.query('show databases')
        tdSql.checkRows(1)
        tdSql.checkData(0, 16, 0)

        # out-of-range update values are rejected
        sql = "alter database db update -1"
        tdSql.error(sql)
        sql = "alter database db update 100"
        tdSql.error(sql)
        tdSql.query('show databases')
        tdSql.checkRows(1)
        tdSql.checkData(0, 16, 0)

        tdSql.execute('drop database db')
        tdSql.error('create database db update 100')
        tdSql.error('create database db update -1')
        tdSql.execute('create database db update 1')
        tdSql.query('show databases')
        tdSql.checkRows(1)
        tdSql.checkData(0, 16, 1)
        tdSql.execute('drop database db')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,77 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.execute(
            'create table st (ts timestamp, v1 int, v2 int, v3 int, v4 int, v5 int) tags (t int)')

        totalTables = 100
        batchSize = 500
        totalBatch = 60
        tdLog.info(
            "create %d tables, insert %d rows per table" %
            (totalTables, batchSize * totalBatch))
        for t in range(0, totalTables):
            tdSql.execute('create table t%d using st tags(%d)' % (t, t))
            # 2019-06-10 00:00:00
            beginTs = 1560096000000
            interval = 10000
            for r in range(0, totalBatch):
                sql = 'insert into t%d values ' % (t)
                for b in range(0, batchSize):
                    ts = beginTs + (r * batchSize + b) * interval
                    sql += '(%d, 1, 2, 3, 4, 5)' % (ts)
                tdSql.execute(sql)
        tdLog.info("insert data finished")

        tdSql.execute('alter table st add column v6 int')
        tdLog.sleep(5)
        tdLog.info("alter table finished")

        tdSql.query("select count(*) from t50")
        tdSql.checkData(0, 0, int(batchSize * totalBatch))

        tdLog.info("insert")
        tdSql.execute(
            "insert into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)")
        tdLog.info("import")
        tdSql.execute(
            "import into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)")
        tdLog.info("query")
        tdSql.query("select count(*) from t50")
        tdSql.checkData(0, 0, batchSize * totalBatch + 1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

tests/pytest/bug2265.py

@ -0,0 +1,85 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import taos
if __name__ == "__main__":
    logSql = True
    deployPath = ""
    testCluster = False
    valgrind = 0

    print("start to execute %s" % __file__)
    tdDnodes.init(deployPath)
    tdDnodes.setTestCluster(testCluster)
    tdDnodes.setValgrind(valgrind)
    tdDnodes.stopAll()
    tdDnodes.addSimExtraCfg("maxSQLLength", "1048576")
    tdDnodes.deploy(1)
    tdDnodes.start(1)

    host = '127.0.0.1'
    tdLog.info("Procedures for tdengine deployed in %s" % (host))
    tdCases.logSql(logSql)
    conn = taos.connect(
        host,
        config=tdDnodes.getSimCfgPath())
    tdSql.init(conn.cursor(), True)

    print("==========step1")
    print("create table ")
    tdSql.execute("create database db")
    tdSql.execute("use db")
    tdSql.execute("create table t1 (ts timestamp, c1 int,c2 int ,c3 int)")

    print("==========step2")
    print("insert data up to maxSQLLength ")
    data = 'insert into t1 values'
    ts = 1604298064000
    i = 0
    while (len(data) < (1024 * 1024)) and (i < 32767 - 1):
        data += '(%s,%d,%d,%d)' % (ts + i, i % 1000, i % 1000, i % 1000)
        i += 1
    tdSql.execute(data)

    print("==========step3")
    print("insert a batch of more than 32767 rows ")
    i = 0
    while (len(data) < (1024 * 1024)) and (i < 32767):
        data += '(%s,%d,%d,%d)' % (ts + i, i % 1000, i % 1000, i % 1000)
        i += 1
    tdSql.error(data)

    print("==========step4")
    print("insert data larger than maxSQLLength ")
    tdSql.execute("create table t2 (ts timestamp, c1 binary(50))")
    data = 'insert into t2 values'
    i = 0
    while (len(data) < (1024 * 1024)) and (i < 32767 - 1):
        data += '(%s,%s)' % (ts + i, 'a' * 50)
        i += 1
    tdSql.error(data)

    tdSql.close()
    tdLog.success("%s successfully executed" % __file__)


@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.query('select database()')
        tdSql.checkData(0, 0, "db")

        tdSql.execute("alter database db comp 2")
        tdSql.query("show databases")
        tdSql.checkData(0, 14, 2)

        tdSql.execute("alter database db keep 365,365,365")
        tdSql.query("show databases")
        tdSql.checkData(0, 7, "365,365,365")

        tdSql.error("alter database db quorum 2")

        tdSql.execute("alter database db blocks 100")
        tdSql.query("show databases")
        tdSql.checkData(0, 9, 100)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,88 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.pathFinding import *
from util.dnodes import tdDnodes
from datetime import datetime
import subprocess
import time

##TODO: this is now automatic, but not sure if this will run through jenkins


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        tdFindPath.init(__file__)

    def run(self):
        tdSql.prepare()
        binPath = tdFindPath.getTaosdemoPath()
        TDenginePath = tdFindPath.getTDenginePath()

        ## change system time to 2020/10/20
        os.system('sudo timedatectl set-ntp off')
        tdLog.sleep(10)
        os.system('sudo timedatectl set-time 2020-10-20')

        # run taosdemo to insert data, one row per second from 2020/10/11 to 2020/10/20;
        # 11 data files should be generated
        # vnode at TDinternal/community/sim/dnode1/data/vnode
        result = ''
        try:
            os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json")
            commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
            result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        except BaseException:
            os.system('sudo timedatectl set-ntp on')
            tdLog.sleep(10)
        if result.count('data') != 11:
            os.system('sudo timedatectl set-ntp on')
            tdLog.sleep(10)
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")

        # move 5 days ahead to 2020/10/25; the 4 oldest files should be removed
        # during the new write, leaving 7 data files
        try:
            os.system('sudo timedatectl set-time 2020-10-25')
            os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_B.json")
        except BaseException:
            os.system('sudo timedatectl set-ntp on')
            tdLog.sleep(10)
        os.system('sudo timedatectl set-ntp on')
        tdLog.sleep(10)

        commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
        result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
        print(result.count('data'))
        if result.count('data') != 7:
            tdLog.exit('wrong number of files')
        else:
            tdLog.debug("data file number correct")
        tdSql.query('select first(ts) from stb_0')
        # check the earliest remaining record after the retention sweep
        tdSql.checkData(0, 0, datetime(2020, 10, 14, 8, 0, 0, 0))

        os.system('sudo timedatectl set-ntp on')
        tdLog.sleep(10)

    def stop(self):
        os.system('sudo timedatectl set-ntp on')
        tdLog.sleep(10)
        tdSql.close()
        tdLog.success("alter block manual check finish")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,100 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from util.pathFinding import *
from datetime import datetime
import subprocess
##TODO: this is now automatic, but not sure if this will run through jenkins
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
tdFindPath.init(__file__)
def run(self):
tdSql.prepare()
binPath = tdFindPath.getTaosdemoPath()
TDenginePath = tdFindPath.getTDenginePath()
## change system time to 2020/10/20
os.system ('timedatectl set-ntp off')
tdLog.sleep(10)
os.system ('timedatectl set-time 2020-10-20')
#run taosdemo to insert data. one row per second from 2020/10/11 to 2020/10/20
#11 data files should be generated
#vnode at TDinternal/community/sim/dnode1/data/vnode
try:
os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json")
commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
except BaseException:
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
if result.count('data') != 11:
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
tdLog.exit('wrong number of files')
else:
tdLog.debug("data file number correct")
try:
tdSql.query('select first(ts) from stb_0') #check the last data in the database
tdSql.checkData(0,0,datetime(2020,10,11,0,0,0,0))
except BaseException:
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
#moves 5 days ahead to 2020/10/25 and restart taosd
#4 oldest data file should be removed from tsdb/data
#7 data file should be found
#vnode at TDinternal/community/sim/dnode1/data/vnode
try:
os.system ('timedatectl set-time 2020-10-25')
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query('select first(ts) from stb_0')
tdSql.checkData(0,0,datetime(2020,10,14,8,0,0,0)) #check the last data in the database
except BaseException:
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data']
result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8')
print(result.count('data'))
if result.count('data') != 7:
tdLog.exit('wrong number of files')
else:
tdLog.debug("data file number correct")
os.system('sudo timedatectl set-ntp on')
tdLog.sleep(10)
def stop(self):
os.system('sudo timedatectl set-ntp on')
tdSql.close()
tdLog.success("alter block manual check finish")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,83 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
from util.log import *
from util.cases import *
from util.sql import *
from datetime import timedelta


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        ret = tdSql.query('select database()')
        tdSql.checkData(0, 0, "db")

        ret = tdSql.query('select server_status()')
        tdSql.checkData(0, 0, 1)

        ret = tdSql.query('select server_status() as result')
        tdSql.checkData(0, 0, 1)

        time.sleep(1)
        ret = tdSql.query('show dnodes')
        dnodeId = tdSql.getData(0, 0)
        dnodeEndpoint = tdSql.getData(0, 1)

        ret = tdSql.execute('alter dnode "%s" debugFlag 135' % dnodeId)
        tdLog.info('alter dnode "%s" debugFlag 135 -> ret: %d' % (dnodeId, ret))

        time.sleep(1)
        ret = tdSql.query('show mnodes')
        tdSql.checkRows(1)
        tdSql.checkData(0, 2, "master")

        role_time = tdSql.getData(0, 3)
        create_time = tdSql.getData(0, 4)
        time_delta = timedelta(milliseconds=100)
        if create_time - time_delta < role_time < create_time + time_delta:
            tdLog.info("role_time {} and create_time {} expected within range".format(role_time, create_time))
        else:
            tdLog.exit("role_time {} and create_time {} not expected within range".format(role_time, create_time))

        ret = tdSql.query('show vgroups')
        tdSql.checkRows(0)

        tdSql.execute('create stable st (ts timestamp, f int) tags(t int)')
        tdSql.execute('create table ct1 using st tags(1)')
        tdSql.execute('create table ct2 using st tags(2)')
        time.sleep(3)

        ret = tdSql.query('show vnodes "{}"'.format(dnodeEndpoint))
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 2)
        tdSql.checkData(0, 1, "master")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,48 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import inspect
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdDnodes.stop(1)
        sql = "use db"
        try:
            tdSql.execute(sql)
        except Exception as e:
            expectError = 'Unable to establish connection'
            if expectError in str(e):
                pass
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                tdLog.exit("%s(%d) failed: sql:%s, expected error did not occur" % (caller.filename, caller.lineno, sql))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,55 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading


class TwoClients:
    def initConnection(self):
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/chr/taosdata/TDengine/sim/dnode1/cfg"

    def newCloseCon(self, times):
        # open `times` connections, then close them all
        newConList = []
        for _ in range(times):
            newConList.append(taos.connect(self.host, self.user, self.password, self.config))
        for con in newConList:
            con.close()

    def run(self):
        tdDnodes.init("")
        tdDnodes.setTestCluster(False)
        tdDnodes.setValgrind(False)
        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        # repeatedly open and close connections from multiple threads
        for m in range(1, 101):
            t = threading.Thread(target=self.newCloseCon, args=(10,))
            t.start()


clients = TwoClients()
clients.initConnection()
clients.run()


@ -0,0 +1,96 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos


class TwoClients:
    def initConnection(self):
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"

    def run(self):
        tdDnodes.init("")
        tdDnodes.setTestCluster(False)
        tdDnodes.setValgrind(False)
        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        # the first client creates a stable and inserts data
        conn1 = taos.connect(self.host, self.user, self.password, self.config)
        cursor1 = conn1.cursor()
        cursor1.execute("drop database if exists db")
        cursor1.execute("create database db")
        cursor1.execute("use db")
        cursor1.execute("create table tb (ts timestamp, id int) tags(loc nchar(30))")
        cursor1.execute("insert into t0 using tb tags('beijing') values(now, 1)")

        # the second client alters the table created by the first client
        conn2 = taos.connect(self.host, self.user, self.password, self.config)
        cursor2 = conn2.cursor()
        cursor2.execute("use db")
        cursor2.execute("alter table tb add column name nchar(30)")

        # the first client should not be able to use the stale metadata
        tdSql.init(cursor1, True)
        tdSql.error("insert into t0 values(now, 2)")

        # the first client should be able to insert data with the updated metadata
        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(2)

        # the second client drops and recreates the table
        cursor2.execute("drop table t0")
        cursor2.execute("create table t0 using tb tags('beijing')")
        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(1)

        # an error is expected when two clients drop (or add) the same column
        cursor2.execute("alter table tb drop column name")
        tdSql.error("alter table tb drop column name")
        cursor2.execute("alter table tb add column speed int")
        tdSql.error("alter table tb add column speed int")
        tdSql.execute("alter table tb add column size int")

        tdSql.query("describe tb")
        tdSql.checkRows(5)
        tdSql.checkData(0, 0, "ts")
        tdSql.checkData(1, 0, "id")
        tdSql.checkData(2, 0, "speed")
        tdSql.checkData(3, 0, "size")
        tdSql.checkData(4, 0, "loc")

        cursor1.close()
        cursor2.close()
        conn1.close()
        conn2.close()


clients = TwoClients()
clients.initConnection()
clients.run()


@ -0,0 +1,55 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from math import floor
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()

        sql = "select server_version()"
        ret = tdSql.query(sql)
        version = floor(float(tdSql.getData(0, 0)[0:3]))
        expectedVersion = 2
        if version == expectedVersion:
            tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version))
        else:
            tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion))

        sql = "select client_version()"
        ret = tdSql.query(sql)
        version = floor(float(tdSql.getData(0, 0)[0:3]))
        expectedVersion = 2
        if version == expectedVersion:
            tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version))
        else:
            tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,9 @@
execute:
cd TDengine/tests/pytest && python3 ./test.py -f cluster/TD-3693/multClient.py && python3 cluster/TD-3693/multQuery.py
1. Use the three-node test cluster: fc1, fct2 and fct4.
2. Use taosdemo to create two databases, db1 and db2, each with one replica, and insert some data.
3. db1 resides on the mnode master (fct2); db2 resides on the mnode slave (fct4).
4. taosdemo was modified into a multi-threaded query tool (the modified binary is named taosdemoMul); it keeps querying the data in db2 over multiple concurrent connections.
5. With the queries from step 4 running in the background, run create-table, insert and query operations on db2 at the same time; repeat the query round 10 times with a 91-second interval between rounds.
6. Then inspect the taosd log to check whether the issue "send auth msg to mnodes" still appears.
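For the log check in step 6, one possible command, given as a sketch (it assumes the default log directory /var/log/taos and the usual taosdlog file prefix; adjust both to the local taos.cfg):
grep -c "send auth msg to mnodes" /var/log/taos/taosdlog.*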


@ -0,0 +1,88 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "192.168.1.104",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 10,
    "num_of_records_per_req": 1000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db1",
            "drop": "yes",
            "replica": 1,
            "days": 10,
            "cache": 50,
            "blocks": 8,
            "precision": "ms",
            "keep": 3650,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "no",
            "childtable_count": 10,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 10000,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1",
            "child_table_exists": "no",
            "childtable_count": 20,
            "childtable_prefix": "stb01_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 20000,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}


@ -0,0 +1,88 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "192.168.1.104",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 10,
    "num_of_records_per_req": 1000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db2",
            "drop": "yes",
            "replica": 1,
            "days": 10,
            "cache": 50,
            "blocks": 8,
            "precision": "ms",
            "keep": 3650,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "no",
            "childtable_count": 10,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 10000,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1",
            "child_table_exists": "no",
            "childtable_count": 20,
            "childtable_prefix": "stb01_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 20000,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}


@ -0,0 +1,74 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.rowNum = 100000
        self.ts = 1537146000000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert data into the cluster's databases
        os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath)
        os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath)
        # launch the modified taosdemo (taosdemoMul) in the background to run
        # multi-threaded queries over multiple connections
        os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath)

        # delete useless files
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf ./querySystemInfo*")
        os.system("rm -rf cluster/TD-3693/multClient.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -0,0 +1,72 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from time import sleep
from util.log import *
from util.sql import *
from util.dnodes import *
import taos


class TwoClients:
    def initConnection(self):
        self.host = "fct4"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos/"
        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        # query data from the cluster's databases
        conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        cur = conn.cursor()
        tdSql.init(cur, True)
        tdSql.execute("use db2")
        cur.execute("select count (tbname) from stb0")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 20)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 10000)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 100000)
        tdSql.query("select count(*) from stb01_0")
        tdSql.checkData(0, 0, 20000)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 400000)

        tdSql.execute("drop table if exists squerytest")
        tdSql.execute("drop table if exists querytest")
        tdSql.execute('''create stable squerytest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table querytest using squerytest tags('beijing')")
        tdSql.execute("insert into querytest(ts) values(%d)" % (self.ts - 1))
        for i in range(self.rowNum):
            tdSql.execute("insert into querytest values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        # repeat the query round 10 times with a pause between rounds
        for j in range(10):
            tdSql.execute("use db2")
            tdSql.query("select count(*),last(*) from querytest group by col1")
            tdSql.checkRows(10)
            tdSql.checkData(0, 0, 1)
            tdSql.checkData(1, 2, 2)
            tdSql.checkData(1, 3, 1)
            sleep(88)

        tdSql.execute("drop table if exists squerytest")
        tdSql.execute("drop table if exists querytest")


clients = TwoClients()
clients.initConnection()
clients.run()


@ -0,0 +1,15 @@
{
    "filetype": "query",
    "cfgdir": "/etc/taos",
    "host": "192.168.1.104",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "confirm_parameter_prompt": "no",
    "databases": "db2",
    "query_times": 1000000,
    "specified_table_query": {
        "query_interval": 1,
        "concurrent": 100,
        "sqls": [{"sql": "select count(*) from db.stb0", "result": ""}]
    }
}


@ -0,0 +1,57 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
import time
class ClusterTestcase:
    ## test case 32 ##
    def run(self):
        nodes = Nodes()
        nodes.addConfigs("maxVgroupsPerDb", "10")
        nodes.addConfigs("maxTablesPerVnode", "1000")
        nodes.restartAllTaosd()

        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(1)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.execute("use %s" % ctest.dbName)
        tdSql.query("show vgroups")
        dnodes = []
        for i in range(10):
            dnodes.append(int(tdSql.getData(i, 4)))
        s = set(dnodes)
        if len(s) < 3:
            tdLog.exit("cluster is not balanced")
        tdLog.info("cluster is balanced")

        nodes.removeConfigs("maxVgroupsPerDb", "10")
        nodes.removeConfigs("maxTablesPerVnode", "1000")
        nodes.restartAllTaosd()

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,47 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 1, 33 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        tdSql.init(ctest.conn.cursor(), False)

        ## Test case 1 ##
        tdLog.info("Test case 1 repeat %d times" % ctest.repeat)
        for i in range(ctest.repeat):
            tdLog.info("Start Round %d" % (i + 1))
            replica = random.randint(1, 3)
            ctest.createSTable(replica)
            ctest.run()
            tdLog.sleep(10)
            tdSql.query("select count(*) from %s.%s" % (ctest.dbName, ctest.stbName))
            tdSql.checkData(0, 0, ctest.numberOfRecords * ctest.numberOfTables)
            tdLog.info("Round %d completed" % (i + 1))

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,51 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 7 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        tdSql.init(ctest.conn.cursor(), False)

        tdSql.execute("use %s" % ctest.dbName)
        tdSql.query("show vgroups")
        for i in range(10):
            tdSql.checkData(i, 5, "master")

        tdSql.execute("alter database %s replica 2" % ctest.dbName)
        tdLog.sleep(30)
        tdSql.query("show vgroups")
        for i in range(10):
            tdSql.checkData(i, 5, "master")
            tdSql.checkData(i, 7, "slave")

        tdSql.execute("alter database %s replica 3" % ctest.dbName)
        tdLog.sleep(30)
        tdSql.query("show vgroups")
        for i in range(10):
            tdSql.checkData(i, 5, "master")
            tdSql.checkData(i, 7, "slave")
            tdSql.checkData(i, 9, "slave")


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,214 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from fabric import Connection
import random
import time
import logging
class Node:
    def __init__(self, index, username, hostIP, hostName, password, homeDir):
        self.index = index
        self.username = username
        self.hostIP = hostIP
        self.hostName = hostName
        self.homeDir = homeDir
        self.corePath = '/coredump'
        self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)})

    def buildTaosd(self):
        try:
            # Connection.cd() is a context manager; the directory change only
            # applies to run() calls made inside the with-block
            with self.conn.cd("/root/TDinternal/community"):
                self.conn.run("git checkout develop")
                self.conn.run("git pull")
            with self.conn.cd("/root/TDinternal"):
                self.conn.run("git checkout develop")
                self.conn.run("git pull")
            with self.conn.cd("/root/TDinternal/debug"):
                self.conn.run("cmake ..")
                self.conn.run("make")
                self.conn.run("make install")
        except Exception as e:
            print("Build Taosd error for node %d " % self.index)
            logging.exception(e)

    def startTaosd(self):
        try:
            self.conn.run("sudo systemctl start taosd")
        except Exception as e:
            print("Start Taosd error for node %d " % self.index)
            logging.exception(e)

    def stopTaosd(self):
        try:
            self.conn.run("sudo systemctl stop taosd")
        except Exception as e:
            print("Stop Taosd error for node %d " % self.index)
            logging.exception(e)

    def restartTaosd(self):
        try:
            self.conn.run("sudo systemctl restart taosd")
        except Exception as e:
            print("Restart Taosd error for node %d " % self.index)
            logging.exception(e)

    def removeTaosd(self):
        try:
            self.conn.run("rmtaos")
        except Exception as e:
            print("remove taosd error for node %d " % self.index)
            logging.exception(e)

    def forceStopOneTaosd(self):
        try:
            self.conn.run("kill -9 $(ps -ax|grep taosd|awk '{print $1}')")
        except Exception as e:
            print("kill taosd error on node%d " % self.index)
            logging.exception(e)

    def startOneTaosd(self):
        try:
            self.conn.run("nohup taosd -c /etc/taos/ > /dev/null 2>&1 &")
        except Exception as e:
            print("start taosd error on node%d " % self.index)
            logging.exception(e)

    def installTaosd(self, packagePath):
        self.conn.put(packagePath, self.homeDir)
        with self.conn.cd(self.homeDir):
            self.conn.run("tar -zxf $(basename '%s')" % packagePath)
            with self.conn.cd("TDengine-enterprise-server"):
                self.conn.run("yes|./install.sh")

    def configTaosd(self, taosConfigKey, taosConfigValue):
        self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))

    def removeTaosConfig(self, taosConfigKey, taosConfigValue):
        self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))

    def configHosts(self, ip, name):
        self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts'))

    def removeData(self):
        try:
            self.conn.run("sudo rm -rf /var/lib/taos/*")
        except Exception as e:
            print("remove taosd data error for node %d " % self.index)
            logging.exception(e)

    def removeLog(self):
        try:
            self.conn.run("sudo rm -rf /var/log/taos/*")
        except Exception as e:
            print("remove taosd log error for node %d " % self.index)
            logging.exception(e)

    def removeDataForMnode(self):
        try:
            self.conn.run("sudo rm -rf /var/lib/taos/*")
        except Exception as e:
            print("remove mnode data error for node %d " % self.index)
            logging.exception(e)

    def removeDataForVnode(self, id):
        try:
            self.conn.run("sudo rm -rf /var/lib/taos/vnode%d/*.data" % id)
        except Exception as e:
            print("remove vnode data error for node %d " % self.index)
            logging.exception(e)

    def detectCoredumpFile(self):
        try:
            result = self.conn.run("find /coredump -name 'core_*' ", hide=True)
            output = result.stdout
            print("output: %s" % output)
            return output
        except Exception as e:
            print("find coredump file error on node %d " % self.index)
            logging.exception(e)


class Nodes:
    def __init__(self):
        self.tdnodes = []
        self.tdnodes.append(Node(0, 'root', '192.168.17.194', 'taosdata', 'r', '/root/'))
        # self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
        # self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
        # self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
        # self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))

    def stopOneNode(self, index):
        self.tdnodes[index].stopTaosd()
        self.tdnodes[index].forceStopOneTaosd()

    def startOneNode(self, index):
        self.tdnodes[index].startOneTaosd()

    def detectCoredumpFile(self, index):
        return self.tdnodes[index].detectCoredumpFile()

    def stopAllTaosd(self):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].stopTaosd()

    def startAllTaosd(self):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].startTaosd()

    def restartAllTaosd(self):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].restartTaosd()

    def addConfigs(self, configKey, configValue):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].configTaosd(configKey, configValue)

    def removeConfigs(self, configKey, configValue):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].removeTaosConfig(configKey, configValue)

    def removeAllDataFiles(self):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].removeData()


class Test:
    def __init__(self):
        self.nodes = Nodes()

    # kill taosd randomly every 10 mins
    def randomlyKillDnode(self):
        loop = 0
        while True:
            index = random.randint(0, 4)
            print("loop: %d, kill taosd on node%d" % (loop, index))
            self.nodes.stopOneNode(index)
            time.sleep(60)
            self.nodes.startOneNode(index)
            time.sleep(600)
            loop = loop + 1

    def detectCoredump(self):
        # sweep all nodes for core files, then sleep and repeat
        loop = 0
        while True:
            for i in range(len(self.nodes.tdnodes)):
                result = self.nodes.detectCoredumpFile(i)
                print("core file path is %s" % result)
                if result and not result.isspace():
                    self.nodes.stopAllTaosd()
            print("sleep for 10 mins")
            time.sleep(600)


test = Test()
test.detectCoredump()


@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 20, 21, 22 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(3)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)

        nodes.node2.stopTaosd()
        tdSql.execute("use %s" % ctest.dbName)
        tdSql.query("show vgroups")
        vnodeID = tdSql.getData(0, 0)
        nodes.node2.removeDataForVnode(vnodeID)
        nodes.node2.startTaosd()

        # wait for the vnode files to recover
        for i in range(10):
            tdSql.query("select count(*) from t0")
            tdLog.sleep(10)

        for i in range(10):
            tdSql.query("select count(*) from t0")
            tdSql.checkData(0, 0, 1000)

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,47 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## cover test case 5 ##
    def run(self):
        # cluster environment set up
        nodes = Nodes()
        nodes.addConfigs("maxVgroupsPerDb", "10")
        nodes.addConfigs("maxTablesPerVnode", "1000")
        nodes.restartAllTaosd()

        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(1)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.execute("use %s" % ctest.dbName)
        tdSql.error("create table tt1 using %s tags(1)" % ctest.stbName)

        nodes.removeConfigs("maxVgroupsPerDb", "10")
        nodes.removeConfigs("maxTablesPerVnode", "1000")
        nodes.restartAllTaosd()

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,75 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 7, 10 ##
    def run(self):
        # cluster environment set up
        tdLog.info("Test case 7, 10")
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        tdSql.init(ctest.conn.cursor(), False)

        nodes.node1.stopTaosd()
        tdSql.query("show dnodes")
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "offline")
        tdSql.checkData(1, 4, "ready")
        tdSql.checkData(2, 4, "ready")

        nodes.node1.startTaosd()
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "ready")
        tdSql.checkData(1, 4, "ready")
        tdSql.checkData(2, 4, "ready")

        nodes.node2.stopTaosd()
        tdSql.query("show dnodes")
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "ready")
        tdSql.checkData(1, 4, "offline")
        tdSql.checkData(2, 4, "ready")

        nodes.node2.startTaosd()
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "ready")
        tdSql.checkData(1, 4, "ready")
        tdSql.checkData(2, 4, "ready")

        nodes.node3.stopTaosd()
        tdSql.query("show dnodes")
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "ready")
        tdSql.checkData(1, 4, "ready")
        tdSql.checkData(2, 4, "offline")

        nodes.node3.startTaosd()
        tdSql.checkRows(3)
        tdSql.checkData(0, 4, "ready")
        tdSql.checkData(1, 4, "ready")
        tdSql.checkData(2, 4, "ready")

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## cover test case 6, 8, 9, 11 ##
    def run(self):
        # cluster environment set up
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        tdSql.init(ctest.conn.cursor(), False)

        nodes.addConfigs("offlineThreshold", "10")
        nodes.removeAllDataFiles()
        nodes.restartAllTaosd()
        nodes.node3.stopTaosd()

        tdLog.sleep(10)
        tdSql.query("show dnodes")
        tdSql.checkRows(3)
        tdSql.checkData(2, 4, "offline")

        tdLog.sleep(60)
        tdSql.checkRows(3)
        tdSql.checkData(2, 4, "dropping")

        tdLog.sleep(300)
        tdSql.checkRows(2)

        nodes.removeConfigs("offlineThreshold", "10")
        nodes.restartAllTaosd()

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,65 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 28, 29, 30, 31 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(3)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.execute("use %s" % ctest.dbName)

        nodes.node2.stopTaosd()
        for i in range(100):
            tdSql.execute("drop table t%d" % i)
        nodes.node2.startTaosd()
        tdSql.query("show tables")
        tdSql.checkRows(9900)

        nodes.node2.stopTaosd()
        for i in range(10):
            tdSql.execute("create table a%d using meters tags(2)" % i)
        nodes.node2.startTaosd()
        tdSql.query("show tables")
        tdSql.checkRows(9910)

        nodes.node2.stopTaosd()
        tdSql.execute("alter table meters add column col6 int")
        nodes.node2.startTaosd()

        nodes.node2.stopTaosd()
        tdSql.execute("drop database %s" % ctest.dbName)
        nodes.node2.startTaosd()
        tdSql.query("show databases")
        tdSql.checkRows(0)

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,54 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
import time
class ClusterTestcase:
    ## test case 32 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(1)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.execute("use %s" % ctest.dbName)

        totalTime = 0
        for i in range(10):
            startTime = time.time()
            tdSql.query("select * from %s" % ctest.stbName)
            totalTime += time.time() - startTime
        print("replica 1: average query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords, totalTime / 10))

        tdSql.execute("alter database %s replica 3" % ctest.dbName)
        tdLog.sleep(60)
        totalTime = 0
        for i in range(10):
            startTime = time.time()
            tdSql.query("select * from %s" % ctest.stbName)
            totalTime += time.time() - startTime
        print("replica 3: average query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords, totalTime / 10))

        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,45 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 19 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.query("show databases")
        count = tdSql.queryRows

        nodes.stopAllTaosd()
        nodes.node1.startTaosd()
        tdSql.error("show databases")
        nodes.node2.startTaosd()
        tdSql.error("show databases")
        nodes.node3.startTaosd()
        tdLog.sleep(10)
        tdSql.query("show databases")
        tdSql.checkRows(count)


ct = ClusterTestcase()
ct.run()


@ -0,0 +1,48 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from clusterSetup import *
from util.sql import tdSql
from util.log import tdLog
import random
class ClusterTestcase:
    ## test case 17, 18 ##
    def run(self):
        nodes = Nodes()
        ctest = ClusterTest(nodes.node1.hostName)
        ctest.connectDB()
        ctest.createSTable(1)
        ctest.run()
        tdSql.init(ctest.conn.cursor(), False)
        tdSql.query("show databases")
        count = tdSql.queryRows

        tdSql.execute("use %s" % ctest.dbName)
        tdSql.execute("alter database %s replica 3" % ctest.dbName)
        nodes.node2.stopTaosd()
        nodes.node3.stopTaosd()
        tdSql.error("show databases")

        nodes.node2.startTaosd()
        tdSql.error("show databases")

        nodes.node3.startTaosd()
        tdSql.query("show databases")
        tdSql.checkRows(count)


ct = ClusterTestcase()
ct.run()

Some files were not shown because too many files have changed in this diff.