Merge remote-tracking branch 'origin/3.0' into feature/shm
This commit is contained in:
commit 565774b985
@@ -24,6 +24,7 @@
#include "thash.h"
#include "tlist.h"
#include "trow.h"
#include "tuuid.h"

#ifdef __cplusplus
extern "C" {
@@ -171,7 +172,7 @@ typedef struct {
  char    db[TSDB_DB_FNAME_LEN];
  int64_t dbId;
  int32_t vgVersion;
  int32_t numOfTable;  // unit is TSDB_TABLE_NUM_UNIT
} SBuildUseDBInput;

typedef struct SField {
@@ -427,10 +428,10 @@ typedef struct {
    int16_t slotId;
  };

  int16_t type;
  int32_t bytes;
  uint8_t precision;
  uint8_t scale;
} SColumnInfo;

typedef struct {
@@ -526,7 +527,7 @@ typedef struct {
  char    db[TSDB_DB_FNAME_LEN];
  int64_t dbId;
  int32_t vgVersion;
  int32_t numOfTable;  // unit is TSDB_TABLE_NUM_UNIT
} SUseDbReq;

int32_t tSerializeSUseDbReq(void* buf, int32_t bufLen, SUseDbReq* pReq);
@@ -553,15 +554,13 @@ int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);

typedef struct {
  SArray *epSetList;  // SArray<SEpSet>
  SArray* epSetList;  // SArray<SEpSet>
} SQnodeListRsp;

int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
void    tFreeSQnodeListRsp(SQnodeListRsp* pRsp);

typedef struct {
  SArray* pArray;  // Array of SUseDbRsp
} SUseDbBatchRsp;
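Note: callers of these serializers normally use a two-pass pattern, sizing first and then filling a freshly allocated buffer. A minimal sketch, assuming tSerializeSQnodeListRsp supports the same NULL-buffer sizing pass that the MndTestTopic change later in this commit uses for tSerializeSCMCreateTopicReq; the variables here are hypothetical:

  SQnodeListRsp rsp = { .epSetList = epSetList };
  int32_t contLen = tSerializeSQnodeListRsp(NULL, 0, &rsp);  // first pass: compute length only
  void*   pBuf    = rpcMallocCont(contLen);
  tSerializeSQnodeListRsp(pBuf, contLen, &rsp);              // second pass: fill the buffer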
@ -777,7 +776,6 @@ typedef struct SVgroupInfo {
|
|||
int32_t numOfTable; // unit is TSDB_TABLE_NUM_UNIT
|
||||
} SVgroupInfo;
|
||||
|
||||
|
||||
typedef struct {
|
||||
int32_t numOfVgroups;
|
||||
SVgroupInfo vgroups[];
|
||||
|
@ -1062,8 +1060,8 @@ typedef struct {
|
|||
} STaskStatus;
|
||||
|
||||
typedef struct {
|
||||
int64_t refId;
|
||||
SArray *taskStatus; //SArray<STaskStatus>
|
||||
int64_t refId;
|
||||
SArray* taskStatus; // SArray<STaskStatus>
|
||||
} SSchedulerStatusRsp;
|
||||
|
||||
typedef struct {
|
||||
|
@ -1072,35 +1070,31 @@ typedef struct {
|
|||
int8_t action;
|
||||
} STaskAction;
|
||||
|
||||
|
||||
typedef struct SQueryNodeEpId {
|
||||
int32_t nodeId; // vgId or qnodeId
|
||||
SEp ep;
|
||||
} SQueryNodeEpId;
|
||||
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
uint64_t sId;
|
||||
SQueryNodeEpId epId;
|
||||
SArray *taskAction; //SArray<STaskAction>
|
||||
SArray* taskAction; // SArray<STaskAction>
|
||||
} SSchedulerHbReq;
|
||||
|
||||
int32_t tSerializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *pReq);
|
||||
int32_t tDeserializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *pReq);
|
||||
void tFreeSSchedulerHbReq(SSchedulerHbReq *pReq);
|
||||
|
||||
int32_t tSerializeSSchedulerHbReq(void* buf, int32_t bufLen, SSchedulerHbReq* pReq);
|
||||
int32_t tDeserializeSSchedulerHbReq(void* buf, int32_t bufLen, SSchedulerHbReq* pReq);
|
||||
void tFreeSSchedulerHbReq(SSchedulerHbReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
uint64_t seqId;
|
||||
SQueryNodeEpId epId;
|
||||
SArray *taskStatus; //SArray<STaskStatus>
|
||||
SArray* taskStatus; // SArray<STaskStatus>
|
||||
} SSchedulerHbRsp;
|
||||
|
||||
int32_t tSerializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *pRsp);
|
||||
int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *pRsp);
|
||||
void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp);
|
||||
|
||||
int32_t tSerializeSSchedulerHbRsp(void* buf, int32_t bufLen, SSchedulerHbRsp* pRsp);
|
||||
int32_t tDeserializeSSchedulerHbRsp(void* buf, int32_t bufLen, SSchedulerHbRsp* pRsp);
|
||||
void tFreeSSchedulerHbRsp(SSchedulerHbRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
|
@ -1157,8 +1151,8 @@ typedef struct {
|
|||
char name[TSDB_TOPIC_FNAME_LEN];
|
||||
int8_t igExists;
|
||||
char* sql;
|
||||
char* physicalPlan;
|
||||
char* logicalPlan;
|
||||
char* ast;
|
||||
char subscribeDbName[TSDB_DB_NAME_LEN];
|
||||
} SCMCreateTopicReq;
|
||||
|
||||
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
|
||||
|
@ -1370,7 +1364,7 @@ typedef struct SVCreateTbReq {
|
|||
} SVCreateTbReq, SVUpdateTbReq;
|
||||
|
||||
typedef struct {
|
||||
int tmp; // TODO: to avoid compile error
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} SVCreateTbRsp, SVUpdateTbRsp;
|
||||
|
||||
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
|
||||
|
@ -1382,7 +1376,7 @@ typedef struct {
|
|||
} SVCreateTbBatchReq;
|
||||
|
||||
typedef struct {
|
||||
int tmp; // TODO: to avoid compile error
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} SVCreateTbBatchRsp;
|
||||
|
||||
int32_t tSerializeSVCreateTbBatchReq(void** buf, SVCreateTbBatchReq* pReq);
|
||||
|
@ -1396,7 +1390,7 @@ typedef struct {
|
|||
} SVDropTbReq;
|
||||
|
||||
typedef struct {
|
||||
int tmp; // TODO: to avoid compile error
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} SVDropTbRsp;
|
||||
|
||||
int32_t tSerializeSVDropTbReq(void** buf, SVDropTbReq* pReq);
|
||||
|
@ -1896,33 +1890,18 @@ static FORCE_INLINE void* tDecodeSSchemaWrapper(void* buf, SSchemaWrapper* pSW)
|
|||
}
|
||||
return buf;
|
||||
}
|
||||
typedef enum {
|
||||
TD_TIME_UNIT_UNKNOWN = -1,
|
||||
TD_TIME_UNIT_YEAR = 0,
|
||||
TD_TIME_UNIT_SEASON = 1,
|
||||
TD_TIME_UNIT_MONTH = 2,
|
||||
TD_TIME_UNIT_WEEK = 3,
|
||||
TD_TIME_UNIT_DAY = 4,
|
||||
TD_TIME_UNIT_HOUR = 5,
|
||||
TD_TIME_UNIT_MINUTE = 6,
|
||||
TD_TIME_UNIT_SEC = 7,
|
||||
TD_TIME_UNIT_MILLISEC = 8,
|
||||
TD_TIME_UNIT_MICROSEC = 9,
|
||||
TD_TIME_UNIT_NANOSEC = 10
|
||||
} ETDTimeUnit;
|
||||
|
||||
typedef struct {
|
||||
int8_t version; // for compatibility(default 0)
|
||||
int8_t intervalUnit;
|
||||
int8_t slidingUnit;
|
||||
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
|
||||
int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
|
||||
char indexName[TSDB_INDEX_NAME_LEN];
|
||||
char timezone[TD_TIMEZONE_LEN]; // sma data is invalid if timezone change.
|
||||
char timezone[TD_TIMEZONE_LEN]; // sma data expired if timezone changes.
|
||||
int32_t exprLen;
|
||||
int32_t tagsFilterLen;
|
||||
int64_t indexUid;
|
||||
tb_uid_t tableUid; // super/child/common table uid
|
||||
int64_t interval;
|
||||
int64_t offset;
|
||||
int64_t offset; // use unit by precision of DB
|
||||
int64_t sliding;
|
||||
char* expr; // sma expression
|
||||
char* tagsFilter;
|
||||
|
@ -1934,7 +1913,7 @@ typedef struct {
|
|||
} SVCreateTSmaReq;
|
||||
|
||||
typedef struct {
|
||||
int8_t type; // 0 status report, 1 update data
|
||||
int8_t type; // 0 status report, 1 update data
|
||||
char indexName[TSDB_INDEX_NAME_LEN]; //
|
||||
STimeWindow windows;
|
||||
} STSmaMsg;
|
||||
|
@ -1945,7 +1924,7 @@ typedef struct {
|
|||
} SVDropTSmaReq;
|
||||
|
||||
typedef struct {
|
||||
int tmp; // TODO: to avoid compile error
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} SVCreateTSmaRsp, SVDropTSmaRsp;
|
||||
|
||||
int32_t tSerializeSVCreateTSmaReq(void** buf, SVCreateTSmaReq* pReq);
|
||||
|
@ -1967,7 +1946,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int64_t indexUid;
|
||||
TSKEY skey; // startTS of one interval/sliding
|
||||
TSKEY skey; // startKey of one interval/sliding window
|
||||
int64_t interval;
|
||||
int32_t dataLen; // not including head
|
||||
int8_t intervalUnit;
|
||||
|
@ -2031,7 +2010,7 @@ static FORCE_INLINE int32_t tEncodeTSma(void** buf, const STSma* pSma) {
|
|||
tlen += taosEncodeFixedI64(buf, pSma->interval);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->offset);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->sliding);
|
||||
|
||||
|
||||
if (pSma->exprLen > 0) {
|
||||
tlen += taosEncodeString(buf, pSma->expr);
|
||||
}
|
||||
|
@@ -2267,6 +2246,55 @@ static FORCE_INLINE void* tDecodeSMqCMGetSubEpRsp(void* buf, SMqCMGetSubEpRsp* p
  return buf;
}

enum {
  STREAM_TASK_STATUS__RUNNING = 1,
  STREAM_TASK_STATUS__STOP,
};

typedef struct {
  int64_t streamId;
  int32_t taskId;
  int32_t level;
  int8_t  status;
  char*   qmsg;
  void*   executor;
  // void* stateStore;
  // storage handle
} SStreamTask;

static FORCE_INLINE SStreamTask* streamTaskNew(int64_t streamId, int32_t level) {
  SStreamTask* pTask = (SStreamTask*)calloc(1, sizeof(SStreamTask));
  if (pTask == NULL) {
    return NULL;
  }
  pTask->taskId = tGenIdPI32();
  pTask->status = STREAM_TASK_STATUS__RUNNING;
  pTask->qmsg = NULL;
  return pTask;
}

int32_t tEncodeSStreamTask(SCoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeSStreamTask(SCoder* pDecoder, SStreamTask* pTask);
void    tFreeSStreamTask(SStreamTask* pTask);

typedef struct {
  SMsgHead     head;
  SStreamTask* task;
} SStreamTaskDeployReq;

typedef struct {
  int32_t reserved;
} SStreamTaskDeployRsp;

typedef struct {
  SMsgHead head;
  // TODO: other info needed by task
} SStreamTaskExecReq;

typedef struct {
  int32_t reserved;
} SStreamTaskExecRsp;

#pragma pack(pop)

struct SRpcMsg;
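Note: streamTaskNew() as shown only fills taskId, status, and qmsg; its streamId and level arguments are not yet stored in the struct. A minimal decoding sketch for the new SStreamTask type, mirroring the snode handler later in this commit (a received buffer msg of length msgLen is assumed, names hypothetical):

  SStreamTask* pTask = (SStreamTask*)malloc(sizeof(SStreamTask));
  if (pTask != NULL) {
    SCoder decoder;
    // wrap the raw message in a little-endian decoder and rebuild the task
    tCoderInit(&decoder, TD_LITTLE_ENDIAN, msg, msgLen, TD_DECODER);
    if (tDecodeSStreamTask(&decoder, pTask) < 0) {
      free(pTask);
      pTask = NULL;
    }
    tCoderClear(&decoder);
  }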
@@ -199,6 +199,8 @@ enum {

  // Requests handled by SNODE
  TD_NEW_MSG_SEG(TDMT_SND_MSG)
  TD_DEF_MSG_TYPE(TDMT_SND_TASK_DEPLOY, "snode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
  TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp)

#if defined(TD_MSG_NUMBER_)
  TDMT_MAX
@ -98,13 +98,13 @@ typedef void *SRow;
|
|||
|
||||
typedef struct {
|
||||
TDRowValT valType;
|
||||
void *val;
|
||||
void * val;
|
||||
} SCellVal;
|
||||
|
||||
typedef struct {
|
||||
// TODO
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} STpRow; // tuple
|
||||
int tmp; // TODO: to avoid compile error
|
||||
} STpRow; // tuple
|
||||
|
||||
#pragma pack(push, 1)
|
||||
typedef struct {
|
||||
|
@ -158,8 +158,8 @@ typedef struct {
|
|||
int16_t nBitmaps;
|
||||
int16_t nBoundBitmaps;
|
||||
int32_t offset;
|
||||
void *pBitmap;
|
||||
void *pOffset;
|
||||
void * pBitmap;
|
||||
void * pOffset;
|
||||
int32_t extendedRowSize;
|
||||
} SRowBuilder;
|
||||
|
||||
|
@ -273,7 +273,7 @@ static FORCE_INLINE int32_t tdSetBitmapValType(void *pBitmap, int16_t colIdx, TD
|
|||
}
|
||||
int16_t nBytes = colIdx / TD_VTYPE_PARTS;
|
||||
int16_t nOffset = colIdx & TD_VTYPE_OPTR;
|
||||
char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes);
|
||||
char * pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes);
|
||||
switch (nOffset) {
|
||||
case 0:
|
||||
*pDestByte = ((*pDestByte) & 0x3F) | (valType << 6);
|
||||
|
@ -311,7 +311,7 @@ static FORCE_INLINE int32_t tdGetBitmapValType(void *pBitmap, int16_t colIdx, TD
|
|||
}
|
||||
int16_t nBytes = colIdx / TD_VTYPE_PARTS;
|
||||
int16_t nOffset = colIdx & TD_VTYPE_OPTR;
|
||||
char *pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes);
|
||||
char * pDestByte = (char *)POINTER_SHIFT(pBitmap, nBytes);
|
||||
switch (nOffset) {
|
||||
case 0:
|
||||
*pValType = (((*pDestByte) & 0xC0) >> 6);
|
||||
|
@ -617,7 +617,7 @@ static FORCE_INLINE int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowVa
|
|||
if (tdValIsNorm(valType, val, colType)) {
|
||||
// ts key stored in STSRow.ts
|
||||
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
|
||||
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
char * ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
pColIdx->colId = colId;
|
||||
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
|
||||
|
||||
|
@ -635,7 +635,7 @@ static FORCE_INLINE int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowVa
|
|||
// NULL/None value
|
||||
else {
|
||||
SKvRowIdx *pColIdx = (SKvRowIdx *)POINTER_SHIFT(TD_ROW_COL_IDX(row), offset);
|
||||
char *ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
char * ptr = (char *)POINTER_SHIFT(row, TD_ROW_LEN(row));
|
||||
pColIdx->colId = colId;
|
||||
pColIdx->offset = TD_ROW_LEN(row); // the offset include the TD_ROW_HEAD_LEN
|
||||
const void *nullVal = getNullValue(colType);
|
||||
|
@ -697,9 +697,9 @@ static FORCE_INLINE int32_t tdAppendColValToRow(SRowBuilder *pBuilder, int16_t c
|
|||
}
|
||||
// TODO: We can avoid the type judegement by FP, but would prevent the inline scheme.
|
||||
if (TD_IS_TP_ROW(pRow)) {
|
||||
tdAppendColValToTpRow(pBuilder, valType, val, true, colType, colIdx, offset);
|
||||
tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset);
|
||||
} else {
|
||||
tdAppendColValToKvRow(pBuilder, valType, val, true, colType, colIdx, offset, colId);
|
||||
tdAppendColValToKvRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset, colId);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -771,8 +771,8 @@ static FORCE_INLINE int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, v
|
|||
|
||||
typedef struct {
|
||||
STSchema *pSchema;
|
||||
STSRow *pRow;
|
||||
void *pBitmap;
|
||||
STSRow * pRow;
|
||||
void * pBitmap;
|
||||
uint32_t offset;
|
||||
col_id_t maxColId;
|
||||
col_id_t colIdx; // [PRIMARYKEY_TIMESTAMP_COL_ID, nSchemaCols], PRIMARYKEY_TIMESTAMP_COL_ID equals 1
|
||||
|
@ -877,7 +877,7 @@ static FORCE_INLINE bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colTy
|
|||
// internal
|
||||
static FORCE_INLINE bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx,
|
||||
SCellVal *pVal) {
|
||||
STSRow *pRow = pIter->pRow;
|
||||
STSRow * pRow = pIter->pRow;
|
||||
SKvRowIdx *pKvIdx = NULL;
|
||||
bool colFound = false;
|
||||
col_id_t kvNCols = tdRowGetNCols(pRow);
|
||||
|
@ -1068,7 +1068,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
STSchema *pSchema;
|
||||
STSRow *pRow;
|
||||
STSRow * pRow;
|
||||
} STSRowReader;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -108,53 +108,54 @@
|
|||
#define TK_FULLTEXT 90
|
||||
#define TK_FUNCTION 91
|
||||
#define TK_INTERVAL 92
|
||||
#define TK_MNODES 93
|
||||
#define TK_NK_FLOAT 94
|
||||
#define TK_NK_BOOL 95
|
||||
#define TK_NK_VARIABLE 96
|
||||
#define TK_BETWEEN 97
|
||||
#define TK_IS 98
|
||||
#define TK_NULL 99
|
||||
#define TK_NK_LT 100
|
||||
#define TK_NK_GT 101
|
||||
#define TK_NK_LE 102
|
||||
#define TK_NK_GE 103
|
||||
#define TK_NK_NE 104
|
||||
#define TK_NK_EQ 105
|
||||
#define TK_LIKE 106
|
||||
#define TK_MATCH 107
|
||||
#define TK_NMATCH 108
|
||||
#define TK_IN 109
|
||||
#define TK_FROM 110
|
||||
#define TK_AS 111
|
||||
#define TK_JOIN 112
|
||||
#define TK_INNER 113
|
||||
#define TK_SELECT 114
|
||||
#define TK_DISTINCT 115
|
||||
#define TK_WHERE 116
|
||||
#define TK_PARTITION 117
|
||||
#define TK_BY 118
|
||||
#define TK_SESSION 119
|
||||
#define TK_STATE_WINDOW 120
|
||||
#define TK_SLIDING 121
|
||||
#define TK_FILL 122
|
||||
#define TK_VALUE 123
|
||||
#define TK_NONE 124
|
||||
#define TK_PREV 125
|
||||
#define TK_LINEAR 126
|
||||
#define TK_NEXT 127
|
||||
#define TK_GROUP 128
|
||||
#define TK_HAVING 129
|
||||
#define TK_ORDER 130
|
||||
#define TK_SLIMIT 131
|
||||
#define TK_SOFFSET 132
|
||||
#define TK_LIMIT 133
|
||||
#define TK_OFFSET 134
|
||||
#define TK_ASC 135
|
||||
#define TK_DESC 136
|
||||
#define TK_NULLS 137
|
||||
#define TK_FIRST 138
|
||||
#define TK_LAST 139
|
||||
#define TK_TOPIC 93
|
||||
#define TK_AS 94
|
||||
#define TK_MNODES 95
|
||||
#define TK_NK_FLOAT 96
|
||||
#define TK_NK_BOOL 97
|
||||
#define TK_NK_VARIABLE 98
|
||||
#define TK_BETWEEN 99
|
||||
#define TK_IS 100
|
||||
#define TK_NULL 101
|
||||
#define TK_NK_LT 102
|
||||
#define TK_NK_GT 103
|
||||
#define TK_NK_LE 104
|
||||
#define TK_NK_GE 105
|
||||
#define TK_NK_NE 106
|
||||
#define TK_NK_EQ 107
|
||||
#define TK_LIKE 108
|
||||
#define TK_MATCH 109
|
||||
#define TK_NMATCH 110
|
||||
#define TK_IN 111
|
||||
#define TK_FROM 112
|
||||
#define TK_JOIN 113
|
||||
#define TK_INNER 114
|
||||
#define TK_SELECT 115
|
||||
#define TK_DISTINCT 116
|
||||
#define TK_WHERE 117
|
||||
#define TK_PARTITION 118
|
||||
#define TK_BY 119
|
||||
#define TK_SESSION 120
|
||||
#define TK_STATE_WINDOW 121
|
||||
#define TK_SLIDING 122
|
||||
#define TK_FILL 123
|
||||
#define TK_VALUE 124
|
||||
#define TK_NONE 125
|
||||
#define TK_PREV 126
|
||||
#define TK_LINEAR 127
|
||||
#define TK_NEXT 128
|
||||
#define TK_GROUP 129
|
||||
#define TK_HAVING 130
|
||||
#define TK_ORDER 131
|
||||
#define TK_SLIMIT 132
|
||||
#define TK_SOFFSET 133
|
||||
#define TK_LIMIT 134
|
||||
#define TK_OFFSET 135
|
||||
#define TK_ASC 136
|
||||
#define TK_DESC 137
|
||||
#define TK_NULLS 138
|
||||
#define TK_FIRST 139
|
||||
#define TK_LAST 140
|
||||
|
||||
#define TK_NK_SPACE 300
|
||||
#define TK_NK_COMMENT 301
|
||||
|
|
|
@@ -16,6 +16,7 @@
#ifndef _TD_SNODE_H_
#define _TD_SNODE_H_

#include "tcommon.h"
#include "tmsg.h"
#include "trpc.h"

@@ -75,7 +76,7 @@ int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad);
 * @param pRsp The response message
 * @return int32_t 0 for success, -1 for failure
 */
int32_t sndProcessMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
// int32_t sndProcessMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp);

int32_t sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg);
@ -61,6 +61,12 @@ typedef struct SDropDatabaseStmt {
|
|||
bool ignoreNotExists;
|
||||
} SDropDatabaseStmt;
|
||||
|
||||
typedef struct SAlterDatabaseStmt {
|
||||
ENodeType type;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
SDatabaseOptions* pOptions;
|
||||
} SAlterDatabaseStmt;
|
||||
|
||||
typedef struct STableOptions {
|
||||
ENodeType type;
|
||||
int32_t keep;
|
||||
|
@ -179,11 +185,36 @@ typedef struct SCreateIndexStmt {
|
|||
SIndexOptions* pOptions;
|
||||
} SCreateIndexStmt;
|
||||
|
||||
typedef struct SDropIndexStmt {
|
||||
ENodeType type;
|
||||
char indexName[TSDB_INDEX_NAME_LEN];
|
||||
char tableName[TSDB_TABLE_NAME_LEN];
|
||||
} SDropIndexStmt;
|
||||
|
||||
typedef struct SCreateQnodeStmt {
|
||||
ENodeType type;
|
||||
int32_t dnodeId;
|
||||
} SCreateQnodeStmt;
|
||||
|
||||
typedef struct SDropQnodeStmt {
|
||||
ENodeType type;
|
||||
int32_t dnodeId;
|
||||
} SDropQnodeStmt;
|
||||
|
||||
typedef struct SCreateTopicStmt {
|
||||
ENodeType type;
|
||||
char topicName[TSDB_TABLE_NAME_LEN];
|
||||
char subscribeDbName[TSDB_DB_NAME_LEN];
|
||||
bool ignoreExists;
|
||||
SNode* pQuery;
|
||||
} SCreateTopicStmt;
|
||||
|
||||
typedef struct SDropTopicStmt {
|
||||
ENodeType type;
|
||||
char topicName[TSDB_TABLE_NAME_LEN];
|
||||
bool ignoreNotExists;
|
||||
} SDropTopicStmt;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -77,6 +77,7 @@ typedef enum ENodeType {
  QUERY_NODE_VNODE_MODIF_STMT,
  QUERY_NODE_CREATE_DATABASE_STMT,
  QUERY_NODE_DROP_DATABASE_STMT,
  QUERY_NODE_ALTER_DATABASE_STMT,
  QUERY_NODE_SHOW_DATABASES_STMT,  // temp
  QUERY_NODE_CREATE_TABLE_STMT,
  QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
@@ -98,7 +99,11 @@ typedef enum ENodeType {
  QUERY_NODE_SHOW_MNODES_STMT,
  QUERY_NODE_SHOW_QNODES_STMT,
  QUERY_NODE_CREATE_INDEX_STMT,
  QUERY_NODE_DROP_INDEX_STMT,
  QUERY_NODE_CREATE_QNODE_STMT,
  QUERY_NODE_DROP_QNODE_STMT,
  QUERY_NODE_CREATE_TOPIC_STMT,
  QUERY_NODE_DROP_TOPIC_STMT,

  // logic plan node
  QUERY_NODE_LOGIC_PLAN_SCAN,
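Note: each new ENodeType value pairs with a statement struct added in cmdnodes.h above (for example QUERY_NODE_DROP_INDEX_STMT with SDropIndexStmt, QUERY_NODE_CREATE_TOPIC_STMT with SCreateTopicStmt). A hedged dispatch sketch, not part of the commit, assuming the usual nodeType() accessor on SNode:

  static const char* stmtKindName(const SNode* pNode) {
    switch (nodeType(pNode)) {
      case QUERY_NODE_ALTER_DATABASE_STMT: return "alter-database";
      case QUERY_NODE_DROP_INDEX_STMT:     return "drop-index";
      case QUERY_NODE_CREATE_QNODE_STMT:   return "create-qnode";
      case QUERY_NODE_DROP_QNODE_STMT:     return "drop-qnode";
      case QUERY_NODE_CREATE_TOPIC_STMT:   return "create-topic";
      case QUERY_NODE_DROP_TOPIC_STMT:     return "drop-topic";
      default:                             return "other";
    }
  }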
@@ -355,6 +355,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_IVLD_TAG_VAL         TAOS_DEF_ERROR_CODE(0, 0x0615)
#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW    TAOS_DEF_ERROR_CODE(0, 0x0616)
#define TSDB_CODE_TDB_NO_SMA_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x0617)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR   TAOS_DEF_ERROR_CODE(0, 0x0618)

// query
#define TSDB_CODE_QRY_INVALID_QHANDLE      TAOS_DEF_ERROR_CODE(0, 0x0700)
@@ -402,10 +402,33 @@ static int32_t tDecodeCStrTo(SCoder* pDecoder, char* val) {
  return 0;
}

static FORCE_INLINE int32_t tDecodeBinaryAlloc(SCoder* pDecoder, void** val, uint64_t* len) {
  if (tDecodeU64v(pDecoder, len) < 0) return -1;

  if (TD_CODER_CHECK_CAPACITY_FAILED(pDecoder, *len)) return -1;
  *val = malloc(*len);
  if (*val == NULL) return -1;
  memcpy(*val, TD_CODER_CURRENT(pDecoder), *len);

  TD_CODER_MOVE_POS(pDecoder, *len);
  return 0;
}

static FORCE_INLINE int32_t tDecodeCStrAndLenAlloc(SCoder* pDecoder, char** val, uint64_t* len) {
  if (tDecodeBinaryAlloc(pDecoder, (void**)val, len) < 0) return -1;
  (*len) -= 1;
  return 0;
}

static FORCE_INLINE int32_t tDecodeCStrAlloc(SCoder* pDecoder, char** val) {
  uint64_t len;
  return tDecodeCStrAndLenAlloc(pDecoder, val, &len);
}

static FORCE_INLINE bool tDecodeIsEnd(SCoder* pCoder) { return (pCoder->size == pCoder->pos); }

#ifdef __cplusplus
}
#endif

#endif /*_TD_UTIL_ENCODE_H_*/
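Note on ownership: the new *Alloc decoders copy the payload into a freshly malloc'ed buffer owned by the caller, unlike the plain tDecodeCStr path, which appears to hand back a pointer into the coder's buffer. A minimal sketch with hypothetical names:

  char* sql = NULL;
  // after this call, sql points at heap memory owned by the caller
  if (tDecodeCStrAlloc(&decoder, &sql) < 0) {
    // handle decode failure
  }
  // ... use sql ...
  tfree(sql);  // caller releases the copy, as tFreeSCMCreateStreamReq does later in this commit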
@ -482,7 +482,7 @@ TAOS_RES* tmq_create_topic(TAOS* taos, const char* topicName, const char* sql, i
|
|||
}
|
||||
|
||||
tscDebug("start to create topic, %s", topicName);
|
||||
|
||||
#if 0
|
||||
CHECK_CODE_GOTO(buildRequest(pTscObj, sql, sqlLen, &pRequest), _return);
|
||||
CHECK_CODE_GOTO(parseSql(pRequest, &pQueryNode), _return);
|
||||
|
||||
|
@ -536,7 +536,7 @@ TAOS_RES* tmq_create_topic(TAOS* taos, const char* topicName, const char* sql, i
|
|||
asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
|
||||
tsem_wait(&pRequest->body.rspSem);
|
||||
|
||||
#endif
|
||||
_return:
|
||||
qDestroyQuery(pQueryNode);
|
||||
/*if (sendInfo != NULL) {*/
|
||||
|
|
|
@ -1467,8 +1467,7 @@ int32_t tDeserializeSUseDbReq(void *buf, int32_t bufLen, SUseDbReq *pReq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq) {
|
||||
int32_t tSerializeSQnodeListReq(void *buf, int32_t bufLen, SQnodeListReq *pReq) {
|
||||
SCoder encoder = {0};
|
||||
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
|
||||
|
||||
|
@ -1499,7 +1498,7 @@ int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp)
|
|||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
int32_t num = taosArrayGetSize(pRsp->epSetList);
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SEpSet *epSet = taosArrayGet(pRsp->epSetList, i);
|
||||
if (tEncodeSEpSet(&encoder, epSet) < 0) return -1;
|
||||
|
@ -1990,11 +1989,9 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
|
|||
|
||||
int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
|
||||
int32_t sqlLen = 0;
|
||||
int32_t physicalPlanLen = 0;
|
||||
int32_t logicalPlanLen = 0;
|
||||
int32_t astLen = 0;
|
||||
if (pReq->sql != NULL) sqlLen = (int32_t)strlen(pReq->sql);
|
||||
if (pReq->physicalPlan != NULL) physicalPlanLen = (int32_t)strlen(pReq->physicalPlan);
|
||||
if (pReq->logicalPlan != NULL) logicalPlanLen = (int32_t)strlen(pReq->logicalPlan);
|
||||
if (pReq->ast != NULL) astLen = (int32_t)strlen(pReq->ast);
|
||||
|
||||
SCoder encoder = {0};
|
||||
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
|
||||
|
@ -2003,11 +2000,9 @@ int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTo
|
|||
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, sqlLen) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, physicalPlanLen) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, logicalPlanLen) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->physicalPlan) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->logicalPlan) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, astLen) < 0) return -1;
|
||||
if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
||||
if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
|
@ -2018,8 +2013,7 @@ int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTo
|
|||
|
||||
int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicReq *pReq) {
|
||||
int32_t sqlLen = 0;
|
||||
int32_t physicalPlanLen = 0;
|
||||
int32_t logicalPlanLen = 0;
|
||||
int32_t astLen = 0;
|
||||
|
||||
SCoder decoder = {0};
|
||||
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
|
||||
|
@ -2028,17 +2022,20 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
|
|||
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
|
||||
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &sqlLen) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &physicalPlanLen) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &logicalPlanLen) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &astLen) < 0) return -1;
|
||||
|
||||
pReq->sql = calloc(1, sqlLen + 1);
|
||||
pReq->physicalPlan = calloc(1, physicalPlanLen + 1);
|
||||
pReq->logicalPlan = calloc(1, logicalPlanLen + 1);
|
||||
if (pReq->sql == NULL || pReq->physicalPlan == NULL || pReq->logicalPlan == NULL) return -1;
|
||||
if (sqlLen > 0) {
|
||||
pReq->sql = calloc(1, sqlLen + 1);
|
||||
if (pReq->sql == NULL) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
|
||||
}
|
||||
|
||||
if (astLen > 0) {
|
||||
pReq->ast = calloc(1, astLen + 1);
|
||||
if (pReq->ast == NULL) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
|
||||
}
|
||||
|
||||
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->physicalPlan) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->logicalPlan) < 0) return -1;
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tCoderClear(&decoder);
|
||||
|
@ -2047,8 +2044,7 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
|
|||
|
||||
void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) {
|
||||
tfree(pReq->sql);
|
||||
tfree(pReq->physicalPlan);
|
||||
tfree(pReq->logicalPlan);
|
||||
tfree(pReq->ast);
|
||||
}
|
||||
|
||||
int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) {
|
||||
|
@ -2491,27 +2487,27 @@ int32_t tSerializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *pR
|
|||
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, pReq->sId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->epId.nodeId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, pReq->sId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->epId.nodeId) < 0) return -1;
|
||||
if (tEncodeU16(&encoder, pReq->epId.ep.port) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->epId.ep.fqdn) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->epId.ep.fqdn) < 0) return -1;
|
||||
if (pReq->taskAction) {
|
||||
int32_t num = taosArrayGetSize(pReq->taskAction);
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
STaskAction *action = taosArrayGet(pReq->taskAction, i);
|
||||
if (tEncodeU64(&encoder, action->queryId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, action->taskId) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, action->action) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, action->queryId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, action->taskId) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, action->action) < 0) return -1;
|
||||
}
|
||||
} else {
|
||||
if (tEncodeI32(&encoder, 0) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, 0) < 0) return -1;
|
||||
}
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tCoderClear(&encoder);
|
||||
|
||||
|
||||
if (buf != NULL) {
|
||||
SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen);
|
||||
pHead->vgId = htonl(pReq->header.vgId);
|
||||
|
@ -2559,29 +2555,27 @@ int32_t tDeserializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *
|
|||
|
||||
void tFreeSSchedulerHbReq(SSchedulerHbReq *pReq) { taosArrayDestroy(pReq->taskAction); }
|
||||
|
||||
|
||||
|
||||
int32_t tSerializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *pRsp) {
|
||||
SCoder encoder = {0};
|
||||
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, pRsp->seqId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pRsp->epId.nodeId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, pRsp->seqId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pRsp->epId.nodeId) < 0) return -1;
|
||||
if (tEncodeU16(&encoder, pRsp->epId.ep.port) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pRsp->epId.ep.fqdn) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pRsp->epId.ep.fqdn) < 0) return -1;
|
||||
if (pRsp->taskStatus) {
|
||||
int32_t num = taosArrayGetSize(pRsp->taskStatus);
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
STaskStatus *status = taosArrayGet(pRsp->taskStatus, i);
|
||||
if (tEncodeU64(&encoder, status->queryId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, status->taskId) < 0) return -1;
|
||||
if (tEncodeI64(&encoder, status->refId) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, status->status) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, status->queryId) < 0) return -1;
|
||||
if (tEncodeU64(&encoder, status->taskId) < 0) return -1;
|
||||
if (tEncodeI64(&encoder, status->refId) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, status->status) < 0) return -1;
|
||||
}
|
||||
} else {
|
||||
if (tEncodeI32(&encoder, 0) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, 0) < 0) return -1;
|
||||
}
|
||||
tEndEncode(&encoder);
|
||||
|
||||
|
@ -2680,9 +2674,9 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
|
|||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
|
||||
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
|
||||
if (tDecodeCStr(&decoder, (const char **)&pReq->sql) < 0) return -1;
|
||||
if (tDecodeCStr(&decoder, (const char **)&pReq->physicalPlan) < 0) return -1;
|
||||
if (tDecodeCStr(&decoder, (const char **)&pReq->logicalPlan) < 0) return -1;
|
||||
if (tDecodeCStrAlloc(&decoder, &pReq->sql) < 0) return -1;
|
||||
if (tDecodeCStrAlloc(&decoder, &pReq->physicalPlan) < 0) return -1;
|
||||
if (tDecodeCStrAlloc(&decoder, &pReq->logicalPlan) < 0) return -1;
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tCoderClear(&decoder);
|
||||
|
@ -2694,3 +2688,32 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
|
|||
tfree(pReq->physicalPlan);
|
||||
tfree(pReq->logicalPlan);
|
||||
}
|
||||
|
||||
int32_t tEncodeSStreamTask(SCoder *pEncoder, const SStreamTask *pTask) {
|
||||
if (tStartEncode(pEncoder) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pTask->streamId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pTask->taskId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pTask->level) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pTask->status) < 0) return -1;
|
||||
if (tEncodeCStr(pEncoder, pTask->qmsg) < 0) return -1;
|
||||
tEndEncode(pEncoder);
|
||||
return pEncoder->pos;
|
||||
}
|
||||
|
||||
int32_t tDecodeSStreamTask(SCoder *pDecoder, SStreamTask *pTask) {
|
||||
if (tStartDecode(pDecoder) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pTask->streamId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pTask->taskId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pTask->level) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pTask->status) < 0) return -1;
|
||||
if (tDecodeCStrAlloc(pDecoder, &pTask->qmsg) < 0) return -1;
|
||||
tEndDecode(pDecoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tFreeSStreamTask(SStreamTask *pTask) {
|
||||
// TODO
|
||||
/*free(pTask->qmsg);*/
|
||||
/*free(pTask->executor);*/
|
||||
/*free(pTask);*/
|
||||
}
|
||||
|
|
|
@ -325,8 +325,8 @@ int32_t smProcessDropReq(SDnode *pDnode, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
static void dndProcessSnodeUniqueQueue(SDnode *pDnode, STaosQall *qall, int32_t numOfMsgs) {
|
||||
SSnodeMgmt *pMgmt = &pDnode->smgmt;
|
||||
int32_t code = TSDB_CODE_DND_SNODE_NOT_DEPLOYED;
|
||||
/*SSnodeMgmt *pMgmt = &pDnode->smgmt;*/
|
||||
int32_t code = TSDB_CODE_DND_SNODE_NOT_DEPLOYED;
|
||||
|
||||
SSnode *pSnode = dndAcquireSnode(pDnode);
|
||||
if (pSnode != NULL) {
|
||||
|
@ -336,22 +336,35 @@ static void dndProcessSnodeUniqueQueue(SDnode *pDnode, STaosQall *qall, int32_t
|
|||
|
||||
sndProcessUMsg(pSnode, pMsg);
|
||||
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosFreeQitem(pMsg);
|
||||
}
|
||||
dndReleaseSnode(pDnode, pSnode);
|
||||
} else {
|
||||
for (int32_t i = 0; i < numOfMsgs; i++) {
|
||||
SRpcMsg *pMsg = NULL;
|
||||
taosGetQitem(qall, (void **)&pMsg);
|
||||
SRpcMsg rpcRsp = {.handle = pMsg->handle, .ahandle = pMsg->ahandle, .code = code};
|
||||
rpcSendResponse(&rpcRsp);
|
||||
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosFreeQitem(pMsg);
|
||||
}
|
||||
}
|
||||
dndReleaseSnode(pDnode, pSnode);
|
||||
}
|
||||
|
||||
static void dndProcessSnodeSharedQueue(SDnode *pDnode, SRpcMsg *pMsg) {
|
||||
SSnodeMgmt *pMgmt = &pDnode->smgmt;
|
||||
int32_t code = TSDB_CODE_DND_SNODE_NOT_DEPLOYED;
|
||||
/*SSnodeMgmt *pMgmt = &pDnode->smgmt;*/
|
||||
int32_t code = TSDB_CODE_DND_SNODE_NOT_DEPLOYED;
|
||||
|
||||
SSnode *pSnode = dndAcquireSnode(pDnode);
|
||||
if (pSnode != NULL) {
|
||||
code = sndProcessSMsg(pSnode, pMsg);
|
||||
sndProcessSMsg(pSnode, pMsg);
|
||||
dndReleaseSnode(pDnode, pSnode);
|
||||
} else {
|
||||
SRpcMsg rpcRsp = {.handle = pMsg->handle, .ahandle = pMsg->ahandle, .code = code};
|
||||
rpcSendResponse(&rpcRsp);
|
||||
}
|
||||
dndReleaseSnode(pDnode, pSnode);
|
||||
|
||||
#if 0
|
||||
if (pMsg->msgType & 1u) {
|
||||
|
|
|
@ -85,6 +85,8 @@ typedef enum {
|
|||
TRN_TYPE_REBALANCE = 1017,
|
||||
TRN_TYPE_COMMIT_OFFSET = 1018,
|
||||
TRN_TYPE_CREATE_STREAM = 1019,
|
||||
TRN_TYPE_DROP_STREAM = 1020,
|
||||
TRN_TYPE_ALTER_STREAM = 1021,
|
||||
TRN_TYPE_BASIC_SCOPE_END,
|
||||
TRN_TYPE_GLOBAL_SCOPE = 2000,
|
||||
TRN_TYPE_CREATE_DNODE = 2001,
|
||||
|
@ -679,12 +681,6 @@ static FORCE_INLINE void* tDecodeSMqConsumerObj(void* buf, SMqConsumerObj* pCons
|
|||
return buf;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int32_t taskId;
|
||||
int32_t level;
|
||||
SSubplan* plan;
|
||||
} SStreamTaskMeta;
|
||||
|
||||
typedef struct {
|
||||
char name[TSDB_TOPIC_FNAME_LEN];
|
||||
char db[TSDB_DB_FNAME_LEN];
|
||||
|
@ -700,7 +696,7 @@ typedef struct {
|
|||
char* sql;
|
||||
char* logicalPlan;
|
||||
char* physicalPlan;
|
||||
SArray* tasks; // SArray<SArray<SStreamTaskMeta>>
|
||||
SArray* tasks; // SArray<SArray<SStreamTask>>
|
||||
} SStreamObj;
|
||||
|
||||
int32_t tEncodeSStreamObj(SCoder* pEncoder, const SStreamObj* pObj);
|
||||
|
|
|
@@ -27,6 +27,8 @@ void mndCleanupScheduler(SMnode* pMnode);

int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscribeObj* pSub);

int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream);

#ifdef __cplusplus
}
#endif
@@ -39,8 +39,8 @@ int32_t tDecodeSStreamObj(SCoder *pDecoder, SStreamObj *pObj) {
  if (tDecodeI64(pDecoder, &pObj->dbUid) < 0) return -1;
  if (tDecodeI32(pDecoder, &pObj->version) < 0) return -1;
  if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1;
  if (tDecodeCStr(pDecoder, (const char **)&pObj->sql) < 0) return -1;
  if (tDecodeCStr(pDecoder, (const char **)&pObj->logicalPlan) < 0) return -1;
  if (tDecodeCStr(pDecoder, (const char **)&pObj->physicalPlan) < 0) return -1;
  if (tDecodeCStrAlloc(pDecoder, &pObj->sql) < 0) return -1;
  if (tDecodeCStrAlloc(pDecoder, &pObj->logicalPlan) < 0) return -1;
  if (tDecodeCStrAlloc(pDecoder, &pObj->physicalPlan) < 0) return -1;
  return 0;
}
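Note: switching from tDecodeCStr to tDecodeCStrAlloc makes pObj->sql, logicalPlan, and physicalPlan own heap copies rather than pointing into the decode buffer, so the SStreamObj stays valid after the raw sdb row is released. The matching cleanup is not shown in this commit; a hypothetical sketch would be along the lines of:

  tfree(pObj->sql);
  tfree(pObj->logicalPlan);
  tfree(pObj->physicalPlan);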
@ -31,7 +31,7 @@
|
|||
#include "tname.h"
|
||||
#include "tuuid.h"
|
||||
|
||||
int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
||||
int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
|
||||
SSdb* pSdb = pMnode->pSdb;
|
||||
SVgObj* pVgroup = NULL;
|
||||
SQueryPlan* pPlan = qStringToQueryPlan(pStream->physicalPlan);
|
||||
|
@ -41,17 +41,18 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
}
|
||||
ASSERT(pStream->vgNum == 0);
|
||||
|
||||
int32_t levelNum = LIST_LENGTH(pPlan->pSubplans);
|
||||
pStream->tasks = taosArrayInit(levelNum, sizeof(SArray));
|
||||
int32_t totLevel = LIST_LENGTH(pPlan->pSubplans);
|
||||
pStream->tasks = taosArrayInit(totLevel, sizeof(SArray));
|
||||
|
||||
for (int32_t i = 0; i < levelNum; i++) {
|
||||
SArray* taskOneLevel = taosArrayInit(0, sizeof(SStreamTaskMeta));
|
||||
SNodeListNode* inner = nodesListGetNode(pPlan->pSubplans, i);
|
||||
int32_t msgLen;
|
||||
for (int32_t level = 0; level < totLevel; level++) {
|
||||
SArray* taskOneLevel = taosArrayInit(0, sizeof(SStreamTask));
|
||||
SNodeListNode* inner = nodesListGetNode(pPlan->pSubplans, level);
|
||||
int32_t opNum = LIST_LENGTH(inner->pNodeList);
|
||||
ASSERT(opNum == 1);
|
||||
|
||||
SSubplan* plan = nodesListGetNode(inner->pNodeList, 0);
|
||||
if (i == 0) {
|
||||
SSubplan* plan = nodesListGetNode(inner->pNodeList, level);
|
||||
if (level == 0) {
|
||||
ASSERT(plan->type == SUBPLAN_TYPE_SCAN);
|
||||
void* pIter = NULL;
|
||||
while (1) {
|
||||
|
@ -63,15 +64,19 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
}
|
||||
|
||||
pStream->vgNum++;
|
||||
// send to vnode
|
||||
|
||||
SStreamTask* pTask = streamTaskNew(pStream->uid, level);
|
||||
|
||||
plan->execNode.nodeId = pVgroup->vgId;
|
||||
plan->execNode.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
SStreamTaskMeta task = {
|
||||
.taskId = tGenIdPI32(),
|
||||
.level = i,
|
||||
.plan = plan,
|
||||
};
|
||||
// send to vnode
|
||||
taosArrayPush(taskOneLevel, &task);
|
||||
if (qSubPlanToString(plan, &pTask->qmsg, &msgLen) < 0) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
qDestroyQueryPlan(pPlan);
|
||||
terrno = TSDB_CODE_QRY_INVALID_INPUT;
|
||||
return -1;
|
||||
}
|
||||
taosArrayPush(taskOneLevel, pTask);
|
||||
}
|
||||
} else if (plan->subplanType == SUBPLAN_TYPE_SCAN) {
|
||||
// duplicatable
|
||||
|
@ -82,22 +87,36 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
// if has snode, set to shared thread num in snode
|
||||
parallel = SND_SHARED_THREAD_NUM;
|
||||
|
||||
for (int32_t j = 0; j < parallel; j++) {
|
||||
SStreamTaskMeta task = {
|
||||
.taskId = tGenIdPI32(),
|
||||
.level = i,
|
||||
.plan = plan,
|
||||
};
|
||||
taosArrayPush(taskOneLevel, &task);
|
||||
for (int32_t i = 0; i < parallel; i++) {
|
||||
SStreamTask* pTask = streamTaskNew(pStream->uid, level);
|
||||
|
||||
// TODO:get snode id and ep
|
||||
plan->execNode.nodeId = pVgroup->vgId;
|
||||
plan->execNode.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
|
||||
if (qSubPlanToString(plan, &pTask->qmsg, &msgLen) < 0) {
|
||||
qDestroyQueryPlan(pPlan);
|
||||
terrno = TSDB_CODE_QRY_INVALID_INPUT;
|
||||
return -1;
|
||||
}
|
||||
|
||||
taosArrayPush(taskOneLevel, pTask);
|
||||
}
|
||||
} else {
|
||||
// not duplicatable
|
||||
SStreamTaskMeta task = {
|
||||
.taskId = tGenIdPI32(),
|
||||
.level = i,
|
||||
.plan = plan,
|
||||
};
|
||||
taosArrayPush(taskOneLevel, &task);
|
||||
SStreamTask* pTask = streamTaskNew(pStream->uid, level);
|
||||
|
||||
// TODO:get snode id and ep
|
||||
plan->execNode.nodeId = pVgroup->vgId;
|
||||
plan->execNode.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
|
||||
if (qSubPlanToString(plan, &pTask->qmsg, &msgLen) < 0) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
qDestroyQueryPlan(pPlan);
|
||||
terrno = TSDB_CODE_QRY_INVALID_INPUT;
|
||||
return -1;
|
||||
}
|
||||
taosArrayPush(taskOneLevel, pTask);
|
||||
}
|
||||
taosArrayPush(pStream->tasks, taskOneLevel);
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndScheduler.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
#include "mndTrans.h"
|
||||
|
@ -237,6 +238,12 @@ static int32_t mndCreateStream(SMnode *pMnode, SNodeMsg *pReq, SCMCreateStreamRe
|
|||
}
|
||||
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
|
||||
|
||||
if (mndScheduleStream(pMnode, pTrans, &streamObj) < 0) {
|
||||
mError("stream:%ld, schedule stream since %s", streamObj.uid, terrstr());
|
||||
mndTransDrop(pTrans);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) {
|
||||
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
|
||||
mndTransDrop(pTrans);
|
||||
|
|
|
@@ -236,6 +236,29 @@ static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) {
  return 0;
}

static int32_t mndGetPlanString(SCMCreateTopicReq *pCreate, char **pStr) {
  if (NULL == pCreate->ast) {
    return TSDB_CODE_SUCCESS;
  }

  SNode*  pAst = NULL;
  int32_t code = nodesStringToNode(pCreate->ast, &pAst);

  SQueryPlan* pPlan = NULL;
  if (TSDB_CODE_SUCCESS == code) {
    SPlanContext cxt = { .pAstRoot = pAst, .streamQuery = true };
    code = qCreateQueryPlan(&cxt, &pPlan, NULL);
  }

  if (TSDB_CODE_SUCCESS == code) {
    code = nodesNodeToString(pPlan, false, pStr, NULL);
  }
  nodesDestroyNode(pAst);
  nodesDestroyNode(pPlan);
  terrno = code;
  return code;
}

static int32_t mndCreateTopic(SMnode *pMnode, SNodeMsg *pReq, SCMCreateTopicReq *pCreate, SDbObj *pDb) {
  mDebug("topic:%s to create", pCreate->name);
  SMqTopicObj topicObj = {0};
@ -247,13 +270,23 @@ static int32_t mndCreateTopic(SMnode *pMnode, SNodeMsg *pReq, SCMCreateTopicReq
|
|||
topicObj.dbUid = pDb->uid;
|
||||
topicObj.version = 1;
|
||||
topicObj.sql = pCreate->sql;
|
||||
topicObj.physicalPlan = pCreate->physicalPlan;
|
||||
topicObj.logicalPlan = pCreate->logicalPlan;
|
||||
topicObj.physicalPlan = "";
|
||||
topicObj.logicalPlan = "";
|
||||
topicObj.sqlLen = strlen(pCreate->sql);
|
||||
|
||||
char* pPlanStr = NULL;
|
||||
if (TSDB_CODE_SUCCESS != mndGetPlanString(pCreate, &pPlanStr)) {
|
||||
mError("topic:%s, failed to get plan since %s", pCreate->name, terrstr());
|
||||
return -1;
|
||||
}
|
||||
if (NULL != pPlanStr) {
|
||||
topicObj.physicalPlan = pPlanStr;
|
||||
}
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_TOPIC, &pReq->rpcMsg);
|
||||
if (pTrans == NULL) {
|
||||
mError("topic:%s, failed to create since %s", pCreate->name, terrstr());
|
||||
tfree(pPlanStr);
|
||||
return -1;
|
||||
}
|
||||
mDebug("trans:%d, used to create topic:%s", pTrans->id, pCreate->name);
|
||||
|
@ -261,6 +294,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SNodeMsg *pReq, SCMCreateTopicReq
|
|||
SSdbRaw *pRedoRaw = mndTopicActionEncode(&topicObj);
|
||||
if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) {
|
||||
mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr());
|
||||
tfree(pPlanStr);
|
||||
mndTransDrop(pTrans);
|
||||
return -1;
|
||||
}
|
||||
|
@ -268,10 +302,12 @@ static int32_t mndCreateTopic(SMnode *pMnode, SNodeMsg *pReq, SCMCreateTopicReq
|
|||
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) {
|
||||
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
|
||||
tfree(pPlanStr);
|
||||
mndTransDrop(pTrans);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tfree(pPlanStr);
|
||||
mndTransDrop(pTrans);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -65,8 +65,7 @@ void* MndTestTopic::BuildCreateTopicReq(const char* topicName, const char* sql,
  strcpy(createReq.name, topicName);
  createReq.igExists = 0;
  createReq.sql = (char*)sql;
  createReq.physicalPlan = (char*)"physicalPlan";
  createReq.logicalPlan = (char*)"logicalPlan";
  createReq.ast = NULL;

  int32_t contLen = tSerializeSCMCreateTopicReq(NULL, 0, &createReq);
  void*   pReq = rpcMallocCont(contLen);
@@ -7,8 +7,9 @@ target_include_directories(
)
target_link_libraries(
    snode
    PRIVATE executor
    PRIVATE transport
    PRIVATE os
    PRIVATE common
    PRIVATE util
)
@ -38,13 +38,8 @@ enum {
|
|||
STREAM_STATUS__DELETING,
|
||||
};
|
||||
|
||||
enum {
|
||||
STREAM_TASK_STATUS__RUNNING = 1,
|
||||
STREAM_TASK_STATUS__STOP,
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
SHashObj* pHash; // taskId -> streamTask
|
||||
SHashObj* pHash; // taskId -> SStreamTask
|
||||
} SStreamMeta;
|
||||
|
||||
typedef struct SSnode {
|
||||
|
@ -52,26 +47,17 @@ typedef struct SSnode {
|
|||
SSnodeOpt cfg;
|
||||
} SSnode;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
int32_t IdxInLevel;
|
||||
int32_t level;
|
||||
} SStreamTaskInfo;
|
||||
SStreamMeta* sndMetaNew();
|
||||
void sndMetaDelete(SStreamMeta* pMeta);
|
||||
|
||||
typedef struct {
|
||||
SStreamTaskInfo meta;
|
||||
int8_t status;
|
||||
void* executor;
|
||||
void* stateStore;
|
||||
// storage handle
|
||||
} SStreamTask;
|
||||
int32_t sndMetaDeployTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||
SStreamTask* sndMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
int32_t sndMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
|
||||
int32_t sndCreateTask();
|
||||
int32_t sndDropTaskOfStream(int64_t streamId);
|
||||
int32_t sndDropTaskOfStream(SStreamMeta* pMeta, int64_t streamId);
|
||||
|
||||
int32_t sndStopTaskOfStream(int64_t streamId);
|
||||
int32_t sndResumeTaskOfStream(int64_t streamId);
|
||||
int32_t sndStopTaskOfStream(SStreamMeta* pMeta, int64_t streamId);
|
||||
int32_t sndResumeTaskOfStream(SStreamMeta* pMeta, int64_t streamId);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -13,44 +13,116 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "executor.h"
|
||||
#include "sndInt.h"
|
||||
#include "tuuid.h"
|
||||
|
||||
SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) {
|
||||
SSnode *pSnode = calloc(1, sizeof(SSnode));
|
||||
if (pSnode == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
memcpy(&pSnode->cfg, pOption, sizeof(SSnodeOpt));
|
||||
pSnode->pMeta = sndMetaNew();
|
||||
if (pSnode->pMeta == NULL) {
|
||||
free(pSnode);
|
||||
return NULL;
|
||||
}
|
||||
return pSnode;
|
||||
}
|
||||
|
||||
void sndClose(SSnode *pSnode) { free(pSnode); }
|
||||
void sndClose(SSnode *pSnode) {
|
||||
sndMetaDelete(pSnode->pMeta);
|
||||
free(pSnode);
|
||||
}
|
||||
|
||||
int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad) { return 0; }
|
||||
|
||||
int32_t sndProcessMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
|
||||
*pRsp = NULL;
|
||||
return 0;
|
||||
}
|
||||
/*int32_t sndProcessMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {*/
|
||||
/**pRsp = NULL;*/
|
||||
/*return 0;*/
|
||||
/*}*/
|
||||
|
||||
void sndDestroy(const char *path) {}
|
||||
|
||||
static int32_t sndDeployTask(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
SStreamTask *task = malloc(sizeof(SStreamTask));
|
||||
if (task == NULL) {
|
||||
SStreamMeta *sndMetaNew() {
|
||||
SStreamMeta *pMeta = calloc(1, sizeof(SStreamMeta));
|
||||
if (pMeta == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
pMeta->pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
if (pMeta->pHash == NULL) {
|
||||
free(pMeta);
|
||||
return NULL;
|
||||
}
|
||||
return pMeta;
|
||||
}
|
||||
|
||||
void sndMetaDelete(SStreamMeta *pMeta) {
|
||||
taosHashCleanup(pMeta->pHash);
|
||||
free(pMeta);
|
||||
}
|
||||
|
||||
int32_t sndMetaDeployTask(SStreamMeta *pMeta, SStreamTask *pTask) {
|
||||
pTask->executor = qCreateStreamExecTaskInfo(pTask->qmsg, NULL);
|
||||
return taosHashPut(pMeta->pHash, &pTask->taskId, sizeof(int32_t), pTask, sizeof(void *));
|
||||
}
|
||||
|
||||
SStreamTask *sndMetaGetTask(SStreamMeta *pMeta, int32_t taskId) {
|
||||
return taosHashGet(pMeta->pHash, &taskId, sizeof(int32_t));
|
||||
}
|
||||
|
||||
int32_t sndMetaRemoveTask(SStreamMeta *pMeta, int32_t taskId) {
|
||||
SStreamTask *pTask = taosHashGet(pMeta->pHash, &taskId, sizeof(int32_t));
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
free(pTask->qmsg);
|
||||
// TODO:free executor
|
||||
free(pTask);
|
||||
return taosHashRemove(pMeta->pHash, &taskId, sizeof(int32_t));
|
||||
}
|
||||
|
||||
static int32_t sndProcessTaskExecReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
SMsgHead *pHead = pMsg->pCont;
|
||||
int32_t taskId = pHead->streamTaskId;
|
||||
SStreamTask *pTask = sndMetaGetTask(pSnode->pMeta, taskId);
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
task->meta.taskId = tGenIdPI32();
|
||||
taosHashPut(pSnode->pMeta->pHash, &task->meta.taskId, sizeof(int32_t), &task, sizeof(void *));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
// stream deployment
|
||||
// stream deploy
|
||||
// stream stop/resume
|
||||
// operator exec
|
||||
if (pMsg->msgType == TDMT_SND_TASK_DEPLOY) {
|
||||
void *msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
SStreamTask *pTask = malloc(sizeof(SStreamTask));
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
SCoder decoder;
|
||||
tCoderInit(&decoder, TD_LITTLE_ENDIAN, msg, pMsg->contLen - sizeof(SMsgHead), TD_DECODER);
|
||||
tDecodeSStreamTask(&decoder, pTask);
|
||||
tCoderClear(&decoder);
|
||||
|
||||
sndMetaDeployTask(pSnode->pMeta, pTask);
|
||||
} else if (pMsg->msgType == TDMT_SND_TASK_EXEC) {
|
||||
sndProcessTaskExecReq(pSnode, pMsg);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
// operator exec
|
||||
if (pMsg->msgType == TDMT_SND_TASK_EXEC) {
|
||||
sndProcessTaskExecReq(pSnode, pMsg);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -60,10 +60,11 @@ struct STsdb {
  SSmaEnv *pRSmaEnv;
};

#define REPO_ID(r)         ((r)->vgId)
#define REPO_CFG(r)        (&(r)->config)
#define REPO_FS(r)         (r)->fs
#define IS_REPO_LOCKED(r)  (r)->repoLocked
#define REPO_SMA_ENV(r, t) ((TSDB_SMA_TYPE_ROLLUP == (t)) ? (r)->pRSmaEnv : (r)->pTSmaEnv)

int tsdbLockRepo(STsdb *pTsdb);
int tsdbUnlockRepo(STsdb *pTsdb);
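Note: REPO_SMA_ENV selects between the rollup and time-range SMA environments on the repo handle, which lets later code stay agnostic to the SMA type. A one-line usage sketch, matching how it is used further down in this commit:

  SSmaEnv *pEnv = REPO_SMA_ENV(pTsdb, smaType);  // pRSmaEnv for TSDB_SMA_TYPE_ROLLUP, otherwise pTSmaEnv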
@@ -68,8 +68,8 @@ int32_t tsdbOpenBDBEnv(DB_ENV **ppEnv, const char *path) {

  ret = pEnv->open(pEnv, path, DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL, 0);
  if (ret != 0) {
    // BDB_PERR("Failed to open tsdb env", ret);
    tsdbWarn("Failed to open tsdb env for path %s since %d", path ? path : "NULL", ret);
    terrno = TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR;
    tsdbWarn("Failed to open tsdb env for path %s since ret %d != 0", path ? path : "NULL", ret);
    return -1;
  }
@@ -23,8 +23,8 @@ static const char *TSDB_FNAME_SUFFIX[] = {
    "smal",  // TSDB_FILE_SMAL
    "",      // TSDB_FILE_MAX
    "meta",  // TSDB_FILE_META
    "sma",   // TSDB_FILE_TSMA(directory name)
    "sma",   // TSDB_FILE_RSMA(directory name)
    "tsma",  // TSDB_FILE_TSMA
    "rsma",  // TSDB_FILE_RSMA
};

static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname);
@@ -15,9 +15,15 @@

#include "tsdbDef.h"

static const char *TSDB_SMA_DNAME[] = {
    "",      // TSDB_SMA_TYPE_BLOCK
    "tsma",  // TSDB_SMA_TYPE_TIME_RANGE
    "rsma",  // TSDB_SMA_TYPE_ROLLUP
};

#undef SMA_PRINT_DEBUG_LOG
#define SMA_STORAGE_TSDB_DAYS   30
#define SMA_STORAGE_TSDB_TIMES  30
#define SMA_STORAGE_TSDB_TIMES  10
#define SMA_STORAGE_SPLIT_HOURS 24
#define SMA_KEY_LEN             18  // tableUid_colId_TSKEY 8+2+8
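Note: the 18-byte SMA key packs the table uid (8 bytes), column id (2 bytes), and window start TSKEY (8 bytes), which is why the debug logging later in this commit reads the column id at offset 8 and the key at offset 10. A minimal sketch of building such a key under that layout; tableUid, colId, and skey are hypothetical local variables:

  char smaKey[SMA_KEY_LEN];
  memcpy(smaKey, &tableUid, sizeof(tb_uid_t));   // bytes 0..7  : table uid
  memcpy(smaKey + 8, &colId, sizeof(uint16_t));  // bytes 8..9  : column id
  memcpy(smaKey + 10, &skey, sizeof(TSKEY));     // bytes 10..17: window start key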
@ -93,6 +99,11 @@ static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLe
|
|||
static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, STSmaDataWrapper *pData, int32_t storageLevel, int32_t fid);
|
||||
static int32_t tsdbInitTSmaFile(STSmaReadH *pSmaH, TSKEY skey);
|
||||
static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey);
|
||||
static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]);
|
||||
|
||||
static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]) {
|
||||
snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/%s", vgId, TSDB_SMA_DNAME[smaType]);
|
||||
}
|
||||
|
||||
static SSmaEnv *tsdbNewSmaEnv(const STsdb *pTsdb, const char *path) {
|
||||
SSmaEnv *pEnv = NULL;
|
||||
|
@ -136,7 +147,7 @@ static int32_t tsdbInitSmaEnv(STsdb *pTsdb, const char *path, SSmaEnv **pEnv) {
|
|||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (pEnv && *pEnv) {
|
||||
if (*pEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -144,7 +155,7 @@ static int32_t tsdbInitSmaEnv(STsdb *pTsdb, const char *path, SSmaEnv **pEnv) {
|
|||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (*pEnv == NULL) {
|
||||
if (*pEnv == NULL) { // 2nd phase check
|
||||
if ((*pEnv = tsdbNewSmaEnv(pTsdb, path)) == NULL) {
|
||||
tsdbUnlockRepo(pTsdb);
|
||||
return TSDB_CODE_FAILED;
|
||||
|
@ -152,7 +163,7 @@ static int32_t tsdbInitSmaEnv(STsdb *pTsdb, const char *path, SSmaEnv **pEnv) {
|
|||
}
|
||||
|
||||
if (tsdbUnlockRepo(pTsdb) != 0) {
|
||||
tsdbFreeSmaEnv(*pEnv);
|
||||
*pEnv = tsdbFreeSmaEnv(*pEnv);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -244,6 +255,39 @@ int32_t tsdbDestroySmaState(SSmaStat *pSmaStat) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t tsdbCheckAndInitSmaEnv(STsdb *pTsdb, int8_t smaType) {
|
||||
switch (smaType) {
|
||||
case TSDB_SMA_TYPE_TIME_RANGE:
|
||||
if (pTsdb->pTSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case TSDB_SMA_TYPE_ROLLUP:
|
||||
if (pTsdb->pRSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
// SDiskID did = {0};
|
||||
SSmaEnv *pEnv = NULL;
|
||||
char smaPath[TSDB_FILENAME_LEN] = "/proj/.sma/";
|
||||
if (tsdbInitSmaEnv(pTsdb, smaPath, &pEnv) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
|
||||
pTsdb->pTSmaEnv = pEnv;
|
||||
} else {
|
||||
pTsdb->pRSmaEnv = pEnv;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Update expired window according to msg from stream computing module.
|
||||
*
|
||||
|
@ -253,26 +297,17 @@ int32_t tsdbDestroySmaState(SSmaStat *pSmaStat) {
|
|||
* @return int32_t
|
||||
*/
|
||||
int32_t tsdbUpdateExpiredWindow(STsdb *pTsdb, int8_t smaType, char *msg) {
|
||||
STsdbCfg *pCfg = REPO_CFG(pTsdb);
|
||||
SSmaEnv * pEnv = NULL;
|
||||
|
||||
if (!msg || !pTsdb->pMeta) {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
char smaPath[TSDB_FILENAME_LEN] = "/proj/.sma/";
|
||||
if (tsdbInitSmaEnv(pTsdb, smaPath, &pEnv) != TSDB_CODE_SUCCESS) {
|
||||
if (tsdbCheckAndInitSmaEnv(pTsdb, smaType) != TSDB_CODE_SUCCESS) {
|
||||
terrno = TSDB_CODE_TDB_INIT_FAILED;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
|
||||
pTsdb->pTSmaEnv = pEnv;
|
||||
} else if (smaType == TSDB_SMA_TYPE_ROLLUP) {
|
||||
pTsdb->pRSmaEnv = pEnv;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
SSmaEnv *pEnv = REPO_SMA_ENV(pTsdb, smaType);
|
||||
|
||||
// TODO: decode the msg => start
|
||||
int64_t indexUid = SMA_TEST_INDEX_UID;
|
||||
|
@ -308,7 +343,6 @@ int32_t tsdbUpdateExpiredWindow(STsdb *pTsdb, int8_t smaType, char *msg) {
|
|||
}
|
||||
pItem->pSma = pSma;
|
||||
|
||||
// TODO: change indexName to indexUid
|
||||
if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
|
||||
// If an error occurs while putting the smaStatItem, free pItem's resources
|
||||
taosHashCleanup(pItem->expiredWindows);
|
||||
|
@ -378,32 +412,32 @@ static int32_t tsdbResetExpiredWindow(SSmaStat *pStat, int64_t indexUid, TSKEY s
|
|||
static int32_t tsdbGetSmaStorageLevel(int64_t interval, int8_t intervalUnit) {
|
||||
// TODO: configurable for SMA_STORAGE_SPLIT_HOURS?
|
||||
switch (intervalUnit) {
|
||||
case TD_TIME_UNIT_HOUR:
|
||||
case TIME_UNIT_HOUR:
|
||||
if (interval < SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
break;
|
||||
case TD_TIME_UNIT_MINUTE:
|
||||
case TIME_UNIT_MINUTE:
|
||||
if (interval < 60 * SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
break;
|
||||
case TD_TIME_UNIT_SEC:
|
||||
case TIME_UNIT_SECOND:
|
||||
if (interval < 3600 * SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
break;
|
||||
case TD_TIME_UNIT_MILLISEC:
|
||||
case TIME_UNIT_MILLISECOND:
|
||||
if (interval < 3600 * 1e3 * SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
break;
|
||||
case TD_TIME_UNIT_MICROSEC:
|
||||
case TIME_UNIT_MICROSECOND:
|
||||
if (interval < 3600 * 1e6 * SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
break;
|
||||
case TD_TIME_UNIT_NANOSEC:
|
||||
case TIME_UNIT_NANOSECOND:
|
||||
if (interval < 3600 * 1e9 * SMA_STORAGE_SPLIT_HOURS) {
|
||||
return SMA_STORAGE_LEVEL_DFILESET;
|
||||
}
|
||||
|
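Each case above compares the interval against the SMA_STORAGE_SPLIT_HOURS (24h) threshold expressed in that unit, choosing SMA_STORAGE_LEVEL_DFILESET for sub-day intervals. Two illustrative expectations under that logic (not part of the patch; the non-DFILESET level returned after the switch lies outside this hunk):

    /* tsdbGetSmaStorageLevel(30, TIME_UNIT_MINUTE) : 30 < 60 * 24  -> SMA_STORAGE_LEVEL_DFILESET      */
    /* tsdbGetSmaStorageLevel(48, TIME_UNIT_HOUR)   : 48 >= 24      -> falls through to the other level */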
@ -429,8 +463,8 @@ static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, uint32_t k
|
|||
|
||||
// TODO: insert sma data blocks into B+Tree
|
||||
tsdbDebug("vgId:%d insert sma data blocks into %s: smaKey %" PRIx64 "-%" PRIu16 "-%" PRIx64 ", dataLen %d",
|
||||
REPO_ID(pSmaH->pTsdb), pDBFile->path, *(tb_uid_t *)smaKey, *(uint16_t *)POINTER_SHIFT(smaKey, 8),
|
||||
*(int64_t *)POINTER_SHIFT(smaKey, 10), dataLen);
|
||||
REPO_ID(pSmaH->pTsdb), pDBFile->path, *(tb_uid_t *)smaKey, *(uint16_t *)POINTER_SHIFT(smaKey, 8),
|
||||
*(int64_t *)POINTER_SHIFT(smaKey, 10), dataLen);
|
||||
|
||||
if (tsdbSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen) != 0) {
|
||||
return TSDB_CODE_FAILED;
|
||||
|
@ -447,66 +481,73 @@ static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, uint32_t k
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Approximate value for week/month/year.
|
||||
*
|
||||
* @param interval
|
||||
* @param intervalUnit
|
||||
* @param precision
|
||||
* @return int64_t
|
||||
*/
|
||||
static int64_t tsdbGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision) {
|
||||
if (intervalUnit < TD_TIME_UNIT_MILLISEC) {
|
||||
switch (intervalUnit) {
|
||||
case TD_TIME_UNIT_YEAR:
|
||||
case TD_TIME_UNIT_SEASON:
|
||||
case TD_TIME_UNIT_MONTH:
|
||||
case TD_TIME_UNIT_WEEK:
|
||||
// illegal time unit
|
||||
tsdbError("invalid interval unit: %d\n", intervalUnit);
|
||||
TASSERT(0);
|
||||
break;
|
||||
case TD_TIME_UNIT_DAY: // the interval for tSma calculation must <= day
|
||||
interval *= 86400 * 1e3;
|
||||
break;
|
||||
case TD_TIME_UNIT_HOUR:
|
||||
interval *= 3600 * 1e3;
|
||||
break;
|
||||
case TD_TIME_UNIT_MINUTE:
|
||||
interval *= 60 * 1e3;
|
||||
break;
|
||||
case TD_TIME_UNIT_SEC:
|
||||
interval *= 1e3;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
switch (intervalUnit) {
|
||||
case TIME_UNIT_YEAR: // approximate value
|
||||
interval *= 365 * 86400 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_MONTH: // approximate value
|
||||
interval *= 30 * 86400 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_WEEK: // approximate value
|
||||
interval *= 7 * 86400 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_DAY: // the interval for tSma calculation must <= day
|
||||
interval *= 86400 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_HOUR:
|
||||
interval *= 3600 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_MINUTE:
|
||||
interval *= 60 * 1e3;
|
||||
break;
|
||||
case TIME_UNIT_SECOND:
|
||||
interval *= 1e3;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (precision) {
|
||||
case TSDB_TIME_PRECISION_MILLI:
|
||||
if (TD_TIME_UNIT_MICROSEC == intervalUnit) { // us
|
||||
if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
|
||||
return interval / 1e3;
|
||||
} else if (TD_TIME_UNIT_NANOSEC == intervalUnit) { // nano second
|
||||
} else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
|
||||
return interval / 1e6;
|
||||
} else {
|
||||
return interval;
|
||||
}
|
||||
break;
|
||||
case TSDB_TIME_PRECISION_MICRO:
|
||||
if (TD_TIME_UNIT_MICROSEC == intervalUnit) { // us
|
||||
if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
|
||||
return interval;
|
||||
} else if (TD_TIME_UNIT_NANOSEC == intervalUnit) { // nano second
|
||||
} else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
|
||||
return interval / 1e3;
|
||||
} else {
|
||||
return interval * 1e3;
|
||||
}
|
||||
break;
|
||||
case TSDB_TIME_PRECISION_NANO:
|
||||
if (TD_TIME_UNIT_MICROSEC == intervalUnit) {
|
||||
if (TIME_UNIT_MICROSECOND == intervalUnit) {
|
||||
return interval * 1e3;
|
||||
} else if (TD_TIME_UNIT_NANOSEC == intervalUnit) { // nano second
|
||||
} else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
|
||||
return interval;
|
||||
} else {
|
||||
return interval * 1e6;
|
||||
}
|
||||
break;
|
||||
default: // ms
|
||||
if (TD_TIME_UNIT_MICROSEC == intervalUnit) { // us
|
||||
if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
|
||||
return interval / 1e3;
|
||||
} else if (TD_TIME_UNIT_NANOSEC == intervalUnit) { // nano second
|
||||
} else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
|
||||
return interval / 1e6;
|
||||
} else {
|
||||
return interval;
|
||||
|
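In short, tsdbGetIntervalByPrecision first normalizes the interval to milliseconds per unit (week/month/year approximated as 7/30/365 days), then scales to the target precision. Worked examples under that logic (illustration only):

    /* (2, TIME_UNIT_HOUR, TSDB_TIME_PRECISION_MICRO): 2 * 3600 * 1e3 = 7,200,000 ms -> * 1e3 = 7,200,000,000 us */
    /* (1, TIME_UNIT_WEEK, TSDB_TIME_PRECISION_MILLI): 7 * 86400 * 1e3 = 604,800,000 ms (approximate week)       */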
@ -800,10 +841,19 @@ static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) {
|
|||
static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, STSmaDataWrapper *pData, int64_t indexUid, int64_t interval,
|
||||
int8_t intervalUnit, tb_uid_t tableUid, col_id_t colId, TSKEY querySkey,
|
||||
int32_t nMaxResult) {
|
||||
if (!pTsdb->pTSmaEnv) {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
tsdbWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", REPO_ID(pTsdb));
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
SSmaStatItem *pItem = (SSmaStatItem *)taosHashGet(SMA_ENV_STAT_ITEMS(pTsdb->pTSmaEnv), &indexUid, sizeof(indexUid));
|
||||
if (pItem == NULL) {
|
||||
// mark all window as expired and notify query module to query raw TS data.
|
||||
return TSDB_CODE_SUCCESS;
|
||||
// Normally pItem should not be NULL; if it is, mark all windows as expired and notify the query module to
|
||||
// fetch raw TS data.
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
tsdbWarn("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", REPO_ID(pTsdb), indexUid);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
@ -815,6 +865,7 @@ static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, STSmaDataWrapper *pData, int64_
|
|||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
if (taosHashGet(pItem->expiredWindows, &querySkey, sizeof(TSKEY)) != NULL) {
|
||||
// TODO: mark this window as expired.
|
||||
|
@ -835,8 +886,8 @@ static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, STSmaDataWrapper *pData, int64_
|
|||
tsdbEncodeTSmaKey(tableUid, colId, querySkey, (void **)&pSmaKey);
|
||||
|
||||
tsdbDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIu16 "-%" PRIx64 ", keyLen %d", REPO_ID(pTsdb),
|
||||
tReadH.dFile.path, *(tb_uid_t *)smaKey, *(uint16_t *)POINTER_SHIFT(smaKey, 8),
|
||||
*(int64_t *)POINTER_SHIFT(smaKey, 10), SMA_KEY_LEN);
|
||||
tReadH.dFile.path, *(tb_uid_t *)smaKey, *(uint16_t *)POINTER_SHIFT(smaKey, 8),
|
||||
*(int64_t *)POINTER_SHIFT(smaKey, 10), SMA_KEY_LEN);
|
||||
|
||||
void * result = NULL;
|
||||
uint32_t valueSize = 0;
|
||||
|
@ -947,7 +998,6 @@ int32_t tsdbUpdateSmaWindow(STsdb *pTsdb, int8_t smaType, char *msg) {
|
|||
* @brief Insert Time-range-wise Rollup Sma(RSma) data
|
||||
*
|
||||
* @param pTsdb
|
||||
* @param param
|
||||
* @param msg
|
||||
* @return int32_t
|
||||
*/
|
||||
|
|
|
@ -37,9 +37,9 @@ TEST(testCase, tSma_Meta_Encode_Decode_Test) {
|
|||
// encode
|
||||
STSma tSma = {0};
|
||||
tSma.version = 0;
|
||||
tSma.intervalUnit = TD_TIME_UNIT_DAY;
|
||||
tSma.intervalUnit = TIME_UNIT_DAY;
|
||||
tSma.interval = 1;
|
||||
tSma.slidingUnit = TD_TIME_UNIT_HOUR;
|
||||
tSma.slidingUnit = TIME_UNIT_HOUR;
|
||||
tSma.sliding = 0;
|
||||
tstrncpy(tSma.indexName, "sma_index_test", TSDB_INDEX_NAME_LEN);
|
||||
tstrncpy(tSma.timezone, "Asia/Shanghai", TD_TIMEZONE_LEN);
|
||||
|
@ -50,37 +50,37 @@ TEST(testCase, tSma_Meta_Encode_Decode_Test) {
|
|||
uint32_t bufLen = tEncodeTSmaWrapper(NULL, &tSmaWrapper);
|
||||
|
||||
void *buf = calloc(bufLen, 1);
|
||||
assert(buf != NULL);
|
||||
ASSERT_NE(buf, nullptr);
|
||||
|
||||
STSmaWrapper *pSW = (STSmaWrapper *)buf;
|
||||
uint32_t len = tEncodeTSmaWrapper(&buf, &tSmaWrapper);
|
||||
|
||||
EXPECT_EQ(len, bufLen);
|
||||
ASSERT_EQ(len, bufLen);
|
||||
|
||||
// decode
|
||||
STSmaWrapper dstTSmaWrapper = {0};
|
||||
void * result = tDecodeTSmaWrapper(pSW, &dstTSmaWrapper);
|
||||
assert(result != NULL);
|
||||
ASSERT_NE(result, nullptr);
|
||||
|
||||
EXPECT_EQ(tSmaWrapper.number, dstTSmaWrapper.number);
|
||||
ASSERT_EQ(tSmaWrapper.number, dstTSmaWrapper.number);
|
||||
|
||||
for (int i = 0; i < tSmaWrapper.number; ++i) {
|
||||
STSma *pSma = tSmaWrapper.tSma + i;
|
||||
STSma *qSma = dstTSmaWrapper.tSma + i;
|
||||
|
||||
EXPECT_EQ(pSma->version, qSma->version);
|
||||
EXPECT_EQ(pSma->intervalUnit, qSma->intervalUnit);
|
||||
EXPECT_EQ(pSma->slidingUnit, qSma->slidingUnit);
|
||||
EXPECT_STRCASEEQ(pSma->indexName, qSma->indexName);
|
||||
EXPECT_STRCASEEQ(pSma->timezone, qSma->timezone);
|
||||
EXPECT_EQ(pSma->indexUid, qSma->indexUid);
|
||||
EXPECT_EQ(pSma->tableUid, qSma->tableUid);
|
||||
EXPECT_EQ(pSma->interval, qSma->interval);
|
||||
EXPECT_EQ(pSma->sliding, qSma->sliding);
|
||||
EXPECT_EQ(pSma->exprLen, qSma->exprLen);
|
||||
EXPECT_STRCASEEQ(pSma->expr, qSma->expr);
|
||||
EXPECT_EQ(pSma->tagsFilterLen, qSma->tagsFilterLen);
|
||||
EXPECT_STRCASEEQ(pSma->tagsFilter, qSma->tagsFilter);
|
||||
ASSERT_EQ(pSma->version, qSma->version);
|
||||
ASSERT_EQ(pSma->intervalUnit, qSma->intervalUnit);
|
||||
ASSERT_EQ(pSma->slidingUnit, qSma->slidingUnit);
|
||||
ASSERT_STRCASEEQ(pSma->indexName, qSma->indexName);
|
||||
ASSERT_STRCASEEQ(pSma->timezone, qSma->timezone);
|
||||
ASSERT_EQ(pSma->indexUid, qSma->indexUid);
|
||||
ASSERT_EQ(pSma->tableUid, qSma->tableUid);
|
||||
ASSERT_EQ(pSma->interval, qSma->interval);
|
||||
ASSERT_EQ(pSma->sliding, qSma->sliding);
|
||||
ASSERT_EQ(pSma->exprLen, qSma->exprLen);
|
||||
ASSERT_STRCASEEQ(pSma->expr, qSma->expr);
|
||||
ASSERT_EQ(pSma->tagsFilterLen, qSma->tagsFilterLen);
|
||||
ASSERT_STRCASEEQ(pSma->tagsFilter, qSma->tagsFilter);
|
||||
}
|
||||
|
||||
// resource release
|
||||
|
@ -103,9 +103,9 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
// encode
|
||||
STSma tSma = {0};
|
||||
tSma.version = 0;
|
||||
tSma.intervalUnit = TD_TIME_UNIT_DAY;
|
||||
tSma.intervalUnit = TIME_UNIT_DAY;
|
||||
tSma.interval = 1;
|
||||
tSma.slidingUnit = TD_TIME_UNIT_HOUR;
|
||||
tSma.slidingUnit = TIME_UNIT_HOUR;
|
||||
tSma.sliding = 0;
|
||||
tSma.indexUid = indexUid1;
|
||||
tstrncpy(tSma.indexName, smaIndexName1, TSDB_INDEX_NAME_LEN);
|
||||
|
@ -114,10 +114,12 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
|
||||
tSma.exprLen = strlen(expr);
|
||||
tSma.expr = (char *)calloc(tSma.exprLen + 1, 1);
|
||||
ASSERT_NE(tSma.expr, nullptr);
|
||||
tstrncpy(tSma.expr, expr, tSma.exprLen + 1);
|
||||
|
||||
tSma.tagsFilterLen = strlen(tagsFilter);
|
||||
tSma.tagsFilter = (char *)calloc(tSma.tagsFilterLen + 1, 1);
|
||||
ASSERT_NE(tSma.tagsFilter, nullptr);
|
||||
tstrncpy(tSma.tagsFilter, tagsFilter, tSma.tagsFilterLen + 1);
|
||||
|
||||
SMeta * pMeta = NULL;
|
||||
|
@ -129,18 +131,18 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
pMeta = metaOpen(smaTestDir, pMetaCfg, NULL);
|
||||
assert(pMeta != NULL);
|
||||
// save index 1
|
||||
EXPECT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
ASSERT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
|
||||
pSmaCfg->indexUid = indexUid2;
|
||||
tstrncpy(pSmaCfg->indexName, smaIndexName2, TSDB_INDEX_NAME_LEN);
|
||||
pSmaCfg->version = 1;
|
||||
pSmaCfg->intervalUnit = TD_TIME_UNIT_HOUR;
|
||||
pSmaCfg->intervalUnit = TIME_UNIT_HOUR;
|
||||
pSmaCfg->interval = 1;
|
||||
pSmaCfg->slidingUnit = TD_TIME_UNIT_MINUTE;
|
||||
pSmaCfg->slidingUnit = TIME_UNIT_MINUTE;
|
||||
pSmaCfg->sliding = 5;
|
||||
|
||||
// save index 2
|
||||
EXPECT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
ASSERT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
|
||||
// get value by indexName
|
||||
STSma *qSmaCfg = NULL;
|
||||
|
@ -150,8 +152,8 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
printf("timezone1 = %s\n", qSmaCfg->timezone);
|
||||
printf("expr1 = %s\n", qSmaCfg->expr != NULL ? qSmaCfg->expr : "");
|
||||
printf("tagsFilter1 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : "");
|
||||
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1);
|
||||
EXPECT_EQ(qSmaCfg->tableUid, tSma.tableUid);
|
||||
ASSERT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1);
|
||||
ASSERT_EQ(qSmaCfg->tableUid, tSma.tableUid);
|
||||
tdDestroyTSma(qSmaCfg);
|
||||
tfree(qSmaCfg);
|
||||
|
||||
|
@ -161,8 +163,8 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
printf("timezone2 = %s\n", qSmaCfg->timezone);
|
||||
printf("expr2 = %s\n", qSmaCfg->expr != NULL ? qSmaCfg->expr : "");
|
||||
printf("tagsFilter2 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : "");
|
||||
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2);
|
||||
EXPECT_EQ(qSmaCfg->interval, tSma.interval);
|
||||
ASSERT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2);
|
||||
ASSERT_EQ(qSmaCfg->interval, tSma.interval);
|
||||
tdDestroyTSma(qSmaCfg);
|
||||
tfree(qSmaCfg);
|
||||
|
||||
|
@ -178,25 +180,25 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
printf("indexName = %s\n", indexName);
|
||||
++indexCnt;
|
||||
}
|
||||
EXPECT_EQ(indexCnt, nCntTSma);
|
||||
ASSERT_EQ(indexCnt, nCntTSma);
|
||||
metaCloseSmaCurosr(pSmaCur);
|
||||
|
||||
// get wrapper by table uid
|
||||
STSmaWrapper *pSW = metaGetSmaInfoByTable(pMeta, tbUid);
|
||||
assert(pSW != NULL);
|
||||
EXPECT_EQ(pSW->number, nCntTSma);
|
||||
EXPECT_STRCASEEQ(pSW->tSma->indexName, smaIndexName1);
|
||||
EXPECT_STRCASEEQ(pSW->tSma->timezone, timezone);
|
||||
EXPECT_STRCASEEQ(pSW->tSma->expr, expr);
|
||||
EXPECT_STRCASEEQ(pSW->tSma->tagsFilter, tagsFilter);
|
||||
EXPECT_EQ(pSW->tSma->indexUid, indexUid1);
|
||||
EXPECT_EQ(pSW->tSma->tableUid, tbUid);
|
||||
EXPECT_STRCASEEQ((pSW->tSma + 1)->indexName, smaIndexName2);
|
||||
EXPECT_STRCASEEQ((pSW->tSma + 1)->timezone, timezone);
|
||||
EXPECT_STRCASEEQ((pSW->tSma + 1)->expr, expr);
|
||||
EXPECT_STRCASEEQ((pSW->tSma + 1)->tagsFilter, tagsFilter);
|
||||
EXPECT_EQ((pSW->tSma + 1)->indexUid, indexUid2);
|
||||
EXPECT_EQ((pSW->tSma + 1)->tableUid, tbUid);
|
||||
ASSERT_EQ(pSW->number, nCntTSma);
|
||||
ASSERT_STRCASEEQ(pSW->tSma->indexName, smaIndexName1);
|
||||
ASSERT_STRCASEEQ(pSW->tSma->timezone, timezone);
|
||||
ASSERT_STRCASEEQ(pSW->tSma->expr, expr);
|
||||
ASSERT_STRCASEEQ(pSW->tSma->tagsFilter, tagsFilter);
|
||||
ASSERT_EQ(pSW->tSma->indexUid, indexUid1);
|
||||
ASSERT_EQ(pSW->tSma->tableUid, tbUid);
|
||||
ASSERT_STRCASEEQ((pSW->tSma + 1)->indexName, smaIndexName2);
|
||||
ASSERT_STRCASEEQ((pSW->tSma + 1)->timezone, timezone);
|
||||
ASSERT_STRCASEEQ((pSW->tSma + 1)->expr, expr);
|
||||
ASSERT_STRCASEEQ((pSW->tSma + 1)->tagsFilter, tagsFilter);
|
||||
ASSERT_EQ((pSW->tSma + 1)->indexUid, indexUid2);
|
||||
ASSERT_EQ((pSW->tSma + 1)->tableUid, tbUid);
|
||||
|
||||
tdDestroyTSmaWrapper(pSW);
|
||||
tfree(pSW);
|
||||
|
@ -208,7 +210,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
|
|||
printf("metaGetSmaTbUids: uid[%" PRIu32 "] = %" PRIi64 "\n", i, *(tb_uid_t *)taosArrayGet(pUids, i));
|
||||
// printf("metaGetSmaTbUids: index[%" PRIu32 "] = %s", i, (char *)taosArrayGet(pUids, i));
|
||||
}
|
||||
EXPECT_EQ(taosArrayGetSize(pUids), 1);
|
||||
ASSERT_EQ(taosArrayGetSize(pUids), 1);
|
||||
taosArrayDestroy(pUids);
|
||||
|
||||
// resource release
|
||||
|
@ -231,7 +233,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
const tb_uid_t tbUid = 1234567890;
|
||||
const int64_t indexUid1 = 2000000001;
|
||||
const int64_t interval1 = 1;
|
||||
const int8_t intervalUnit1 = TD_TIME_UNIT_DAY;
|
||||
const int8_t intervalUnit1 = TIME_UNIT_DAY;
|
||||
const uint32_t nCntTSma = 2;
|
||||
TSKEY skey1 = 1646987196;
|
||||
const int64_t testSmaData1 = 100;
|
||||
|
@ -239,9 +241,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
// encode
|
||||
STSma tSma = {0};
|
||||
tSma.version = 0;
|
||||
tSma.intervalUnit = TD_TIME_UNIT_DAY;
|
||||
tSma.intervalUnit = TIME_UNIT_DAY;
|
||||
tSma.interval = 1;
|
||||
tSma.slidingUnit = TD_TIME_UNIT_HOUR;
|
||||
tSma.slidingUnit = TIME_UNIT_HOUR;
|
||||
tSma.sliding = 0;
|
||||
tSma.indexUid = indexUid1;
|
||||
tstrncpy(tSma.indexName, smaIndexName1, TSDB_INDEX_NAME_LEN);
|
||||
|
@ -250,10 +252,12 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
|
||||
tSma.exprLen = strlen(expr);
|
||||
tSma.expr = (char *)calloc(tSma.exprLen + 1, 1);
|
||||
ASSERT_NE(tSma.expr, nullptr);
|
||||
tstrncpy(tSma.expr, expr, tSma.exprLen + 1);
|
||||
|
||||
tSma.tagsFilterLen = strlen(tagsFilter);
|
||||
tSma.tagsFilter = (char *)calloc(tSma.tagsFilterLen + 1, 1);
|
||||
ASSERT_NE(tSma.tagsFilter, nullptr);
|
||||
tstrncpy(tSma.tagsFilter, tagsFilter, tSma.tagsFilterLen + 1);
|
||||
|
||||
SMeta * pMeta = NULL;
|
||||
|
@ -265,7 +269,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
pMeta = metaOpen(smaTestDir, pMetaCfg, NULL);
|
||||
assert(pMeta != NULL);
|
||||
// save index 1
|
||||
EXPECT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
ASSERT_EQ(metaSaveSmaToDB(pMeta, pSmaCfg), 0);
|
||||
|
||||
// step 2: insert data
|
||||
STSmaDataWrapper *pSmaData = NULL;
|
||||
|
@ -298,18 +302,19 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
}
|
||||
|
||||
char *msg = (char *)calloc(100, 1);
|
||||
EXPECT_EQ(tsdbUpdateSmaWindow(&tsdb, TSDB_SMA_TYPE_TIME_RANGE, msg), 0);
|
||||
assert(msg != NULL);
|
||||
ASSERT_EQ(tsdbUpdateSmaWindow(&tsdb, TSDB_SMA_TYPE_TIME_RANGE, msg), 0);
|
||||
|
||||
// init
|
||||
int32_t allocCnt = 0;
|
||||
int32_t allocStep = 40960;
|
||||
int32_t buffer = 4096;
|
||||
int32_t allocStep = 16384;
|
||||
int32_t buffer = 1024;
|
||||
void * buf = NULL;
|
||||
EXPECT_EQ(tsdbMakeRoom(&buf, allocStep), 0);
|
||||
ASSERT_EQ(tsdbMakeRoom(&buf, allocStep), 0);
|
||||
int32_t bufSize = taosTSizeof(buf);
|
||||
int32_t numOfTables = 10;
|
||||
col_id_t numOfCols = 4096;
|
||||
EXPECT_GT(numOfCols, 0);
|
||||
ASSERT_GT(numOfCols, 0);
|
||||
|
||||
pSmaData = (STSmaDataWrapper *)buf;
|
||||
printf(">> allocate [%d] time to %d and addr is %p\n", ++allocCnt, bufSize, pSmaData);
|
||||
|
@ -326,7 +331,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
int32_t tableDataLen = sizeof(STSmaTbData);
|
||||
for (col_id_t c = 0; c < numOfCols; ++c) {
|
||||
if (bufSize - len - tableDataLen < buffer) {
|
||||
EXPECT_EQ(tsdbMakeRoom(&buf, bufSize + allocStep), 0);
|
||||
ASSERT_EQ(tsdbMakeRoom(&buf, bufSize + allocStep), 0);
|
||||
pSmaData = (STSmaDataWrapper *)buf;
|
||||
pTbData = (STSmaTbData *)POINTER_SHIFT(pSmaData, len);
|
||||
bufSize = taosTSizeof(buf);
|
||||
|
@ -353,22 +358,22 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
|
|||
}
|
||||
pSmaData->dataLen = (len - sizeof(STSmaDataWrapper));
|
||||
|
||||
EXPECT_GE(bufSize, pSmaData->dataLen);
|
||||
ASSERT_GE(bufSize, pSmaData->dataLen);
|
||||
|
||||
// execute
|
||||
EXPECT_EQ(tsdbInsertTSmaData(&tsdb, (char *)pSmaData), TSDB_CODE_SUCCESS);
|
||||
ASSERT_EQ(tsdbInsertTSmaData(&tsdb, (char *)pSmaData), TSDB_CODE_SUCCESS);
|
||||
|
||||
// step 3: query
|
||||
uint32_t checkDataCnt = 0;
|
||||
for (int32_t t = 0; t < numOfTables; ++t) {
|
||||
for (col_id_t c = 0; c < numOfCols; ++c) {
|
||||
EXPECT_EQ(tsdbGetTSmaData(&tsdb, NULL, indexUid1, interval1, intervalUnit1, tbUid + t,
|
||||
ASSERT_EQ(tsdbGetTSmaData(&tsdb, NULL, indexUid1, interval1, intervalUnit1, tbUid + t,
|
||||
c + PRIMARYKEY_TIMESTAMP_COL_ID, skey1, 1),
|
||||
TSDB_CODE_SUCCESS);
|
||||
++checkDataCnt;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
printf("%s:%d The sma data check count for insert and query is %" PRIu32 "\n", __FILE__, __LINE__, checkDataCnt);
|
||||
|
||||
// release data
|
||||
|
|
|
@ -1730,7 +1730,7 @@ static int32_t jsonToNodeObject(const SJson* pJson, const char* pName, SNode** p
|
|||
}
|
||||
|
||||
int32_t nodesNodeToString(const SNodeptr pNode, bool format, char** pStr, int32_t* pLen) {
|
||||
if (NULL == pNode || NULL == pStr || NULL == pLen) {
|
||||
if (NULL == pNode || NULL == pStr) {
|
||||
terrno = TSDB_CODE_FAILED;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
@ -1750,7 +1750,10 @@ int32_t nodesNodeToString(const SNodeptr pNode, bool format, char** pStr, int32_
|
|||
*pStr = format ? tjsonToString(pJson) : tjsonToUnformattedString(pJson);
|
||||
tjsonDelete(pJson);
|
||||
|
||||
*pLen = strlen(*pStr) + 1;
|
||||
if (NULL != pLen) {
|
||||
*pLen = strlen(*pStr) + 1;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
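With the NULL check added above, pLen becomes an optional out-parameter of nodesNodeToString; callers that only need the serialized string can pass NULL, as the new translateCreateTopic further below does:

    code = nodesNodeToString(pStmt->pQuery, false, &createReq.ast, NULL);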
|
|
@ -92,6 +92,8 @@ SNodeptr nodesMakeNode(ENodeType type) {
|
|||
return makeNode(type, sizeof(SCreateDatabaseStmt));
|
||||
case QUERY_NODE_DROP_DATABASE_STMT:
|
||||
return makeNode(type, sizeof(SDropDatabaseStmt));
|
||||
case QUERY_NODE_ALTER_DATABASE_STMT:
|
||||
return makeNode(type, sizeof(SAlterDatabaseStmt));
|
||||
case QUERY_NODE_SHOW_DATABASES_STMT:
|
||||
return makeNode(type, sizeof(SShowStmt));
|
||||
case QUERY_NODE_CREATE_TABLE_STMT:
|
||||
|
@ -131,8 +133,16 @@ SNodeptr nodesMakeNode(ENodeType type) {
|
|||
return makeNode(type, sizeof(SShowStmt));
|
||||
case QUERY_NODE_CREATE_INDEX_STMT:
|
||||
return makeNode(type, sizeof(SCreateIndexStmt));
|
||||
case QUERY_NODE_DROP_INDEX_STMT:
|
||||
return makeNode(type, sizeof(SDropIndexStmt));
|
||||
case QUERY_NODE_CREATE_QNODE_STMT:
|
||||
return makeNode(type, sizeof(SCreateQnodeStmt));
|
||||
case QUERY_NODE_DROP_QNODE_STMT:
|
||||
return makeNode(type, sizeof(SDropQnodeStmt));
|
||||
case QUERY_NODE_CREATE_TOPIC_STMT:
|
||||
return makeNode(type, sizeof(SCreateTopicStmt));
|
||||
case QUERY_NODE_DROP_TOPIC_STMT:
|
||||
return makeNode(type, sizeof(SDropTopicStmt));
|
||||
case QUERY_NODE_LOGIC_PLAN_SCAN:
|
||||
return makeNode(type, sizeof(SScanLogicNode));
|
||||
case QUERY_NODE_LOGIC_PLAN_JOIN:
|
||||
|
|
|
@ -113,6 +113,7 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt);
|
|||
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, const SToken* pVal);
|
||||
SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pDbName, SNode* pOptions);
|
||||
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pDbName);
|
||||
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName, SNode* pOptions);
|
||||
SNode* createDefaultTableOptions(SAstCreateContext* pCxt);
|
||||
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, const SToken* pVal);
|
||||
SNode* setTableSmaOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pSma);
|
||||
|
@ -134,7 +135,11 @@ SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const
|
|||
SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode);
|
||||
SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, const SToken* pIndexName, const SToken* pTableName, SNodeList* pCols, SNode* pOptions);
|
||||
SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInterval, SNode* pOffset, SNode* pSliding);
|
||||
SNode* createDropIndexStmt(SAstCreateContext* pCxt, const SToken* pIndexName, const SToken* pTableName);
|
||||
SNode* createCreateQnodeStmt(SAstCreateContext* pCxt, const SToken* pDnodeId);
|
||||
SNode* createDropQnodeStmt(SAstCreateContext* pCxt, const SToken* pDnodeId);
|
||||
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, const SToken* pSubscribeDbName);
|
||||
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -42,17 +42,17 @@
|
|||
//%right NK_BITNOT.
|
||||
|
||||
/************************************************ create/alter/drop/show user *****************************************/
|
||||
cmd ::= CREATE USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createCreateUserStmt(pCxt, &A, &B);}
|
||||
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B);}
|
||||
cmd ::= ALTER USER user_name(A) PRIVILEGE NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PRIVILEGES, &B);}
|
||||
cmd ::= CREATE USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createCreateUserStmt(pCxt, &A, &B); }
|
||||
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B); }
|
||||
cmd ::= ALTER USER user_name(A) PRIVILEGE NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PRIVILEGES, &B); }
|
||||
cmd ::= DROP USER user_name(A). { pCxt->pRootNode = createDropUserStmt(pCxt, &A); }
|
||||
cmd ::= SHOW USERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL); }
|
||||
|
||||
/************************************************ create/drop/show dnode **********************************************/
|
||||
cmd ::= CREATE DNODE dnode_endpoint(A). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, NULL);}
|
||||
cmd ::= CREATE DNODE dnode_host_name(A) PORT NK_INTEGER(B). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, &B);}
|
||||
cmd ::= DROP DNODE NK_INTEGER(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A);}
|
||||
cmd ::= DROP DNODE dnode_endpoint(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A);}
|
||||
cmd ::= CREATE DNODE dnode_endpoint(A). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, NULL); }
|
||||
cmd ::= CREATE DNODE dnode_host_name(A) PORT NK_INTEGER(B). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, &B); }
|
||||
cmd ::= DROP DNODE NK_INTEGER(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A); }
|
||||
cmd ::= DROP DNODE dnode_endpoint(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A); }
|
||||
cmd ::= SHOW DNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL); }
|
||||
|
||||
%type dnode_endpoint { SToken }
|
||||
|
@ -64,15 +64,17 @@ dnode_endpoint(A) ::= NK_STRING(B).
|
|||
dnode_host_name(A) ::= NK_ID(B). { A = B; }
|
||||
dnode_host_name(A) ::= NK_IPTOKEN(B). { A = B; }
|
||||
|
||||
/************************************************ create qnode ********************************************************/
|
||||
/************************************************ create/drop qnode ***************************************************/
|
||||
cmd ::= CREATE QNODE ON DNODE NK_INTEGER(A). { pCxt->pRootNode = createCreateQnodeStmt(pCxt, &A); }
|
||||
cmd ::= DROP QNODE ON DNODE NK_INTEGER(A). { pCxt->pRootNode = createDropQnodeStmt(pCxt, &A); }
|
||||
cmd ::= SHOW QNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL); }
|
||||
|
||||
/************************************************ create/drop/show/use database ***************************************/
|
||||
cmd ::= CREATE DATABASE not_exists_opt(A) db_name(B) db_options(C). { pCxt->pRootNode = createCreateDatabaseStmt(pCxt, A, &B, C);}
|
||||
cmd ::= CREATE DATABASE not_exists_opt(A) db_name(B) db_options(C). { pCxt->pRootNode = createCreateDatabaseStmt(pCxt, A, &B, C); }
|
||||
cmd ::= DROP DATABASE exists_opt(A) db_name(B). { pCxt->pRootNode = createDropDatabaseStmt(pCxt, A, &B); }
|
||||
cmd ::= SHOW DATABASES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL); }
|
||||
cmd ::= USE db_name(A). { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &A);}
|
||||
cmd ::= USE db_name(A). { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &A); }
|
||||
cmd ::= ALTER DATABASE db_name(A) db_options(B). { pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &A, B); }
|
||||
|
||||
%type not_exists_opt { bool }
|
||||
%destructor not_exists_opt { }
|
||||
|
@ -198,6 +200,7 @@ col_name(A) ::= column_name(B).
|
|||
cmd ::= CREATE SMA INDEX index_name(A) ON table_name(B) index_options(C). { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, &A, &B, NULL, C); }
|
||||
cmd ::= CREATE FULLTEXT INDEX
|
||||
index_name(A) ON table_name(B) NK_LP col_name_list(C) NK_RP. { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, &A, &B, C, NULL); }
|
||||
cmd ::= DROP INDEX index_name(A) ON table_name(B). { pCxt->pRootNode = createDropIndexStmt(pCxt, &A, &B); }
|
||||
|
||||
index_options(A) ::= . { A = NULL; }
|
||||
index_options(A) ::= FUNCTION NK_LP func_list(B) NK_RP INTERVAL
|
||||
|
@ -212,6 +215,11 @@ func_list(A) ::= func_list(B) NK_COMMA func(C).
|
|||
|
||||
func(A) ::= function_name(B) NK_LP expression_list(C) NK_RP. { A = createFunctionNode(pCxt, &B, C); }
|
||||
|
||||
/************************************************ create/drop topic ***************************************************/
|
||||
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL); }
|
||||
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C); }
|
||||
cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); }
|
||||
|
||||
/************************************************ show vgroups ********************************************************/
|
||||
cmd ::= SHOW VGROUPS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, NULL); }
|
||||
cmd ::= SHOW db_name(B) NK_DOT VGROUPS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, &B); }
|
||||
|
@ -270,6 +278,10 @@ user_name(A) ::= NK_ID(B).
|
|||
%destructor index_name { }
|
||||
index_name(A) ::= NK_ID(B). { A = B; }
|
||||
|
||||
%type topic_name { SToken }
|
||||
%destructor topic_name { }
|
||||
topic_name(A) ::= NK_ID(B). { A = B; }
|
||||
|
||||
/************************************************ expression **********************************************************/
|
||||
expression(A) ::= literal(B). { A = B; }
|
||||
//expression(A) ::= NK_QUESTION(B). { A = B; }
|
||||
|
|
|
@ -799,6 +799,17 @@ SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con
|
|||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName, SNode* pOptions) {
|
||||
if (!checkDbName(pCxt, pDbName)) {
|
||||
return NULL;
|
||||
}
|
||||
SAlterDatabaseStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_DATABASE_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
strncpy(pStmt->dbName, pDbName->z, pDbName->n);
|
||||
pStmt->pOptions = (SDatabaseOptions*)pOptions;
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
|
||||
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
|
||||
CHECK_OUT_OF_MEM(pOptions);
|
||||
|
@ -1022,9 +1033,47 @@ SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInt
|
|||
return (SNode*)pOptions;
|
||||
}
|
||||
|
||||
SNode* createDropIndexStmt(SAstCreateContext* pCxt, const SToken* pIndexName, const SToken* pTableName) {
|
||||
if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, pTableName)) {
|
||||
return NULL;
|
||||
}
|
||||
SDropIndexStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_INDEX_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
strncpy(pStmt->indexName, pIndexName->z, pIndexName->n);
|
||||
strncpy(pStmt->tableName, pTableName->z, pTableName->n);
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createCreateQnodeStmt(SAstCreateContext* pCxt, const SToken* pDnodeId) {
|
||||
SCreateQnodeStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_QNODE_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
pStmt->dnodeId = strtol(pDnodeId->z, NULL, 10);
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createDropQnodeStmt(SAstCreateContext* pCxt, const SToken* pDnodeId) {
|
||||
SDropQnodeStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_QNODE_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
pStmt->dnodeId = strtol(pDnodeId->z, NULL, 10);
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, const SToken* pSubscribeDbName) {
|
||||
SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
|
||||
pStmt->ignoreExists = ignoreExists;
|
||||
pStmt->pQuery = pQuery;
|
||||
if (NULL != pSubscribeDbName) {
|
||||
strncpy(pStmt->subscribeDbName, pSubscribeDbName->z, pSubscribeDbName->n);
|
||||
}
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
||||
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName) {
|
||||
SDropTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_TOPIC_STMT);
|
||||
CHECK_OUT_OF_MEM(pStmt);
|
||||
strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
|
||||
pStmt->ignoreNotExists = ignoreNotExists;
|
||||
return (SNode*)pStmt;
|
||||
}
|
||||
|
|
|
@ -617,7 +617,7 @@ static FORCE_INLINE int32_t MemRowAppend(const void* value, int32_t len, void* p
|
|||
if (TSDB_DATA_TYPE_BINARY == pa->schema->type) {
|
||||
const char* rowEnd = tdRowEnd(rb->pBuf);
|
||||
STR_WITH_SIZE_TO_VARSTR(rowEnd, value, len);
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, false, pa->toffset, pa->colIdx);
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, true, pa->toffset, pa->colIdx);
|
||||
} else if (TSDB_DATA_TYPE_NCHAR == pa->schema->type) {
|
||||
// if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
|
||||
int32_t output = 0;
|
||||
|
@ -626,9 +626,9 @@ static FORCE_INLINE int32_t MemRowAppend(const void* value, int32_t len, void* p
|
|||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
varDataSetLen(rowEnd, output);
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, false, pa->toffset, pa->colIdx);
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, true, pa->toffset, pa->colIdx);
|
||||
} else {
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, value, true, pa->toffset, pa->colIdx);
|
||||
tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, value, false, pa->toffset, pa->colIdx);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -121,6 +121,7 @@ static SKeyword keywordTable[] = {
|
|||
{"TAGS", TK_TAGS},
|
||||
{"TIMESTAMP", TK_TIMESTAMP},
|
||||
{"TINYINT", TK_TINYINT},
|
||||
{"TOPIC", TK_TOPIC},
|
||||
{"TTL", TK_TTL},
|
||||
{"UNION", TK_UNION},
|
||||
{"UNSIGNED", TK_UNSIGNED},
|
||||
|
@ -230,7 +231,6 @@ static SKeyword keywordTable[] = {
|
|||
// {"TBNAME", TK_TBNAME},
|
||||
// {"VNODES", TK_VNODES},
|
||||
// {"PARTITIONS", TK_PARTITIONS},
|
||||
// {"TOPIC", TK_TOPIC},
|
||||
// {"TOPICS", TK_TOPICS},
|
||||
// {"COMPACT", TK_COMPACT},
|
||||
// {"MODIFY", TK_MODIFY},
|
||||
|
|
|
@ -21,14 +21,6 @@
|
|||
#include "parUtil.h"
|
||||
#include "ttime.h"
|
||||
|
||||
static bool afterGroupBy(ESqlClause clause) {
|
||||
return clause > SQL_CLAUSE_GROUP_BY;
|
||||
}
|
||||
|
||||
static bool beforeHaving(ESqlClause clause) {
|
||||
return clause < SQL_CLAUSE_HAVING;
|
||||
}
|
||||
|
||||
typedef struct STranslateContext {
|
||||
SParseContext* pParseCxt;
|
||||
int32_t errCode;
|
||||
|
@ -41,6 +33,15 @@ typedef struct STranslateContext {
|
|||
} STranslateContext;
|
||||
|
||||
static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
|
||||
static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
|
||||
|
||||
static bool afterGroupBy(ESqlClause clause) {
|
||||
return clause > SQL_CLAUSE_GROUP_BY;
|
||||
}
|
||||
|
||||
static bool beforeHaving(ESqlClause clause) {
|
||||
return clause < SQL_CLAUSE_HAVING;
|
||||
}
|
||||
|
||||
static EDealRes generateDealNodeErrMsg(STranslateContext* pCxt, int32_t errCode, ...) {
|
||||
va_list vArgList;
|
||||
|
@ -833,6 +834,41 @@ static int32_t translateDropDatabase(STranslateContext* pCxt, SDropDatabaseStmt*
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt, SAlterDbReq* pReq) {
|
||||
SName name = {0};
|
||||
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
|
||||
tNameGetFullDbName(&name, pReq->db);
|
||||
pReq->totalBlocks = pStmt->pOptions->numOfBlocks;
|
||||
pReq->daysToKeep0 = pStmt->pOptions->keep;
|
||||
pReq->daysToKeep1 = -1;
|
||||
pReq->daysToKeep2 = -1;
|
||||
pReq->fsyncPeriod = pStmt->pOptions->fsyncPeriod;
|
||||
pReq->walLevel = pStmt->pOptions->walLevel;
|
||||
pReq->quorum = pStmt->pOptions->quorum;
|
||||
pReq->cacheLastRow = pStmt->pOptions->cachelast;
|
||||
return;
|
||||
}
|
||||
|
||||
static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt) {
|
||||
SAlterDbReq alterReq = {0};
|
||||
buildAlterDbReq(pCxt, pStmt, &alterReq);
|
||||
|
||||
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
|
||||
if (NULL == pCxt->pCmdMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
|
||||
pCxt->pCmdMsg->msgType = TDMT_MND_ALTER_DB;
|
||||
pCxt->pCmdMsg->msgLen = tSerializeSAlterDbReq(NULL, 0, &alterReq);
|
||||
pCxt->pCmdMsg->pMsg = malloc(pCxt->pCmdMsg->msgLen);
|
||||
if (NULL == pCxt->pCmdMsg->pMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tSerializeSAlterDbReq(pCxt->pCmdMsg->pMsg, pCxt->pCmdMsg->msgLen, &alterReq);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t columnNodeToField(SNodeList* pList, SArray** pArray) {
|
||||
*pArray = taosArrayInit(LIST_LENGTH(pList), sizeof(SField));
|
||||
SNode* pNode;
|
||||
|
@ -1201,6 +1237,27 @@ static int32_t translateCreateIndex(STranslateContext* pCxt, SCreateIndexStmt* p
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t translateDropIndex(STranslateContext* pCxt, SDropIndexStmt* pStmt) {
|
||||
SVDropTSmaReq dropSmaReq = {0};
|
||||
strcpy(dropSmaReq.indexName, pStmt->indexName);
|
||||
|
||||
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
|
||||
if (NULL == pCxt->pCmdMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
|
||||
pCxt->pCmdMsg->msgType = TDMT_VND_DROP_SMA;
|
||||
pCxt->pCmdMsg->msgLen = tSerializeSVDropTSmaReq(NULL, &dropSmaReq);
|
||||
pCxt->pCmdMsg->pMsg = malloc(pCxt->pCmdMsg->msgLen);
|
||||
if (NULL == pCxt->pCmdMsg->pMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
void* pBuf = pCxt->pCmdMsg->pMsg;
|
||||
tSerializeSVDropTSmaReq(&pBuf, &dropSmaReq);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateCreateQnode(STranslateContext* pCxt, SCreateQnodeStmt* pStmt) {
|
||||
SMCreateQnodeReq createReq = { .dnodeId = pStmt->dnodeId };
|
||||
|
||||
|
@ -1220,6 +1277,93 @@ static int32_t translateCreateQnode(STranslateContext* pCxt, SCreateQnodeStmt* p
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateDropQnode(STranslateContext* pCxt, SDropQnodeStmt* pStmt) {
|
||||
SDDropQnodeReq dropReq = { .dnodeId = pStmt->dnodeId };
|
||||
|
||||
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
|
||||
if (NULL == pCxt->pCmdMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
|
||||
pCxt->pCmdMsg->msgType = TDMT_DND_DROP_QNODE;
|
||||
pCxt->pCmdMsg->msgLen = tSerializeSMCreateDropQSBNodeReq(NULL, 0, &dropReq);
|
||||
pCxt->pCmdMsg->pMsg = malloc(pCxt->pCmdMsg->msgLen);
|
||||
if (NULL == pCxt->pCmdMsg->pMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tSerializeSMCreateDropQSBNodeReq(pCxt->pCmdMsg->pMsg, pCxt->pCmdMsg->msgLen, &dropReq);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateCreateTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt) {
|
||||
SCMCreateTopicReq createReq = {0};
|
||||
|
||||
if (NULL != pStmt->pQuery) {
|
||||
int32_t code = translateQuery(pCxt, pStmt->pQuery);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesNodeToString(pStmt->pQuery, false, &createReq.ast, NULL);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
strcpy(createReq.subscribeDbName, pStmt->subscribeDbName);
|
||||
}
|
||||
|
||||
createReq.sql = strdup(pCxt->pParseCxt->pSql);
|
||||
if (NULL == createReq.sql) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
|
||||
strcpy(name.dbname, pCxt->pParseCxt->db);
|
||||
strcpy(name.tname, pStmt->topicName);
|
||||
tNameExtractFullName(&name, createReq.name);
|
||||
createReq.igExists = pStmt->ignoreExists;
|
||||
|
||||
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
|
||||
if (NULL == pCxt->pCmdMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
|
||||
pCxt->pCmdMsg->msgType = TDMT_MND_CREATE_TOPIC;
|
||||
pCxt->pCmdMsg->msgLen = tSerializeSCMCreateTopicReq(NULL, 0, &createReq);
|
||||
pCxt->pCmdMsg->pMsg = malloc(pCxt->pCmdMsg->msgLen);
|
||||
if (NULL == pCxt->pCmdMsg->pMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tSerializeSCMCreateTopicReq(pCxt->pCmdMsg->pMsg, pCxt->pCmdMsg->msgLen, &createReq);
|
||||
tFreeSCMCreateTopicReq(&createReq);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateDropTopic(STranslateContext* pCxt, SDropTopicStmt* pStmt) {
|
||||
SMDropTopicReq dropReq = {0};
|
||||
|
||||
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
|
||||
strcpy(name.dbname, pCxt->pParseCxt->db);
|
||||
strcpy(name.tname, pStmt->topicName);
|
||||
tNameExtractFullName(&name, dropReq.name);
|
||||
dropReq.igNotExists = pStmt->ignoreNotExists;
|
||||
|
||||
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
|
||||
if (NULL == pCxt->pCmdMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
|
||||
pCxt->pCmdMsg->msgType = TDMT_MND_DROP_TOPIC;
|
||||
pCxt->pCmdMsg->msgLen = tSerializeSMDropTopicReq(NULL, 0, &dropReq);
|
||||
pCxt->pCmdMsg->pMsg = malloc(pCxt->pCmdMsg->msgLen);
|
||||
if (NULL == pCxt->pCmdMsg->pMsg) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
tSerializeSMDropTopicReq(pCxt->pCmdMsg->pMsg, pCxt->pCmdMsg->msgLen, &dropReq);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
switch (nodeType(pNode)) {
|
||||
|
@ -1232,6 +1376,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
|
|||
case QUERY_NODE_DROP_DATABASE_STMT:
|
||||
code = translateDropDatabase(pCxt, (SDropDatabaseStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_ALTER_DATABASE_STMT:
|
||||
code = translateAlterDatabase(pCxt, (SAlterDatabaseStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_CREATE_TABLE_STMT:
|
||||
code = translateCreateSuperTable(pCxt, (SCreateTableStmt*)pNode);
|
||||
break;
|
||||
|
@ -1274,9 +1421,21 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
|
|||
case QUERY_NODE_CREATE_INDEX_STMT:
|
||||
code = translateCreateIndex(pCxt, (SCreateIndexStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_DROP_INDEX_STMT:
|
||||
code = translateDropIndex(pCxt, (SDropIndexStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_CREATE_QNODE_STMT:
|
||||
code = translateCreateQnode(pCxt, (SCreateQnodeStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_DROP_QNODE_STMT:
|
||||
code = translateDropQnode(pCxt, (SDropQnodeStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_CREATE_TOPIC_STMT:
|
||||
code = translateCreateTopic(pCxt, (SCreateTopicStmt*)pNode);
|
||||
break;
|
||||
case QUERY_NODE_DROP_TOPIC_STMT:
|
||||
code = translateDropTopic(pCxt, (SDropTopicStmt*)pNode);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -340,6 +340,23 @@ TEST_F(ParserTest, createDatabase) {
|
|||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, alterDatabase) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("alter database wxy_db BLOCKS 200");
|
||||
ASSERT_TRUE(run());
|
||||
|
||||
bind("alter database wxy_db "
|
||||
"BLOCKS 200 "
|
||||
"CACHELAST 1 "
|
||||
"FSYNC 200 "
|
||||
"KEEP 200 "
|
||||
"QUORUM 2 "
|
||||
"WAL 1 "
|
||||
);
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, showDatabase) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
|
@ -406,9 +423,49 @@ TEST_F(ParserTest, createSmaIndex) {
|
|||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, dropIndex) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("drop index index1 on t1");
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, createQnode) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("create qnode on dnode 1");
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, dropQnode) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("drop qnode on dnode 1");
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, createTopic) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("create topic tp1 as select * from t1");
|
||||
ASSERT_TRUE(run());
|
||||
|
||||
bind("create topic if not exists tp1 as select * from t1");
|
||||
ASSERT_TRUE(run());
|
||||
|
||||
bind("create topic tp1 as test");
|
||||
ASSERT_TRUE(run());
|
||||
|
||||
bind("create topic if not exists tp1 as test");
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
||||
TEST_F(ParserTest, dropTopic) {
|
||||
setDatabase("root", "test");
|
||||
|
||||
bind("drop topic tp1");
|
||||
ASSERT_TRUE(run());
|
||||
|
||||
bind("drop topic if exists tp1");
|
||||
ASSERT_TRUE(run());
|
||||
}
|
||||
|
|
|
@ -31,10 +31,10 @@ extern "C" {
|
|||
#define TIMER_MAX_MS 0x7FFFFFFF
|
||||
#define ENV_TICK_TIMER_MS 1000
|
||||
#define PING_TIMER_MS 1000
|
||||
#define ELECT_TIMER_MS_MIN 150
|
||||
#define ELECT_TIMER_MS_MAX 300
|
||||
#define ELECT_TIMER_MS_MIN 1500
|
||||
#define ELECT_TIMER_MS_MAX 3000
|
||||
#define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)
|
||||
#define HEARTBEAT_TIMER_MS 30
|
||||
#define HEARTBEAT_TIMER_MS 300
|
||||
|
||||
#define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0})
|
||||
|
||||
|
|
|
@ -34,11 +34,11 @@ extern "C" {
|
|||
|
||||
typedef struct SSyncIO {
|
||||
STaosQueue *pMsgQ;
|
||||
STaosQset *pQset;
|
||||
STaosQset * pQset;
|
||||
pthread_t consumerTid;
|
||||
|
||||
void *serverRpc;
|
||||
void *clientRpc;
|
||||
void * serverRpc;
|
||||
void * clientRpc;
|
||||
SEpSet myAddr;
|
||||
|
||||
tmr_h qTimer;
|
||||
|
|
|
@ -43,11 +43,14 @@ int32_t raftStorePersist(SRaftStore *pRaftStore);
|
|||
int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
|
||||
bool raftStoreHasVoted(SRaftStore *pRaftStore);
|
||||
void raftStoreVote(SRaftStore *pRaftStore, SRaftId *pRaftId);
|
||||
void raftStoreClearVote(SRaftStore *pRaftStore);
|
||||
void raftStoreNextTerm(SRaftStore *pRaftStore);
|
||||
void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term);
|
||||
bool raftStoreHasVoted(SRaftStore *pRaftStore);
|
||||
void raftStoreVote(SRaftStore *pRaftStore, SRaftId *pRaftId);
|
||||
void raftStoreClearVote(SRaftStore *pRaftStore);
|
||||
void raftStoreNextTerm(SRaftStore *pRaftStore);
|
||||
void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term);
|
||||
int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson);
|
||||
cJSON * raftStore2Json(SRaftStore *pRaftStore);
|
||||
char * raftStore2Str(SRaftStore *pRaftStore);
|
||||
|
||||
// for debug -------------------
|
||||
void raftStorePrint(SRaftStore *pObj);
|
||||
|
|
|
@ -102,7 +102,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
|
||||
SyncTerm localPreLogTerm = 0;
|
||||
if (pMsg->prevLogTerm >= SYNC_INDEX_BEGIN && pMsg->prevLogTerm <= ths->pLogStore->getLastIndex(ths->pLogStore)) {
|
||||
SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogTerm);
|
||||
SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex);
|
||||
assert(pEntry != NULL);
|
||||
localPreLogTerm = pEntry->term;
|
||||
syncEntryDestory(pEntry);
|
||||
|
@ -111,9 +111,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
bool logOK =
|
||||
(pMsg->prevLogIndex == SYNC_INDEX_INVALID) ||
|
||||
((pMsg->prevLogIndex >= SYNC_INDEX_BEGIN) &&
|
||||
(pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) && (pMsg->prevLogIndex == localPreLogTerm));
|
||||
(pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) && (pMsg->prevLogTerm == localPreLogTerm));
|
||||
|
||||
// reject
|
||||
// reject request
|
||||
if ((pMsg->term < ths->pRaftStore->currentTerm) ||
|
||||
((pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && !logOK)) {
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild();
|
||||
|
@ -134,6 +134,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
// return to follower state
|
||||
if (pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE) {
|
||||
syncNodeBecomeFollower(ths);
|
||||
|
||||
// need ret?
|
||||
return ret;
|
||||
}
|
||||
|
||||
// accept request
|
||||
|
@ -144,17 +147,17 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
matchSuccess = true;
|
||||
}
|
||||
if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) {
|
||||
SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogTerm);
|
||||
assert(pEntry != NULL);
|
||||
if (pMsg->prevLogTerm == pEntry->term) {
|
||||
SSyncRaftEntry* pPreEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex);
|
||||
assert(pPreEntry != NULL);
|
||||
if (pMsg->prevLogTerm == pPreEntry->term) {
|
||||
matchSuccess = true;
|
||||
}
|
||||
syncEntryDestory(pEntry);
|
||||
syncEntryDestory(pPreEntry);
|
||||
}
|
||||
|
||||
if (matchSuccess) {
|
||||
// delete conflict entries
|
||||
if (ths->pLogStore->getLastIndex(ths->pLogStore) > pMsg->prevLogIndex) {
|
||||
if (pMsg->prevLogIndex < ths->pLogStore->getLastIndex(ths->pLogStore)) {
|
||||
SyncIndex fromIndex = pMsg->prevLogIndex + 1;
|
||||
ths->pLogStore->truncate(ths->pLogStore, fromIndex);
|
||||
}
|
||||
|
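The corrections above (fetching the local entry by prevLogIndex instead of prevLogTerm, and comparing prevLogTerm against that entry's term) implement Raft's log-matching check. A condensed sketch of the intended predicate, using only symbols that appear in this file (illustration, not the patch):

    static bool syncLogMatchesSketch(SSyncNode* ths, SyncIndex prevLogIndex, SyncTerm prevLogTerm) {
      if (prevLogIndex == SYNC_INDEX_INVALID) return true;   /* empty prefix always matches */
      if (prevLogIndex < SYNC_INDEX_BEGIN ||
          prevLogIndex > ths->pLogStore->getLastIndex(ths->pLogStore)) {
        return false;
      }
      SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, prevLogIndex);
      bool ok = (pEntry != NULL) && (pEntry->term == prevLogTerm);
      if (pEntry != NULL) syncEntryDestory(pEntry);
      return ok;
    }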
@ -178,6 +181,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
|
||||
|
||||
syncAppendEntriesReplyDestroy(pReply);
|
||||
|
||||
} else {
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild();
|
||||
pReply->srcId = ths->myRaftId;
|
||||
|
|
|
@ -51,10 +51,10 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
|
|||
assert(pMsg->term == ths->pRaftStore->currentTerm);
|
||||
|
||||
if (pMsg->success) {
|
||||
// nextIndex = reply.matchIndex + 1
|
||||
// nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
|
||||
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1);
|
||||
|
||||
// matchIndex = reply.matchIndex
|
||||
// matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
|
||||
syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex);
|
||||
|
||||
// maybe commit
|
||||
|
@ -62,6 +62,8 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
|
|||
|
||||
} else {
|
||||
SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
|
||||
|
||||
// notice! int64, uint64
|
||||
if (nextIndex > SYNC_INDEX_BEGIN) {
|
||||
--nextIndex;
|
||||
} else {
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "syncIndexMgr.h"
|
||||
#include "syncInt.h"
|
||||
|
||||
// \* Leader i advances its commitIndex.
|
||||
// \* This is done as a separate step from handling AppendEntries responses,
|
||||
// \* in part to minimize atomic regions, and in part so that leaders of
|
||||
// \* single-server clusters are able to mark entries committed.
|
||||
// AdvanceCommitIndex(i) ==
|
||||
// /\ state[i] = Leader
|
||||
// /\ LET \* The set of servers that agree up through index.
|
||||
// Agree(index) == {i} \cup {k \in Server :
|
||||
// matchIndex[i][k] >= index}
|
||||
// \* The maximum indexes for which a quorum agrees
|
||||
// agreeIndexes == {index \in 1..Len(log[i]) :
|
||||
// Agree(index) \in Quorum}
|
||||
// \* New value for commitIndex'[i]
|
||||
// newCommitIndex ==
|
||||
// IF /\ agreeIndexes /= {}
|
||||
// /\ log[i][Max(agreeIndexes)].term = currentTerm[i]
|
||||
// THEN
|
||||
// Max(agreeIndexes)
|
||||
// ELSE
|
||||
// commitIndex[i]
|
||||
// IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex]
|
||||
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
|
||||
//
|
||||
void syncNodeMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
|
||||
syncIndexMgrLog2("==syncNodeMaybeAdvanceCommitIndex== pNextIndex", pSyncNode->pNextIndex);
|
||||
syncIndexMgrLog2("==syncNodeMaybeAdvanceCommitIndex== pMatchIndex", pSyncNode->pMatchIndex);
|
||||
}
|
|
@@ -50,6 +50,7 @@ int32_t syncNodeRequestVotePeers(SSyncNode* pSyncNode) {
}

int32_t syncNodeElect(SSyncNode* pSyncNode) {
  int32_t ret = 0;
  if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
    syncNodeFollower2Candidate(pSyncNode);
  }

@@ -62,7 +63,15 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) {
  votesRespondReset(pSyncNode->pVotesRespond, pSyncNode->pRaftStore->currentTerm);

  syncNodeVoteForSelf(pSyncNode);
  int32_t ret = syncNodeRequestVotePeers(pSyncNode);
  if (voteGrantedMajority(pSyncNode->pVotesGranted)) {
    // only myself, to leader
    assert(!pSyncNode->pVotesGranted->toLeader);
    syncNodeCandidate2Leader(pSyncNode);
    pSyncNode->pVotesGranted->toLeader = true;
    return ret;
  }

  ret = syncNodeRequestVotePeers(pSyncNode);
  assert(ret == 0);
  syncNodeResetElectTimer(pSyncNode);
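voteGrantedMajority gates the single-replica fast path above. A minimal sketch of such a predicate, with the parameter names assumed purely for illustration:

#include <stdbool.h>
#include <stdint.h>

// Sketch only: a vote counts as a majority once strictly more than half of the
// replicas have granted it; with replicaNum == 1 the node elects itself at once.
static bool sketchVoteMajority(int32_t votes, int32_t replicaNum) {
  return votes > replicaNum / 2;
}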
@@ -269,6 +269,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_PING_REPLY) {
      if (io->FpOnSyncPingReply != NULL) {
        SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncPingReply(io->pSyncNode, pSyncMsg);
        syncPingReplyDestroy(pSyncMsg);
      }

@@ -276,6 +277,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_CLIENT_REQUEST) {
      if (io->FpOnSyncClientRequest != NULL) {
        SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncClientRequest(io->pSyncNode, pSyncMsg);
        syncClientRequestDestroy(pSyncMsg);
      }

@@ -283,6 +285,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_REQUEST_VOTE) {
      if (io->FpOnSyncRequestVote != NULL) {
        SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncRequestVote(io->pSyncNode, pSyncMsg);
        syncRequestVoteDestroy(pSyncMsg);
      }

@@ -290,6 +293,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_REQUEST_VOTE_REPLY) {
      if (io->FpOnSyncRequestVoteReply != NULL) {
        SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncRequestVoteReply(io->pSyncNode, pSyncMsg);
        syncRequestVoteReplyDestroy(pSyncMsg);
      }

@@ -297,6 +301,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_APPEND_ENTRIES) {
      if (io->FpOnSyncAppendEntries != NULL) {
        SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncAppendEntries(io->pSyncNode, pSyncMsg);
        syncAppendEntriesDestroy(pSyncMsg);
      }

@@ -304,6 +309,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_APPEND_ENTRIES_REPLY) {
      if (io->FpOnSyncAppendEntriesReply != NULL) {
        SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncAppendEntriesReply(io->pSyncNode, pSyncMsg);
        syncAppendEntriesReplyDestroy(pSyncMsg);
      }

@@ -311,6 +317,7 @@ static void *syncIOConsumerFunc(void *param) {
    } else if (pRpcMsg->msgType == SYNC_TIMEOUT) {
      if (io->FpOnSyncTimeout != NULL) {
        SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg);
        assert(pSyncMsg != NULL);
        io->FpOnSyncTimeout(io->pSyncNode, pSyncMsg);
        syncTimeoutDestroy(pSyncMsg);
      }
@@ -70,22 +70,24 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
  char u64buf[128];
  cJSON *pRoot = cJSON_CreateObject();

  cJSON_AddNumberToObject(pRoot, "replicaNum", pSyncIndexMgr->replicaNum);
  cJSON *pReplicas = cJSON_CreateArray();
  cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pSyncIndexMgr->replicas))[i]));
  if (pSyncIndexMgr != NULL) {
    cJSON_AddNumberToObject(pRoot, "replicaNum", pSyncIndexMgr->replicaNum);
    cJSON *pReplicas = cJSON_CreateArray();
    cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
    for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
      cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pSyncIndexMgr->replicas))[i]));
    }
    int respondNum = 0;
    int *arr = (int *)malloc(sizeof(int) * pSyncIndexMgr->replicaNum);
    for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
      arr[i] = pSyncIndexMgr->index[i];
    }
    cJSON *pIndex = cJSON_CreateIntArray(arr, pSyncIndexMgr->replicaNum);
    free(arr);
    cJSON_AddItemToObject(pRoot, "index", pIndex);
    snprintf(u64buf, sizeof(u64buf), "%p", pSyncIndexMgr->pSyncNode);
    cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
  }
  int respondNum = 0;
  int *arr = (int *)malloc(sizeof(int) * pSyncIndexMgr->replicaNum);
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    arr[i] = pSyncIndexMgr->index[i];
  }
  cJSON *pIndex = cJSON_CreateIntArray(arr, pSyncIndexMgr->replicaNum);
  free(arr);
  cJSON_AddItemToObject(pRoot, "index", pIndex);
  snprintf(u64buf, sizeof(u64buf), "%p", pSyncIndexMgr->pSyncNode);
  cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);

  cJSON *pJson = cJSON_CreateObject();
  cJSON_AddItemToObject(pJson, "pSyncIndexMgr", pRoot);
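With the NULL guard, the 2Json helpers are safe to call on a manager that is not yet initialized. A minimal sketch of how such a helper is typically consumed for debug logging; cJSON_Print and cJSON_Delete are standard cJSON calls, and the trace format below is illustrative only:

// Sketch: render the index manager as a JSON string for a trace log, then free it.
static void sketchLogIndexMgr(const char *tag, SSyncIndexMgr *pSyncIndexMgr) {
  cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);  // safe even if pSyncIndexMgr is NULL
  char  *serialized = cJSON_Print(pJson);
  sTrace("%s: %s", tag, serialized);
  free(serialized);
  cJSON_Delete(pJson);
}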
@@ -103,6 +103,12 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
  assert(pSyncNode != NULL);
  memset(pSyncNode, 0, sizeof(SSyncNode));

  if (taosMkDir(pSyncInfo->path) != 0) {
    terrno = TAOS_SYSTEM_ERROR(errno);
    sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr());
    return NULL;
  }

  // init by SSyncInfo
  pSyncNode->vgId = pSyncInfo->vgId;
  pSyncNode->syncCfg = pSyncInfo->syncCfg;

@@ -200,6 +206,9 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
  pSyncNode->FpOnAppendEntriesReply = syncNodeOnAppendEntriesReplyCb;
  pSyncNode->FpOnTimeout = syncNodeOnTimeoutCb;

  // start raft
  syncNodeBecomeFollower(pSyncNode);

  return pSyncNode;
}
@ -355,128 +364,130 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
|
|||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
|
||||
// init by SSyncInfo
|
||||
cJSON_AddNumberToObject(pRoot, "vgId", pSyncNode->vgId);
|
||||
cJSON_AddStringToObject(pRoot, "path", pSyncNode->path);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pWal);
|
||||
cJSON_AddStringToObject(pRoot, "pWal", u64buf);
|
||||
if (pSyncNode != NULL) {
|
||||
// init by SSyncInfo
|
||||
cJSON_AddNumberToObject(pRoot, "vgId", pSyncNode->vgId);
|
||||
cJSON_AddStringToObject(pRoot, "path", pSyncNode->path);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pWal);
|
||||
cJSON_AddStringToObject(pRoot, "pWal", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->rpcClient);
|
||||
cJSON_AddStringToObject(pRoot, "rpcClient", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpSendMsg);
|
||||
cJSON_AddStringToObject(pRoot, "FpSendMsg", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->rpcClient);
|
||||
cJSON_AddStringToObject(pRoot, "rpcClient", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpSendMsg);
|
||||
cJSON_AddStringToObject(pRoot, "FpSendMsg", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->queue);
|
||||
cJSON_AddStringToObject(pRoot, "queue", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpEqMsg);
|
||||
cJSON_AddStringToObject(pRoot, "FpEqMsg", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->queue);
|
||||
cJSON_AddStringToObject(pRoot, "queue", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpEqMsg);
|
||||
cJSON_AddStringToObject(pRoot, "FpEqMsg", u64buf);
|
||||
|
||||
// init internal
|
||||
cJSON* pMe = syncUtilNodeInfo2Json(&pSyncNode->myNodeInfo);
|
||||
cJSON_AddItemToObject(pRoot, "myNodeInfo", pMe);
|
||||
cJSON* pRaftId = syncUtilRaftId2Json(&pSyncNode->myRaftId);
|
||||
cJSON_AddItemToObject(pRoot, "myRaftId", pRaftId);
|
||||
// init internal
|
||||
cJSON* pMe = syncUtilNodeInfo2Json(&pSyncNode->myNodeInfo);
|
||||
cJSON_AddItemToObject(pRoot, "myNodeInfo", pMe);
|
||||
cJSON* pRaftId = syncUtilRaftId2Json(&pSyncNode->myRaftId);
|
||||
cJSON_AddItemToObject(pRoot, "myRaftId", pRaftId);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "peersNum", pSyncNode->peersNum);
|
||||
cJSON* pPeers = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "peersNodeInfo", pPeers);
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
cJSON_AddItemToArray(pPeers, syncUtilNodeInfo2Json(&pSyncNode->peersNodeInfo[i]));
|
||||
cJSON_AddNumberToObject(pRoot, "peersNum", pSyncNode->peersNum);
|
||||
cJSON* pPeers = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "peersNodeInfo", pPeers);
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
cJSON_AddItemToArray(pPeers, syncUtilNodeInfo2Json(&pSyncNode->peersNodeInfo[i]));
|
||||
}
|
||||
cJSON* pPeersId = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "peersId", pPeersId);
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
cJSON_AddItemToArray(pPeersId, syncUtilRaftId2Json(&pSyncNode->peersId[i]));
|
||||
}
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pSyncNode->replicaNum);
|
||||
cJSON* pReplicasId = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicasId", pReplicasId);
|
||||
for (int i = 0; i < pSyncNode->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicasId, syncUtilRaftId2Json(&pSyncNode->replicasId[i]));
|
||||
}
|
||||
|
||||
// raft algorithm
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pFsm);
|
||||
cJSON_AddStringToObject(pRoot, "pFsm", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "quorum", pSyncNode->quorum);
|
||||
cJSON* pLaderCache = syncUtilRaftId2Json(&pSyncNode->leaderCache);
|
||||
cJSON_AddItemToObject(pRoot, "leaderCache", pLaderCache);
|
||||
|
||||
// tla+ server vars
|
||||
cJSON_AddNumberToObject(pRoot, "state", pSyncNode->state);
|
||||
cJSON_AddStringToObject(pRoot, "state_str", syncUtilState2String(pSyncNode->state));
|
||||
char tmpBuf[RAFT_STORE_BLOCK_SIZE];
|
||||
raftStoreSerialize(pSyncNode->pRaftStore, tmpBuf, sizeof(tmpBuf));
|
||||
cJSON_AddStringToObject(pRoot, "pRaftStore", tmpBuf);
|
||||
|
||||
// tla+ candidate vars
|
||||
cJSON_AddItemToObject(pRoot, "pVotesGranted", voteGranted2Json(pSyncNode->pVotesGranted));
|
||||
cJSON_AddItemToObject(pRoot, "pVotesRespond", votesRespond2Json(pSyncNode->pVotesRespond));
|
||||
|
||||
// tla+ leader vars
|
||||
cJSON_AddItemToObject(pRoot, "pNextIndex", syncIndexMgr2Json(pSyncNode->pNextIndex));
|
||||
cJSON_AddItemToObject(pRoot, "pMatchIndex", syncIndexMgr2Json(pSyncNode->pMatchIndex));
|
||||
|
||||
// tla+ log vars
|
||||
cJSON_AddItemToObject(pRoot, "pLogStore", logStore2Json(pSyncNode->pLogStore));
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRId64 "", pSyncNode->commitIndex);
|
||||
cJSON_AddStringToObject(pRoot, "commitIndex", u64buf);
|
||||
|
||||
// ping timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pPingTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pPingTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "pingTimerMS", pSyncNode->pingTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpPingTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpPingTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerCounter", u64buf);
|
||||
|
||||
// elect timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pElectTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pElectTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "electTimerMS", pSyncNode->electTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpElectTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpElectTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerCounter", u64buf);
|
||||
|
||||
// heartbeat timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pHeartbeatTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pHeartbeatTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "heartbeatTimerMS", pSyncNode->heartbeatTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpHeartbeatTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpHeartbeatTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerCounter", u64buf);
|
||||
|
||||
// callback
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnPing);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnPing", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnPingReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnPingReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnRequestVote);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnRequestVote", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnRequestVoteReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnRequestVoteReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnAppendEntries);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnAppendEntries", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnAppendEntriesReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnAppendEntriesReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnTimeout);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnTimeout", u64buf);
|
||||
}
|
||||
cJSON* pPeersId = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "peersId", pPeersId);
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
cJSON_AddItemToArray(pPeersId, syncUtilRaftId2Json(&pSyncNode->peersId[i]));
|
||||
}
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pSyncNode->replicaNum);
|
||||
cJSON* pReplicasId = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicasId", pReplicasId);
|
||||
for (int i = 0; i < pSyncNode->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicasId, syncUtilRaftId2Json(&pSyncNode->replicasId[i]));
|
||||
}
|
||||
|
||||
// raft algorithm
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pFsm);
|
||||
cJSON_AddStringToObject(pRoot, "pFsm", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "quorum", pSyncNode->quorum);
|
||||
cJSON* pLaderCache = syncUtilRaftId2Json(&pSyncNode->leaderCache);
|
||||
cJSON_AddItemToObject(pRoot, "leaderCache", pLaderCache);
|
||||
|
||||
// tla+ server vars
|
||||
cJSON_AddNumberToObject(pRoot, "state", pSyncNode->state);
|
||||
cJSON_AddStringToObject(pRoot, "state_str", syncUtilState2String(pSyncNode->state));
|
||||
char tmpBuf[RAFT_STORE_BLOCK_SIZE];
|
||||
raftStoreSerialize(pSyncNode->pRaftStore, tmpBuf, sizeof(tmpBuf));
|
||||
cJSON_AddStringToObject(pRoot, "pRaftStore", tmpBuf);
|
||||
|
||||
// tla+ candidate vars
|
||||
cJSON_AddItemToObject(pRoot, "pVotesGranted", voteGranted2Json(pSyncNode->pVotesGranted));
|
||||
cJSON_AddItemToObject(pRoot, "pVotesRespond", votesRespond2Json(pSyncNode->pVotesRespond));
|
||||
|
||||
// tla+ leader vars
|
||||
cJSON_AddItemToObject(pRoot, "pNextIndex", syncIndexMgr2Json(pSyncNode->pNextIndex));
|
||||
cJSON_AddItemToObject(pRoot, "pMatchIndex", syncIndexMgr2Json(pSyncNode->pMatchIndex));
|
||||
|
||||
// tla+ log vars
|
||||
cJSON_AddItemToObject(pRoot, "pLogStore", logStore2Json(pSyncNode->pLogStore));
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRId64 "", pSyncNode->commitIndex);
|
||||
cJSON_AddStringToObject(pRoot, "commitIndex", u64buf);
|
||||
|
||||
// ping timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pPingTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pPingTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "pingTimerMS", pSyncNode->pingTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpPingTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpPingTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->pingTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "pingTimerCounter", u64buf);
|
||||
|
||||
// elect timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pElectTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pElectTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "electTimerMS", pSyncNode->electTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpElectTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpElectTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->electTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "electTimerCounter", u64buf);
|
||||
|
||||
// heartbeat timer
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pHeartbeatTimer);
|
||||
cJSON_AddStringToObject(pRoot, "pHeartbeatTimer", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "heartbeatTimerMS", pSyncNode->heartbeatTimerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerLogicClock);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerLogicClock", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerLogicClockUser);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerLogicClockUser", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpHeartbeatTimerCB);
|
||||
cJSON_AddStringToObject(pRoot, "FpHeartbeatTimerCB", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pSyncNode->heartbeatTimerCounter);
|
||||
cJSON_AddStringToObject(pRoot, "heartbeatTimerCounter", u64buf);
|
||||
|
||||
// callback
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnPing);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnPing", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnPingReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnPingReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnRequestVote);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnRequestVote", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnRequestVoteReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnRequestVoteReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnAppendEntries);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnAppendEntries", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnAppendEntriesReply);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnAppendEntriesReply", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpOnTimeout);
|
||||
cJSON_AddStringToObject(pRoot, "FpOnTimeout", u64buf);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SSyncNode", pRoot);
|
||||
|
@@ -500,15 +511,17 @@ void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) {
}

void syncNodeBecomeFollower(SSyncNode* pSyncNode) {
  // maybe clear leader cache
  if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
    pSyncNode->leaderCache = EMPTY_RAFT_ID;
  }

  // state change
  pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
  syncNodeStopHeartbeatTimer(pSyncNode);

  int32_t electMS = syncUtilElectRandomMS();
  syncNodeRestartElectTimer(pSyncNode, electMS);
  // reset elect timer
  syncNodeResetElectTimer(pSyncNode);
}

// TLA+ Spec
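syncNodeResetElectTimer replaces the inlined restart above. A minimal sketch of what it is assumed to wrap, built from the two calls that were removed; the real helper may do extra bookkeeping:

// Sketch only: restart the election timer with a freshly randomized timeout so
// followers do not all time out in lockstep.
static void sketchResetElectTimer(SSyncNode* pSyncNode) {
  int32_t electMS = syncUtilElectRandomMS();
  syncNodeRestartElectTimer(pSyncNode, electMS);
}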
@@ -530,20 +543,32 @@ void syncNodeBecomeFollower(SSyncNode* pSyncNode) {
// /\ UNCHANGED <<messages, currentTerm, votedFor, candidateVars, logVars>>
//
void syncNodeBecomeLeader(SSyncNode* pSyncNode) {
  // state change
  pSyncNode->state = TAOS_SYNC_STATE_LEADER;

  // set leader cache
  pSyncNode->leaderCache = pSyncNode->myRaftId;

  for (int i = 0; i < pSyncNode->pNextIndex->replicaNum; ++i) {
    // maybe overwrite myself, no harm
    // just do it!
    pSyncNode->pNextIndex->index[i] = pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore) + 1;
  }

  for (int i = 0; i < pSyncNode->pMatchIndex->replicaNum; ++i) {
    // maybe overwrite myself, no harm
    // just do it!
    pSyncNode->pMatchIndex->index[i] = SYNC_INDEX_INVALID;
  }

  // stop elect timer
  syncNodeStopElectTimer(pSyncNode);
  syncNodeStartHeartbeatTimer(pSyncNode);

  // start replicate right now!
  syncNodeReplicate(pSyncNode);

  // start heartbeat timer
  syncNodeStartHeartbeatTimer(pSyncNode);
}

void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
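On becoming leader, nextIndex is seeded to lastIndex + 1 and matchIndex to SYNC_INDEX_INVALID for every replica, the standard Raft leader reinitialization. A minimal sketch of a check that could sit in a test, using only names visible in this diff:

// Sketch: after syncNodeBecomeLeader, every replica slot should satisfy the
// freshly-elected-leader invariant (nothing known to match yet, probe from the end).
static void sketchCheckLeaderInit(SSyncNode* pSyncNode) {
  SyncIndex lastIndex = pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore);
  for (int i = 0; i < pSyncNode->pNextIndex->replicaNum; ++i) {
    assert(pSyncNode->pNextIndex->index[i] == lastIndex + 1);
  }
  for (int i = 0; i < pSyncNode->pMatchIndex->replicaNum; ++i) {
    assert(pSyncNode->pMatchIndex->index[i] == SYNC_INDEX_INVALID);
  }
}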
@@ -568,6 +593,9 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {
}

// raft vote --------------

// just called by syncNodeVoteForSelf
// need assert
void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) {
  assert(term == pSyncNode->pRaftStore->currentTerm);
  assert(!raftStoreHasVoted(pSyncNode->pRaftStore));

@@ -575,6 +603,7 @@ void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId)
  raftStoreVote(pSyncNode->pRaftStore, pRaftId);
}

// simulate get vote from outside
void syncNodeVoteForSelf(SSyncNode* pSyncNode) {
  syncNodeVoteForTerm(pSyncNode, pSyncNode->pRaftStore->currentTerm, &(pSyncNode->myRaftId));

@@ -589,8 +618,6 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode) {
  syncRequestVoteReplyDestroy(pMsg);
}

void syncNodeMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {}

// for debug --------------
void syncNodePrint(SSyncNode* pObj) {
  char* serialized = syncNode2Str(pObj);
@@ -676,7 +703,8 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
    taosTmrReset(syncNodeEqHeartbeatTimer, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager,
                 &pSyncNode->pHeartbeatTimer);
  } else {
    sTrace("==syncNodeEqHeartbeatTimer== heartbeatTimerLogicClock:%" PRIu64 ", heartbeatTimerLogicClockUser:%" PRIu64 "",
    sTrace("==syncNodeEqHeartbeatTimer== heartbeatTimerLogicClock:%" PRIu64 ", heartbeatTimerLogicClockUser:%" PRIu64
           "",
           pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
  }
}
@@ -212,17 +212,19 @@ SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}

cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
  char u64buf[128];

  char u64buf[128];
  cJSON* pRoot = cJSON_CreateObject();
  cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
  cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
  cJSON_AddNumberToObject(pRoot, "timeoutType", pMsg->timeoutType);
  snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->logicClock);
  cJSON_AddStringToObject(pRoot, "logicClock", u64buf);
  cJSON_AddNumberToObject(pRoot, "timerMS", pMsg->timerMS);
  snprintf(u64buf, sizeof(u64buf), "%p", pMsg->data);
  cJSON_AddStringToObject(pRoot, "data", u64buf);

  if (pMsg != NULL) {
    cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
    cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
    cJSON_AddNumberToObject(pRoot, "timeoutType", pMsg->timeoutType);
    snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->logicClock);
    cJSON_AddStringToObject(pRoot, "logicClock", u64buf);
    cJSON_AddNumberToObject(pRoot, "timerMS", pMsg->timerMS);
    snprintf(u64buf, sizeof(u64buf), "%p", pMsg->data);
    cJSON_AddStringToObject(pRoot, "data", u64buf);
  }

  cJSON* pJson = cJSON_CreateObject();
  cJSON_AddItemToObject(pJson, "SyncTimeout", pRoot);
@ -342,50 +344,52 @@ SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncPing2Json(const SyncPing* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncPing", pRoot);
|
||||
|
@ -505,50 +509,52 @@ SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncPingReply", pRoot);
|
||||
|
@ -664,24 +670,26 @@ SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
cJSON_AddNumberToObject(pRoot, "originalRpcType", pMsg->originalRpcType);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->seqNum);
|
||||
cJSON_AddStringToObject(pRoot, "seqNum", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "isWeak", pMsg->isWeak);
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
cJSON_AddNumberToObject(pRoot, "originalRpcType", pMsg->originalRpcType);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->seqNum);
|
||||
cJSON_AddStringToObject(pRoot, "seqNum", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "isWeak", pMsg->isWeak);
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
}
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncClientRequest", pRoot);
|
||||
|
@ -785,47 +793,49 @@ SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pDestId, "addr", pMsg->destId.addr);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->lastLogIndex);
|
||||
cJSON_AddStringToObject(pRoot, "lastLogIndex", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->lastLogTerm);
|
||||
cJSON_AddStringToObject(pRoot, "lastLogTerm", u64buf);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pDestId, "addr", pMsg->destId.addr);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->lastLogIndex);
|
||||
cJSON_AddStringToObject(pRoot, "lastLogIndex", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->lastLogTerm);
|
||||
cJSON_AddStringToObject(pRoot, "lastLogTerm", u64buf);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncRequestVote", pRoot);
|
||||
|
@ -929,44 +939,46 @@ SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pDestId, "addr", pMsg->destId.addr);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "vote_granted", pMsg->voteGranted);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pDestId, "addr", pMsg->destId.addr);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "vote_granted", pMsg->voteGranted);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncRequestVoteReply", pRoot);
|
||||
|
@ -1072,62 +1084,64 @@ SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) {
|
|||
}
|
||||
|
||||
cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->prevLogIndex);
|
||||
cJSON_AddStringToObject(pRoot, "pre_log_index", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->prevLogTerm);
|
||||
cJSON_AddStringToObject(pRoot, "pre_log_term", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->commitIndex);
|
||||
cJSON_AddStringToObject(pRoot, "commit_index", u64buf);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->prevLogIndex);
|
||||
cJSON_AddStringToObject(pRoot, "pre_log_index", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->prevLogTerm);
|
||||
cJSON_AddStringToObject(pRoot, "pre_log_term", u64buf);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->commitIndex);
|
||||
cJSON_AddStringToObject(pRoot, "commit_index", u64buf);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncAppendEntries", pRoot);
|
||||
|
@ -1231,47 +1245,49 @@ SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg
|
|||
}
|
||||
|
||||
cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
if (pMsg != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
|
||||
|
||||
cJSON* pSrcId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
|
||||
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->srcId.addr;
|
||||
cJSON* pTmp = pSrcId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "success", pMsg->success);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->matchIndex);
|
||||
cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
|
||||
}
|
||||
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
|
||||
|
||||
cJSON* pDestId = cJSON_CreateObject();
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->destId.addr);
|
||||
cJSON_AddStringToObject(pDestId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pMsg->destId.addr;
|
||||
cJSON* pTmp = pDestId;
|
||||
char host[128];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
cJSON_AddStringToObject(pTmp, "addr_host", host);
|
||||
cJSON_AddNumberToObject(pTmp, "addr_port", port);
|
||||
}
|
||||
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
|
||||
cJSON_AddItemToObject(pRoot, "destId", pDestId);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "success", pMsg->success);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pMsg->matchIndex);
|
||||
cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SyncAppendEntriesReply", pRoot);
|
||||
|
|
|
@ -68,29 +68,31 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) {
|
|||
}
|
||||
|
||||
cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) {
|
||||
char u64buf[128];
|
||||
|
||||
char u64buf[128];
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pEntry->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pEntry->msgType);
|
||||
cJSON_AddNumberToObject(pRoot, "originalRpcType", pEntry->originalRpcType);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pEntry->seqNum);
|
||||
cJSON_AddStringToObject(pRoot, "seqNum", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "isWeak", pEntry->isWeak);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pEntry->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pEntry->index);
|
||||
cJSON_AddStringToObject(pRoot, "index", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pEntry->dataLen);
|
||||
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pEntry->data), pEntry->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
if (pEntry != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "bytes", pEntry->bytes);
|
||||
cJSON_AddNumberToObject(pRoot, "msgType", pEntry->msgType);
|
||||
cJSON_AddNumberToObject(pRoot, "originalRpcType", pEntry->originalRpcType);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pEntry->seqNum);
|
||||
cJSON_AddStringToObject(pRoot, "seqNum", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "isWeak", pEntry->isWeak);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pEntry->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pEntry->index);
|
||||
cJSON_AddStringToObject(pRoot, "index", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "dataLen", pEntry->dataLen);
|
||||
|
||||
s = syncUtilprintBin2((char*)(pEntry->data), pEntry->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
char* s;
|
||||
s = syncUtilprintBin((char*)(pEntry->data), pEntry->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data", s);
|
||||
free(s);
|
||||
|
||||
s = syncUtilprintBin2((char*)(pEntry->data), pEntry->dataLen);
|
||||
cJSON_AddStringToObject(pRoot, "data2", s);
|
||||
free(s);
|
||||
}
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SSyncRaftEntry", pRoot);
|
||||
|
|
|
@ -34,7 +34,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
|
|||
pLogStore->getLastTerm = logStoreLastTerm;
|
||||
pLogStore->updateCommitIndex = logStoreUpdateCommitIndex;
|
||||
pLogStore->getCommitIndex = logStoreGetCommitIndex;
|
||||
return pLogStore; // to avoid compiler error
|
||||
return pLogStore; // to avoid compiler error
|
||||
}
|
||||
|
||||
void logStoreDestory(SSyncLogStore* pLogStore) {
|
||||
|
@ -59,21 +59,24 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
|
|||
|
||||
walFsync(pWal, true);
|
||||
free(serialized);
|
||||
return code; // to avoid compiler error
|
||||
return code; // to avoid compiler error
|
||||
}
|
||||
|
||||
SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
|
||||
SSyncLogStoreData* pData = pLogStore->data;
|
||||
SWal* pWal = pData->pWal;
|
||||
SSyncRaftEntry* pEntry;
|
||||
SSyncRaftEntry* pEntry = NULL;
|
||||
|
||||
SWalReadHandle* pWalHandle = walOpenReadHandle(pWal);
|
||||
walReadWithHandle(pWalHandle, index);
|
||||
pEntry = syncEntryDeserialize(pWalHandle->pHead->head.body, pWalHandle->pHead->head.len);
|
||||
assert(pEntry != NULL);
|
||||
if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) {
|
||||
SWalReadHandle* pWalHandle = walOpenReadHandle(pWal);
|
||||
walReadWithHandle(pWalHandle, index);
|
||||
pEntry = syncEntryDeserialize(pWalHandle->pHead->head.body, pWalHandle->pHead->head.len);
|
||||
assert(pEntry != NULL);
|
||||
|
||||
// need to hold, do not new every time!!
|
||||
walCloseReadHandle(pWalHandle);
|
||||
}
|
||||
|
||||
// need to hold, do not new every time!!
|
||||
walCloseReadHandle(pWalHandle);
|
||||
return pEntry;
|
||||
}
|
||||
|
||||
|
@ -81,7 +84,7 @@ int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex) {
|
|||
SSyncLogStoreData* pData = pLogStore->data;
|
||||
SWal* pWal = pData->pWal;
|
||||
walRollback(pWal, fromIndex);
|
||||
return 0; // to avoid compiler error
|
||||
return 0; // to avoid compiler error
|
||||
}
|
||||
|
||||
SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore) {
|
||||
|
@ -105,7 +108,7 @@ int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index) {
|
|||
SSyncLogStoreData* pData = pLogStore->data;
|
||||
SWal* pWal = pData->pWal;
|
||||
walCommit(pWal, index);
|
||||
return 0; // to avoid compiler error
|
||||
return 0; // to avoid compiler error
|
||||
}
|
||||
|
||||
SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore) {
|
||||
|
@@ -126,26 +129,28 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) {
}

cJSON* logStore2Json(SSyncLogStore* pLogStore) {
  char u64buf[128];

  char               u64buf[128];
  SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
  cJSON*             pRoot = cJSON_CreateObject();
  snprintf(u64buf, sizeof(u64buf), "%p", pData->pSyncNode);
  cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
  snprintf(u64buf, sizeof(u64buf), "%p", pData->pWal);
  cJSON_AddStringToObject(pRoot, "pWal", u64buf);
  snprintf(u64buf, sizeof(u64buf), "%" PRId64 "", logStoreLastIndex(pLogStore));
  cJSON_AddStringToObject(pRoot, "LastIndex", u64buf);
  snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", logStoreLastTerm(pLogStore));
  cJSON_AddStringToObject(pRoot, "LastTerm", u64buf);

  cJSON* pEntries = cJSON_CreateArray();
  cJSON_AddItemToObject(pRoot, "pEntries", pEntries);
  SyncIndex lastIndex = logStoreLastIndex(pLogStore);
  for (SyncIndex i = 0; i <= lastIndex; ++i) {
    SSyncRaftEntry* pEntry = logStoreGetEntry(pLogStore, i);
    cJSON_AddItemToArray(pEntries, syncEntry2Json(pEntry));
    syncEntryDestory(pEntry);
  if (pData != NULL && pData->pWal != NULL) {
    snprintf(u64buf, sizeof(u64buf), "%p", pData->pSyncNode);
    cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
    snprintf(u64buf, sizeof(u64buf), "%p", pData->pWal);
    cJSON_AddStringToObject(pRoot, "pWal", u64buf);
    snprintf(u64buf, sizeof(u64buf), "%ld", logStoreLastIndex(pLogStore));
    cJSON_AddStringToObject(pRoot, "LastIndex", u64buf);
    snprintf(u64buf, sizeof(u64buf), "%lu", logStoreLastTerm(pLogStore));
    cJSON_AddStringToObject(pRoot, "LastTerm", u64buf);

    cJSON* pEntries = cJSON_CreateArray();
    cJSON_AddItemToObject(pRoot, "pEntries", pEntries);
    SyncIndex lastIndex = logStoreLastIndex(pLogStore);
    for (SyncIndex i = 0; i <= lastIndex; ++i) {
      SSyncRaftEntry* pEntry = logStoreGetEntry(pLogStore, i);
      cJSON_AddItemToArray(pEntries, syncEntry2Json(pEntry));
      syncEntryDestory(pEntry);
    }
  }

  cJSON* pJson = cJSON_CreateObject();
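// --- Illustrative sketch (not part of this commit) ---
// The dump loop above creates one temporary entry per index, hands a cJSON
// item to the array (which then owns it), and releases the temporary before
// the next iteration. The same per-iteration ownership shape, standalone
// (only the cJSON library is assumed; the malloc'd int stands in for an entry):

#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"

int main(void) {
  cJSON *pRoot = cJSON_CreateObject();
  cJSON *pEntries = cJSON_CreateArray();
  cJSON_AddItemToObject(pRoot, "pEntries", pEntries);   // pRoot now owns pEntries

  for (int i = 0; i <= 2; ++i) {
    int *pEntry = malloc(sizeof(int));                  // stands in for logStoreGetEntry()
    *pEntry = i * 10;
    cJSON_AddItemToArray(pEntries, cJSON_CreateNumber(*pEntry));
    free(pEntry);                                       // stands in for syncEntryDestory()
  }

  char *s = cJSON_Print(pRoot);
  printf("%s\n", s);
  free(s);
  cJSON_Delete(pRoot);                                  // frees the whole tree
  return 0;
}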
@@ -97,16 +97,32 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) {
  return 0;
}

static bool raftStoreFileExist(char *path) { return taosStatFile(path, NULL, NULL) >= 0; }
static bool raftStoreFileExist(char *path) {
  bool b = taosStatFile(path, NULL, NULL) >= 0;
  return b;
}

int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
  assert(pRaftStore != NULL);

  cJSON *pRoot = cJSON_CreateObject();
  cJSON_AddNumberToObject(pRoot, "current_term", pRaftStore->currentTerm);
  cJSON_AddNumberToObject(pRoot, "vote_for_addr", pRaftStore->voteFor.addr);

  char u64Buf[128];
  snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm);
  cJSON_AddStringToObject(pRoot, "current_term", u64Buf);

  snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->voteFor.addr);
  cJSON_AddStringToObject(pRoot, "vote_for_addr", u64Buf);

  cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);

  uint64_t u64 = pRaftStore->voteFor.addr;
  char     host[128];
  uint16_t port;
  syncUtilU642Addr(u64, host, sizeof(host), &port);
  cJSON_AddStringToObject(pRoot, "addr_host", host);
  cJSON_AddNumberToObject(pRoot, "addr_port", port);

  char *serialized = cJSON_Print(pRoot);
  int   len2 = strlen(serialized);
  assert(len2 < len);
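// --- Illustrative sketch (not part of this commit) ---
// Switching current_term / vote_for_addr from JSON numbers to strings appears
// to sidestep cJSON's double-based number type, which cannot represent every
// 64-bit value exactly. A standalone round trip of that idea (PRIu64/SCNu64
// used here instead of the non-portable "%lu"):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"

int main(void) {
  uint64_t termIn = 9223372036854775813ULL;   // not exactly representable as a double
  char     buf[32];

  cJSON *pRoot = cJSON_CreateObject();
  snprintf(buf, sizeof(buf), "%" PRIu64, termIn);
  cJSON_AddStringToObject(pRoot, "current_term", buf);

  char *text = cJSON_Print(pRoot);
  cJSON_Delete(pRoot);

  uint64_t termOut = 0;
  cJSON *pParsed = cJSON_Parse(text);
  cJSON *pTerm = cJSON_GetObjectItem(pParsed, "current_term");
  if (cJSON_IsString(pTerm)) {
    sscanf(pTerm->valuestring, "%" SCNu64, &termOut);
  }
  printf("in=%" PRIu64 " out=%" PRIu64 " equal=%d\n", termIn, termOut, termIn == termOut);

  cJSON_Delete(pParsed);
  free(text);
  return 0;
}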
@@ -125,10 +141,12 @@ int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) {
  cJSON *pRoot = cJSON_Parse(buf);

  cJSON *pCurrentTerm = cJSON_GetObjectItem(pRoot, "current_term");
  pRaftStore->currentTerm = pCurrentTerm->valueint;
  assert(cJSON_IsString(pCurrentTerm));
  sscanf(pCurrentTerm->valuestring, "%lu", &(pRaftStore->currentTerm));

  cJSON *pVoteForAddr = cJSON_GetObjectItem(pRoot, "vote_for_addr");
  pRaftStore->voteFor.addr = pVoteForAddr->valueint;
  assert(cJSON_IsString(pVoteForAddr));
  sscanf(pVoteForAddr->valuestring, "%lu", &(pRaftStore->voteFor.addr));

  cJSON *pVoteForVgid = cJSON_GetObjectItem(pRoot, "vote_for_vgid");
  pRaftStore->voteFor.vgId = pVoteForVgid->valueint;
@@ -139,11 +157,10 @@ int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) {

bool raftStoreHasVoted(SRaftStore *pRaftStore) {
  bool b = syncUtilEmptyId(&(pRaftStore->voteFor));
  return b;
  return (!b);
}

void raftStoreVote(SRaftStore *pRaftStore, SRaftId *pRaftId) {
  assert(!raftStoreHasVoted(pRaftStore));
  assert(!syncUtilEmptyId(pRaftId));
  pRaftStore->voteFor = *pRaftId;
  raftStorePersist(pRaftStore);
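// --- Illustrative sketch (not part of this commit) ---
// raftStoreHasVoted now reports true only when voteFor is non-empty, and
// raftStoreVote asserts that the store has not voted yet in this term. The
// same guard, standalone (Demo* names are hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t addr; int32_t vgId; } DemoRaftId;

static bool demoIdEmpty(const DemoRaftId *id) { return id->addr == 0 && id->vgId == 0; }
static bool demoHasVoted(const DemoRaftId *voteFor) { return !demoIdEmpty(voteFor); }

static void demoVote(DemoRaftId *voteFor, const DemoRaftId *candidate) {
  assert(!demoHasVoted(voteFor));      // at most one vote per term
  assert(!demoIdEmpty(candidate));
  *voteFor = *candidate;               // persisting to disk is omitted in this sketch
}

int main(void) {
  DemoRaftId voteFor = {0, 0}, candidate = {100, 1};
  printf("hasVoted before: %d\n", demoHasVoted(&voteFor));
  demoVote(&voteFor, &candidate);
  printf("hasVoted after:  %d\n", demoHasVoted(&voteFor));
  return 0;
}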
@@ -164,30 +181,68 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) {
  raftStorePersist(pRaftStore);
}

int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; }

cJSON *raftStore2Json(SRaftStore *pRaftStore) {
  char   u64buf[128];
  cJSON *pRoot = cJSON_CreateObject();

  if (pRaftStore != NULL) {
    snprintf(u64buf, sizeof(u64buf), "%lu", pRaftStore->currentTerm);
    cJSON_AddStringToObject(pRoot, "currentTerm", u64buf);

    cJSON *pVoteFor = cJSON_CreateObject();
    snprintf(u64buf, sizeof(u64buf), "%lu", pRaftStore->voteFor.addr);
    cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
    {
      uint64_t u64 = pRaftStore->voteFor.addr;
      char     host[128];
      uint16_t port;
      syncUtilU642Addr(u64, host, sizeof(host), &port);
      cJSON_AddStringToObject(pVoteFor, "addr_host", host);
      cJSON_AddNumberToObject(pVoteFor, "addr_port", port);
    }
    cJSON_AddNumberToObject(pVoteFor, "vgId", pRaftStore->voteFor.vgId);
    cJSON_AddItemToObject(pRoot, "voteFor", pVoteFor);

    int hasVoted = raftStoreHasVoted(pRaftStore);
    cJSON_AddNumberToObject(pRoot, "hasVoted", hasVoted);
  }

  cJSON *pJson = cJSON_CreateObject();
  cJSON_AddItemToObject(pJson, "SRaftStore", pRoot);
  return pJson;
}

char *raftStore2Str(SRaftStore *pRaftStore) {
  cJSON *pJson = raftStore2Json(pRaftStore);
  char * serialized = cJSON_Print(pJson);
  cJSON_Delete(pJson);
  return serialized;
}

// for debug -------------------
void raftStorePrint(SRaftStore *pObj) {
  char serialized[RAFT_STORE_BLOCK_SIZE];
  raftStoreSerialize(pObj, serialized, sizeof(serialized));
  char *serialized = raftStore2Str(pObj);
  printf("raftStorePrint | len:%lu | %s \n", strlen(serialized), serialized);
  fflush(NULL);
  free(serialized);
}

void raftStorePrint2(char *s, SRaftStore *pObj) {
  char serialized[RAFT_STORE_BLOCK_SIZE];
  raftStoreSerialize(pObj, serialized, sizeof(serialized));
  char *serialized = raftStore2Str(pObj);
  printf("raftStorePrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
  fflush(NULL);
  free(serialized);
}
void raftStoreLog(SRaftStore *pObj) {
  char serialized[RAFT_STORE_BLOCK_SIZE];
  raftStoreSerialize(pObj, serialized, sizeof(serialized));
  char *serialized = raftStore2Str(pObj);
  sTrace("raftStoreLog | len:%lu | %s", strlen(serialized), serialized);
  fflush(NULL);
  free(serialized);
}

void raftStoreLog2(char *s, SRaftStore *pObj) {
  char serialized[RAFT_STORE_BLOCK_SIZE];
  raftStoreSerialize(pObj, serialized, sizeof(serialized));
  char *serialized = raftStore2Str(pObj);
  sTrace("raftStoreLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
  fflush(NULL);
  free(serialized);
}
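// --- Illustrative sketch (not part of this commit) ---
// The reworked debug helpers all follow one ownership rule: the *2Str()
// function returns a heap string produced by cJSON_Print(), and the caller
// logs it and frees it. Standalone version of that pattern (cJSON assumed;
// %zu is the portable length format for strlen's size_t result):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cJSON.h"

static char *demo2Str(int term) {
  cJSON *pJson = cJSON_CreateObject();
  cJSON_AddNumberToObject(pJson, "currentTerm", term);
  char *serialized = cJSON_Print(pJson);   // heap string, owned by the caller
  cJSON_Delete(pJson);
  return serialized;
}

int main(void) {
  char *serialized = demo2Str(7);
  printf("demoPrint | len:%zu | %s\n", strlen(serialized), serialized);
  free(serialized);                        // every print/log helper frees it
  return 0;
}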
@@ -18,6 +18,7 @@
#include "syncMessage.h"
#include "syncRaftEntry.h"
#include "syncRaftLog.h"
#include "syncRaftStore.h"
#include "syncUtil.h"

// TLA+ Spec
@@ -50,33 +51,54 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {

  int32_t ret = 0;
  for (int i = 0; i < pSyncNode->peersNum; ++i) {
    SRaftId*  pDestId = &(pSyncNode->peersId[i]);
    SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId);
    SRaftId* pDestId = &(pSyncNode->peersId[i]);

    // set prevLogIndex
    SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId);
    SyncIndex preLogIndex = nextIndex - 1;

    // set preLogTerm
    SyncTerm preLogTerm = 0;
    if (preLogIndex >= SYNC_INDEX_BEGIN) {
      SSyncRaftEntry* pPreEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, preLogIndex);
      assert(pPreEntry != NULL);

      preLogTerm = pPreEntry->term;
      syncEntryDestory(pPreEntry);
    }

    SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex);
    assert(nextIndex == lastIndex);
    // batch optimized
    // SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex);

    SSyncRaftEntry* pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex);
    assert(pEntry != NULL);
    SyncAppendEntries* pMsg = NULL;
    SSyncRaftEntry*    pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex);
    if (pEntry != NULL) {
      SyncAppendEntries* pMsg = syncAppendEntriesBuild(pEntry->bytes);

      // add pEntry into msg
      uint32_t len;
      char*    serialized = syncEntrySerialize(pEntry, &len);
      assert(len == pEntry->bytes);
      memcpy(pMsg->data, serialized, len);

      free(serialized);
      syncEntryDestory(pEntry);

    } else {
      // maybe overflow, send empty record
      SyncAppendEntries* pMsg = syncAppendEntriesBuild(0);
    }

    SyncAppendEntries* pMsg = syncAppendEntriesBuild(pEntry->bytes);
    pMsg->srcId = pSyncNode->myRaftId;
    pMsg->destId = *pDestId;
    pMsg->term = pSyncNode->pRaftStore->currentTerm;
    pMsg->prevLogIndex = preLogIndex;
    pMsg->prevLogTerm = preLogTerm;
    pMsg->commitIndex = pSyncNode->commitIndex;
    pMsg->dataLen = pEntry->bytes;
    // add pEntry into msg

    // send AppendEntries
    syncNodeAppendEntries(pSyncNode, pDestId, pMsg);
    syncAppendEntriesDestroy(pMsg);
  }

  return ret;
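// --- Illustrative sketch (not part of this commit) ---
// syncAppendEntriesBuild(bytes) allocates a message whose trailing data[]
// member receives the serialized entry via memcpy, with dataLen recording the
// payload size. The same flexible-array-member packing, standalone
// (DemoAppendEntries and demoBuild are hypothetical stand-ins):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint64_t term;
  int64_t  prevLogIndex;
  uint32_t dataLen;
  char     data[];        // flexible array member, payload follows the header
} DemoAppendEntries;

static DemoAppendEntries *demoBuild(uint32_t dataLen) {
  DemoAppendEntries *pMsg = calloc(1, sizeof(*pMsg) + dataLen);
  assert(pMsg != NULL);
  pMsg->dataLen = dataLen;
  return pMsg;
}

int main(void) {
  const char entry[] = "serialized-entry-bytes";
  uint32_t   len = sizeof(entry);

  DemoAppendEntries *pMsg = demoBuild(len);   // body sized for the payload
  pMsg->term = 3;
  pMsg->prevLogIndex = 41;
  memcpy(pMsg->data, entry, len);

  printf("term=%llu dataLen=%u payload=%s\n",
         (unsigned long long)pMsg->term, pMsg->dataLen, pMsg->data);
  free(pMsg);
  return 0;
}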
@@ -56,6 +56,8 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
  bool grant = (pMsg->term == ths->pRaftStore->currentTerm) && logOK &&
               ((!raftStoreHasVoted(ths->pRaftStore)) || (syncUtilSameId(&(ths->pRaftStore->voteFor), &(pMsg->srcId))));
  if (grant) {
    // maybe has already voted for pMsg->srcId
    // vote again, no harm
    raftStoreVote(ths->pRaftStore, &(pMsg->srcId));
  }
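// --- Illustrative sketch (not part of this commit) ---
// The grant condition above follows the RequestVote rule: same term, the
// candidate's log is at least as up to date, and this node either has not
// voted this term or already voted for the same candidate. Standalone model
// (Demo* names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t addr; int32_t vgId; } DemoRaftId;

static bool demoSameId(const DemoRaftId *a, const DemoRaftId *b) {
  return a->addr == b->addr && a->vgId == b->vgId;
}

static bool demoGrantVote(uint64_t msgTerm, uint64_t myTerm, bool logOK, bool hasVoted,
                          const DemoRaftId *voteFor, const DemoRaftId *candidate) {
  return (msgTerm == myTerm) && logOK && (!hasVoted || demoSameId(voteFor, candidate));
}

int main(void) {
  DemoRaftId candidate = {100, 1}, voteFor = {100, 1};
  // Re-voting for the same candidate in the same term is allowed ("no harm").
  printf("grant=%d\n", demoGrantVote(5, 5, true, true, &voteFor, &candidate));
  return 0;
}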
@@ -45,6 +45,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
    return ret;
  }

  assert(!(pMsg->term > ths->pRaftStore->currentTerm));
  // no need this code, because if I receive reply.term, then I must have sent for that term.
  //  if (pMsg->term > ths->pRaftStore->currentTerm) {
  //    syncNodeUpdateTerm(ths, pMsg->term);

@@ -52,17 +53,29 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)

  assert(pMsg->term == ths->pRaftStore->currentTerm);

  // This tallies votes even when the current state is not Candidate,
  // but they won't be looked at, so it doesn't matter.
  if (ths->state == TAOS_SYNC_STATE_CANDIDATE) {
    votesRespondAdd(ths->pVotesRespond, pMsg);
    if (pMsg->voteGranted) {
      // add vote
      voteGrantedVote(ths->pVotesGranted, pMsg);

      // maybe to leader
      if (voteGrantedMajority(ths->pVotesGranted)) {
        if (ths->pVotesGranted->toLeader) {
        if (!ths->pVotesGranted->toLeader) {
          syncNodeCandidate2Leader(ths);

          // prevent to leader again!
          ths->pVotesGranted->toLeader = true;
        }
      }
    } else {
      ;
      // do nothing
      // UNCHANGED <<votesGranted, voterLog>>
    }
  }

  return ret;
}
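// --- Illustrative sketch (not part of this commit) ---
// The reply handler counts granted votes and promotes to leader exactly once,
// using the toLeader flag as a latch so a later reply cannot re-trigger the
// transition. A standalone model of that tally (hypothetical Demo* types):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int  votes;
  int  replicaNum;
  int  quorum;
  bool toLeader;
} DemoVotesGranted;

static void demoOnReply(DemoVotesGranted *vg, bool voteGranted) {
  if (!voteGranted) return;
  vg->votes++;
  if (vg->votes >= vg->quorum && !vg->toLeader) {
    printf("become leader with %d/%d votes\n", vg->votes, vg->replicaNum);
    vg->toLeader = true;     // prevent becoming leader again for this term
  }
}

int main(void) {
  DemoVotesGranted vg = {.votes = 1 /* self vote */, .replicaNum = 3, .quorum = 2, .toLeader = false};
  demoOnReply(&vg, true);    // reaches quorum, promotes once
  demoOnReply(&vg, true);    // extra grant; the latch prevents a second promotion
  return 0;
}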
@@ -24,7 +24,8 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
  if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
    if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
      ++(ths->pingTimerCounter);
      syncNodePingAll(ths);
      // syncNodePingAll(ths);
      syncNodePingPeers(ths);
    }

  } else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
@ -82,30 +82,32 @@ cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) {
|
|||
char u64buf[128];
|
||||
cJSON *pRoot = cJSON_CreateObject();
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pVotesGranted->replicaNum);
|
||||
cJSON *pReplicas = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
|
||||
for (int i = 0; i < pVotesGranted->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pVotesGranted->replicas))[i]));
|
||||
}
|
||||
int *arr = (int *)malloc(sizeof(int) * pVotesGranted->replicaNum);
|
||||
for (int i = 0; i < pVotesGranted->replicaNum; ++i) {
|
||||
arr[i] = pVotesGranted->isGranted[i];
|
||||
}
|
||||
cJSON *pIsGranted = cJSON_CreateIntArray(arr, pVotesGranted->replicaNum);
|
||||
free(arr);
|
||||
cJSON_AddItemToObject(pRoot, "isGranted", pIsGranted);
|
||||
if (pVotesGranted != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pVotesGranted->replicaNum);
|
||||
cJSON *pReplicas = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
|
||||
for (int i = 0; i < pVotesGranted->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pVotesGranted->replicas))[i]));
|
||||
}
|
||||
int *arr = (int *)malloc(sizeof(int) * pVotesGranted->replicaNum);
|
||||
for (int i = 0; i < pVotesGranted->replicaNum; ++i) {
|
||||
arr[i] = pVotesGranted->isGranted[i];
|
||||
}
|
||||
cJSON *pIsGranted = cJSON_CreateIntArray(arr, pVotesGranted->replicaNum);
|
||||
free(arr);
|
||||
cJSON_AddItemToObject(pRoot, "isGranted", pIsGranted);
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "votes", pVotesGranted->votes);
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pVotesGranted->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "quorum", pVotesGranted->quorum);
|
||||
cJSON_AddNumberToObject(pRoot, "toLeader", pVotesGranted->toLeader);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pVotesGranted->pSyncNode);
|
||||
cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "votes", pVotesGranted->votes);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pVotesGranted->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "quorum", pVotesGranted->quorum);
|
||||
cJSON_AddNumberToObject(pRoot, "toLeader", pVotesGranted->toLeader);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pVotesGranted->pSyncNode);
|
||||
cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
|
||||
|
||||
bool majority = voteGrantedMajority(pVotesGranted);
|
||||
cJSON_AddNumberToObject(pRoot, "majority", majority);
|
||||
bool majority = voteGrantedMajority(pVotesGranted);
|
||||
cJSON_AddNumberToObject(pRoot, "majority", majority);
|
||||
}
|
||||
|
||||
cJSON *pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SVotesGranted", pRoot);
|
||||
|
@ -122,27 +124,27 @@ char *voteGranted2Str(SVotesGranted *pVotesGranted) {
|
|||
// for debug -------------------
|
||||
void voteGrantedPrint(SVotesGranted *pObj) {
|
||||
char *serialized = voteGranted2Str(pObj);
|
||||
printf("voteGrantedPrint | len:%zu | %s \n", strlen(serialized), serialized);
|
||||
printf("voteGrantedPrint | len:%lu | %s \n", strlen(serialized), serialized);
|
||||
fflush(NULL);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void voteGrantedPrint2(char *s, SVotesGranted *pObj) {
|
||||
char *serialized = voteGranted2Str(pObj);
|
||||
printf("voteGrantedPrint2 | len:%zu | %s | %s \n", strlen(serialized), s, serialized);
|
||||
printf("voteGrantedPrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
|
||||
fflush(NULL);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void voteGrantedLog(SVotesGranted *pObj) {
|
||||
char *serialized = voteGranted2Str(pObj);
|
||||
sTrace("voteGrantedLog | len:%zu | %s", strlen(serialized), serialized);
|
||||
sTrace("voteGrantedLog | len:%lu | %s", strlen(serialized), serialized);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void voteGrantedLog2(char *s, SVotesGranted *pObj) {
|
||||
char *serialized = voteGranted2Str(pObj);
|
||||
sTrace("voteGrantedLog2 | len:%zu | %s | %s", strlen(serialized), s, serialized);
|
||||
sTrace("voteGrantedLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
|
@ -203,29 +205,31 @@ cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) {
|
|||
char u64buf[128];
|
||||
cJSON *pRoot = cJSON_CreateObject();
|
||||
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pVotesRespond->replicaNum);
|
||||
cJSON *pReplicas = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
|
||||
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pVotesRespond->replicas))[i]));
|
||||
}
|
||||
int respondNum = 0;
|
||||
int *arr = (int *)malloc(sizeof(int) * pVotesRespond->replicaNum);
|
||||
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
|
||||
arr[i] = pVotesRespond->isRespond[i];
|
||||
if (pVotesRespond->isRespond[i]) {
|
||||
respondNum++;
|
||||
if (pVotesRespond != NULL) {
|
||||
cJSON_AddNumberToObject(pRoot, "replicaNum", pVotesRespond->replicaNum);
|
||||
cJSON *pReplicas = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "replicas", pReplicas);
|
||||
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
|
||||
cJSON_AddItemToArray(pReplicas, syncUtilRaftId2Json(&(*(pVotesRespond->replicas))[i]));
|
||||
}
|
||||
}
|
||||
cJSON *pIsRespond = cJSON_CreateIntArray(arr, pVotesRespond->replicaNum);
|
||||
free(arr);
|
||||
cJSON_AddItemToObject(pRoot, "isRespond", pIsRespond);
|
||||
cJSON_AddNumberToObject(pRoot, "respondNum", respondNum);
|
||||
int respondNum = 0;
|
||||
int *arr = (int *)malloc(sizeof(int) * pVotesRespond->replicaNum);
|
||||
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
|
||||
arr[i] = pVotesRespond->isRespond[i];
|
||||
if (pVotesRespond->isRespond[i]) {
|
||||
respondNum++;
|
||||
}
|
||||
}
|
||||
cJSON *pIsRespond = cJSON_CreateIntArray(arr, pVotesRespond->replicaNum);
|
||||
free(arr);
|
||||
cJSON_AddItemToObject(pRoot, "isRespond", pIsRespond);
|
||||
cJSON_AddNumberToObject(pRoot, "respondNum", respondNum);
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pVotesRespond->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pVotesRespond->pSyncNode);
|
||||
cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pVotesRespond->term);
|
||||
cJSON_AddStringToObject(pRoot, "term", u64buf);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pVotesRespond->pSyncNode);
|
||||
cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
|
||||
}
|
||||
|
||||
cJSON *pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SVotesRespond", pRoot);
|
||||
|
@ -242,26 +246,26 @@ char *votesRespond2Str(SVotesRespond *pVotesRespond) {
|
|||
// for debug -------------------
|
||||
void votesRespondPrint(SVotesRespond *pObj) {
|
||||
char *serialized = votesRespond2Str(pObj);
|
||||
printf("votesRespondPrint | len:%zu | %s \n", strlen(serialized), serialized);
|
||||
printf("votesRespondPrint | len:%lu | %s \n", strlen(serialized), serialized);
|
||||
fflush(NULL);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void votesRespondPrint2(char *s, SVotesRespond *pObj) {
|
||||
char *serialized = votesRespond2Str(pObj);
|
||||
printf("votesRespondPrint2 | len:%zu | %s | %s \n", strlen(serialized), s, serialized);
|
||||
printf("votesRespondPrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
|
||||
fflush(NULL);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void votesRespondLog(SVotesRespond *pObj) {
|
||||
char *serialized = votesRespond2Str(pObj);
|
||||
sTrace("votesRespondLog | len:%zu | %s", strlen(serialized), serialized);
|
||||
sTrace("votesRespondLog | len:%lu | %s", strlen(serialized), serialized);
|
||||
free(serialized);
|
||||
}
|
||||
|
||||
void votesRespondLog2(char *s, SVotesRespond *pObj) {
|
||||
char *serialized = votesRespond2Str(pObj);
|
||||
sTrace("votesRespondLog2 | len:%zu | %s | %s", strlen(serialized), s, serialized);
|
||||
sTrace("votesRespondLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
|
||||
free(serialized);
|
||||
}
|
||||
|
|
|
@ -25,6 +25,9 @@ add_executable(syncTimeoutTest "")
|
|||
add_executable(syncPingTest "")
|
||||
add_executable(syncPingReplyTest "")
|
||||
add_executable(syncRpcMsgTest "")
|
||||
add_executable(syncPingTimerTest2 "")
|
||||
add_executable(syncPingSelfTest "")
|
||||
add_executable(syncElectTest "")
|
||||
|
||||
|
||||
target_sources(syncTest
|
||||
|
@ -135,6 +138,18 @@ target_sources(syncRpcMsgTest
|
|||
PRIVATE
|
||||
"syncRpcMsgTest.cpp"
|
||||
)
|
||||
target_sources(syncPingTimerTest2
|
||||
PRIVATE
|
||||
"syncPingTimerTest2.cpp"
|
||||
)
|
||||
target_sources(syncPingSelfTest
|
||||
PRIVATE
|
||||
"syncPingSelfTest.cpp"
|
||||
)
|
||||
target_sources(syncElectTest
|
||||
PRIVATE
|
||||
"syncElectTest.cpp"
|
||||
)
|
||||
|
||||
|
||||
target_include_directories(syncTest
|
||||
|
@ -272,6 +287,26 @@ target_include_directories(syncRpcMsgTest
|
|||
"${CMAKE_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncPingTimerTest2
|
||||
PUBLIC
|
||||
"${CMAKE_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncPingSelfTest
|
||||
PUBLIC
|
||||
"${CMAKE_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncElectTest
|
||||
PUBLIC
|
||||
"${CMAKE_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncElectTest
|
||||
PUBLIC
|
||||
"${CMAKE_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
|
||||
target_link_libraries(syncTest
|
||||
|
@ -382,6 +417,18 @@ target_link_libraries(syncRpcMsgTest
|
|||
sync
|
||||
gtest_main
|
||||
)
|
||||
target_link_libraries(syncPingTimerTest2
|
||||
sync
|
||||
gtest_main
|
||||
)
|
||||
target_link_libraries(syncPingSelfTest
|
||||
sync
|
||||
gtest_main
|
||||
)
|
||||
target_link_libraries(syncElectTest
|
||||
sync
|
||||
gtest_main
|
||||
)
|
||||
|
||||
|
||||
enable_testing()
|
||||
|
|
|
@ -0,0 +1,124 @@
|
|||
#include <gtest/gtest.h>
|
||||
#include <stdio.h>
|
||||
#include "syncEnv.h"
|
||||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncRaftLog.h"
|
||||
#include "syncRaftStore.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
uint16_t ports[] = {7010, 7110, 7210, 7310, 7410};
|
||||
int32_t replicaNum = 3;
|
||||
int32_t myIndex = 0;
|
||||
|
||||
SRaftId ids[TSDB_MAX_REPLICA];
|
||||
SSyncInfo syncInfo;
|
||||
SSyncFSM* pFsm;
|
||||
SWal* pWal;
|
||||
SSyncNode* gSyncNode;
|
||||
|
||||
SSyncNode* syncNodeInit() {
|
||||
syncInfo.vgId = 1234;
|
||||
syncInfo.rpcClient = gSyncIO->clientRpc;
|
||||
syncInfo.FpSendMsg = syncIOSendMsg;
|
||||
syncInfo.queue = gSyncIO->pMsgQ;
|
||||
syncInfo.FpEqMsg = syncIOEqMsg;
|
||||
syncInfo.pFsm = pFsm;
|
||||
snprintf(syncInfo.path, sizeof(syncInfo.path), "./elect_test_%d", myIndex);
|
||||
|
||||
int code = walInit();
|
||||
assert(code == 0);
|
||||
SWalCfg walCfg;
|
||||
memset(&walCfg, 0, sizeof(SWalCfg));
|
||||
walCfg.vgId = syncInfo.vgId;
|
||||
walCfg.fsyncPeriod = 1000;
|
||||
walCfg.retentionPeriod = 1000;
|
||||
walCfg.rollPeriod = 1000;
|
||||
walCfg.retentionSize = 1000;
|
||||
walCfg.segSize = 1000;
|
||||
walCfg.level = TAOS_WAL_FSYNC;
|
||||
|
||||
char tmpdir[128];
|
||||
snprintf(tmpdir, sizeof(tmpdir), "./elect_test_wal_%d", myIndex);
|
||||
pWal = walOpen(tmpdir, &walCfg);
|
||||
assert(pWal != NULL);
|
||||
|
||||
syncInfo.pWal = pWal;
|
||||
|
||||
SSyncCfg* pCfg = &syncInfo.syncCfg;
|
||||
pCfg->myIndex = myIndex;
|
||||
pCfg->replicaNum = replicaNum;
|
||||
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
pCfg->nodeInfo[i].nodePort = ports[i];
|
||||
snprintf(pCfg->nodeInfo[i].nodeFqdn, sizeof(pCfg->nodeInfo[i].nodeFqdn), "%s", "127.0.0.1");
|
||||
// taosGetFqdn(pCfg->nodeInfo[0].nodeFqdn);
|
||||
}
|
||||
|
||||
SSyncNode* pSyncNode = syncNodeOpen(&syncInfo);
|
||||
assert(pSyncNode != NULL);
|
||||
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
|
||||
gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
|
||||
gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
|
||||
gSyncIO->FpOnSyncAppendEntriesReply = pSyncNode->FpOnAppendEntriesReply;
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncTimeout = pSyncNode->FpOnTimeout;
|
||||
gSyncIO->pSyncNode = pSyncNode;
|
||||
|
||||
return pSyncNode;
|
||||
}
|
||||
|
||||
SSyncNode* syncInitTest() { return syncNodeInit(); }
|
||||
|
||||
void initRaftId(SSyncNode* pSyncNode) {
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
ids[i] = pSyncNode->replicasId[i];
|
||||
char* s = syncUtilRaftId2Str(&ids[i]);
|
||||
printf("raftId[%d] : %s\n", i, s);
|
||||
free(s);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
|
||||
myIndex = 0;
|
||||
if (argc >= 2) {
|
||||
myIndex = atoi(argv[1]);
|
||||
}
|
||||
|
||||
int32_t ret = syncIOStart((char*)"127.0.0.1", ports[myIndex]);
|
||||
assert(ret == 0);
|
||||
|
||||
ret = syncEnvStart();
|
||||
assert(ret == 0);
|
||||
|
||||
gSyncNode = syncInitTest();
|
||||
assert(gSyncNode != NULL);
|
||||
syncNodePrint2((char*)"", gSyncNode);
|
||||
|
||||
initRaftId(gSyncNode);
|
||||
|
||||
//---------------------------
|
||||
while (1) {
|
||||
sTrace("while 1 sleep, state: %d, %s", gSyncNode->state, syncUtilState2String(gSyncNode->state));
|
||||
taosMsleep(1000);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -47,12 +47,11 @@ SSyncNode* syncNodeInit() {
|
|||
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncClientRequest = pSyncNode->FpOnClientRequest;
|
||||
gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
|
||||
gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
|
||||
gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
|
||||
gSyncIO->FpOnSyncAppendEntriesReply = pSyncNode->FpOnAppendEntriesReply;
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncTimeout = pSyncNode->FpOnTimeout;
|
||||
gSyncIO->pSyncNode = pSyncNode;
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ SSyncNode* syncNodeInit() {
|
|||
SSyncNode* syncInitTest() { return syncNodeInit(); }
|
||||
|
||||
void logStoreTest() {
|
||||
logStorePrint2((char*)"logStoreTest2", pSyncNode->pLogStore);
|
||||
logStorePrint2((char*)"logStoreTest", pSyncNode->pLogStore);
|
||||
|
||||
assert(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore) == SYNC_INDEX_INVALID);
|
||||
|
||||
|
@ -105,10 +105,10 @@ void logStoreTest() {
|
|||
assert(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore) == SYNC_INDEX_BEGIN);
|
||||
}
|
||||
}
|
||||
logStorePrint(pSyncNode->pLogStore);
|
||||
logStorePrint2((char*)"after appendEntry", pSyncNode->pLogStore);
|
||||
|
||||
pSyncNode->pLogStore->truncate(pSyncNode->pLogStore, 3);
|
||||
logStorePrint(pSyncNode->pLogStore);
|
||||
logStorePrint2((char*)"after truncate 3", pSyncNode->pLogStore);
|
||||
}
|
||||
|
||||
void initRaftId(SSyncNode* pSyncNode) {
|
||||
|
|
|
@ -0,0 +1,102 @@
|
|||
#include <gtest/gtest.h>
|
||||
#include <stdio.h>
|
||||
#include "syncEnv.h"
|
||||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncRaftStore.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
uint16_t ports[] = {7010, 7110, 7210, 7310, 7410};
|
||||
int32_t replicaNum = 3;
|
||||
int32_t myIndex = 0;
|
||||
|
||||
SRaftId ids[TSDB_MAX_REPLICA];
|
||||
SSyncInfo syncInfo;
|
||||
SSyncFSM* pFsm;
|
||||
|
||||
SSyncNode* syncNodeInit() {
|
||||
syncInfo.vgId = 1234;
|
||||
syncInfo.rpcClient = gSyncIO->clientRpc;
|
||||
syncInfo.FpSendMsg = syncIOSendMsg;
|
||||
syncInfo.queue = gSyncIO->pMsgQ;
|
||||
syncInfo.FpEqMsg = syncIOEqMsg;
|
||||
syncInfo.pFsm = pFsm;
|
||||
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");
|
||||
|
||||
SSyncCfg* pCfg = &syncInfo.syncCfg;
|
||||
pCfg->myIndex = myIndex;
|
||||
pCfg->replicaNum = replicaNum;
|
||||
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
pCfg->nodeInfo[i].nodePort = ports[i];
|
||||
snprintf(pCfg->nodeInfo[i].nodeFqdn, sizeof(pCfg->nodeInfo[i].nodeFqdn), "%s", "127.0.0.1");
|
||||
// taosGetFqdn(pCfg->nodeInfo[0].nodeFqdn);
|
||||
}
|
||||
|
||||
SSyncNode* pSyncNode = syncNodeOpen(&syncInfo);
|
||||
assert(pSyncNode != NULL);
|
||||
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncClientRequest = pSyncNode->FpOnClientRequest;
|
||||
gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
|
||||
gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
|
||||
gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
|
||||
gSyncIO->FpOnSyncAppendEntriesReply = pSyncNode->FpOnAppendEntriesReply;
|
||||
gSyncIO->FpOnSyncTimeout = pSyncNode->FpOnTimeout;
|
||||
gSyncIO->pSyncNode = pSyncNode;
|
||||
|
||||
return pSyncNode;
|
||||
}
|
||||
|
||||
SSyncNode* syncInitTest() { return syncNodeInit(); }
|
||||
|
||||
void initRaftId(SSyncNode* pSyncNode) {
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
ids[i] = pSyncNode->replicasId[i];
|
||||
char* s = syncUtilRaftId2Str(&ids[i]);
|
||||
printf("raftId[%d] : %s\n", i, s);
|
||||
free(s);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
|
||||
myIndex = 0;
|
||||
if (argc >= 2) {
|
||||
myIndex = atoi(argv[1]);
|
||||
}
|
||||
|
||||
int32_t ret = syncIOStart((char*)"127.0.0.1", ports[myIndex]);
|
||||
assert(ret == 0);
|
||||
|
||||
ret = syncEnvStart();
|
||||
assert(ret == 0);
|
||||
|
||||
SSyncNode* pSyncNode = syncInitTest();
|
||||
assert(pSyncNode != NULL);
|
||||
syncNodePrint2((char*)"", pSyncNode);
|
||||
|
||||
initRaftId(pSyncNode);
|
||||
|
||||
//---------------------------
|
||||
|
||||
while (1) {
|
||||
syncNodePingSelf(pSyncNode);
|
||||
taosMsleep(1000);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -47,6 +47,7 @@ SSyncNode* syncNodeInit() {
|
|||
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncClientRequest = pSyncNode->FpOnClientRequest;
|
||||
gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
|
||||
gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
|
||||
gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
|
||||
|
|
|
@ -0,0 +1,106 @@
|
|||
#include <gtest/gtest.h>
|
||||
#include <stdio.h>
|
||||
#include "syncEnv.h"
|
||||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncRaftStore.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
uint16_t ports[] = {7010, 7110, 7210, 7310, 7410};
|
||||
int32_t replicaNum = 3;
|
||||
int32_t myIndex = 0;
|
||||
|
||||
SRaftId ids[TSDB_MAX_REPLICA];
|
||||
SSyncInfo syncInfo;
|
||||
SSyncFSM* pFsm;
|
||||
|
||||
SSyncNode* syncNodeInit() {
|
||||
syncInfo.vgId = 1234;
|
||||
syncInfo.rpcClient = gSyncIO->clientRpc;
|
||||
syncInfo.FpSendMsg = syncIOSendMsg;
|
||||
syncInfo.queue = gSyncIO->pMsgQ;
|
||||
syncInfo.FpEqMsg = syncIOEqMsg;
|
||||
syncInfo.pFsm = pFsm;
|
||||
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");
|
||||
|
||||
SSyncCfg* pCfg = &syncInfo.syncCfg;
|
||||
pCfg->myIndex = myIndex;
|
||||
pCfg->replicaNum = replicaNum;
|
||||
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
pCfg->nodeInfo[i].nodePort = ports[i];
|
||||
snprintf(pCfg->nodeInfo[i].nodeFqdn, sizeof(pCfg->nodeInfo[i].nodeFqdn), "%s", "127.0.0.1");
|
||||
// taosGetFqdn(pCfg->nodeInfo[0].nodeFqdn);
|
||||
}
|
||||
|
||||
SSyncNode* pSyncNode = syncNodeOpen(&syncInfo);
|
||||
assert(pSyncNode != NULL);
|
||||
|
||||
gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
|
||||
gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
|
||||
gSyncIO->FpOnSyncClientRequest = pSyncNode->FpOnClientRequest;
|
||||
gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
|
||||
gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
|
||||
gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
|
||||
gSyncIO->FpOnSyncAppendEntriesReply = pSyncNode->FpOnAppendEntriesReply;
|
||||
gSyncIO->FpOnSyncTimeout = pSyncNode->FpOnTimeout;
|
||||
gSyncIO->pSyncNode = pSyncNode;
|
||||
|
||||
return pSyncNode;
|
||||
}
|
||||
|
||||
SSyncNode* syncInitTest() { return syncNodeInit(); }
|
||||
|
||||
void initRaftId(SSyncNode* pSyncNode) {
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
ids[i] = pSyncNode->replicasId[i];
|
||||
char* s = syncUtilRaftId2Str(&ids[i]);
|
||||
printf("raftId[%d] : %s\n", i, s);
|
||||
free(s);
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
|
||||
myIndex = 0;
|
||||
if (argc >= 2) {
|
||||
myIndex = atoi(argv[1]);
|
||||
}
|
||||
|
||||
int32_t ret = syncIOStart((char*)"127.0.0.1", ports[myIndex]);
|
||||
assert(ret == 0);
|
||||
|
||||
ret = syncEnvStart();
|
||||
assert(ret == 0);
|
||||
|
||||
SSyncNode* pSyncNode = syncInitTest();
|
||||
assert(pSyncNode != NULL);
|
||||
syncNodePrint2((char*)"", pSyncNode);
|
||||
|
||||
initRaftId(pSyncNode);
|
||||
|
||||
//---------------------------
|
||||
|
||||
sTrace("syncNodeStartPingTimer ...");
|
||||
ret = syncNodeStartPingTimer(pSyncNode);
|
||||
assert(ret == 0);
|
||||
|
||||
while (1) {
|
||||
sTrace("while 1 sleep ...");
|
||||
taosMsleep(1000);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -3,6 +3,7 @@
|
|||
#include <stdio.h>
|
||||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
|
@ -13,6 +14,21 @@ void logTest() {
|
|||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
uint16_t ports[] = {7010, 7110, 7210, 7310, 7410};
|
||||
int32_t replicaNum = 5;
|
||||
int32_t myIndex = 0;
|
||||
SRaftId ids[TSDB_MAX_REPLICA];
|
||||
|
||||
void initRaftId() {
|
||||
for (int i = 0; i < replicaNum; ++i) {
|
||||
ids[i].addr = syncUtilAddr2U64("127.0.0.1", ports[i]);
|
||||
ids[i].vgId = 1234;
|
||||
char* s = syncUtilRaftId2Str(&ids[i]);
|
||||
printf("raftId[%d] : %s\n", i, s);
|
||||
free(s);
|
||||
}
|
||||
}
|
||||
|
||||
int main() {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
|
@ -20,23 +36,35 @@ int main() {
|
|||
|
||||
logTest();
|
||||
|
||||
SRaftStore *pRaftStore = raftStoreOpen("./raft_store.json");
|
||||
initRaftId();
|
||||
|
||||
SRaftStore* pRaftStore = raftStoreOpen("./test_raft_store.json");
|
||||
assert(pRaftStore != NULL);
|
||||
raftStorePrint(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreOpen==", pRaftStore);
|
||||
|
||||
#if 0
|
||||
pRaftStore->currentTerm = 100;
|
||||
pRaftStore->voteFor.addr = 200;
|
||||
pRaftStore->voteFor.vgId = 300;
|
||||
raftStorePersist(pRaftStore);
|
||||
raftStorePrint(pRaftStore);
|
||||
#endif
|
||||
raftStoreSetTerm(pRaftStore, 100);
|
||||
raftStorePrint2((char*)"==raftStoreSetTerm==", pRaftStore);
|
||||
|
||||
++(pRaftStore->currentTerm);
|
||||
++(pRaftStore->voteFor.addr);
|
||||
++(pRaftStore->voteFor.vgId);
|
||||
raftStorePersist(pRaftStore);
|
||||
raftStorePrint(pRaftStore);
|
||||
raftStoreVote(pRaftStore, &ids[0]);
|
||||
raftStorePrint2((char*)"==raftStoreVote==", pRaftStore);
|
||||
|
||||
raftStoreClearVote(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreClearVote==", pRaftStore);
|
||||
|
||||
raftStoreVote(pRaftStore, &ids[1]);
|
||||
raftStorePrint2((char*)"==raftStoreVote==", pRaftStore);
|
||||
|
||||
raftStoreNextTerm(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreNextTerm==", pRaftStore);
|
||||
|
||||
raftStoreNextTerm(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreNextTerm==", pRaftStore);
|
||||
|
||||
raftStoreNextTerm(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreNextTerm==", pRaftStore);
|
||||
|
||||
raftStoreNextTerm(pRaftStore);
|
||||
raftStorePrint2((char*)"==raftStoreNextTerm==", pRaftStore);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -30,6 +30,7 @@ typedef struct SCliConn {
  void*       hostThrd;
  SConnBuffer readBuf;
  void*       data;
  SArray*     cliMsgs;
  queue       conn;
  uint64_t    expireTime;
  int         hThrdIdx;

@@ -106,6 +107,7 @@ static void cliAsyncCb(uv_async_t* handle);
static SCliConn* cliCreateConn(SCliThrdObj* thrd);
static void      cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void      cliDestroy(uv_handle_t* handle);
static void      cliSend(SCliConn* pConn);

// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);

@@ -158,6 +160,14 @@ static void destroyThrdObj(SCliThrdObj* pThrd);

static void* cliWorkThread(void* arg);

bool cliMayContinueSendMsg(SCliConn* conn) {
  if (taosArrayGetSize(conn->cliMsgs) > 0) {
    cliSend(conn);
    return true;
  } else {
    return false;
  }
}
void cliHandleResp(SCliConn* conn) {
  SCliThrdObj* pThrd = conn->hostThrd;
  STrans*      pTransInst = pThrd->pTransInst;

@@ -173,18 +183,18 @@ void cliHandleResp(SCliConn* conn) {
  transMsg.msgType = pHead->msgType;
  transMsg.ahandle = NULL;

  SCliMsg* pMsg = conn->data;
  SCliMsg* pMsg = NULL;
  if (taosArrayGetSize(conn->cliMsgs) > 0) {
    pMsg = taosArrayGetP(conn->cliMsgs, 0);
    taosArrayRemove(conn->cliMsgs, 0);
  }

  STransConnCtx* pCtx = pMsg ? pMsg->ctx : NULL;
  if (pMsg == NULL && !CONN_NO_PERSIST_BY_APP(conn)) {
    transMsg.ahandle = pTransInst->mfp ? (*pTransInst->mfp)(pTransInst->parent, transMsg.msgType) : NULL;
  } else {
    transMsg.ahandle = pCtx ? pCtx->ahandle : NULL;
  }
  // if (rpcMsg.ahandle == NULL) {
  //  tDebug("%s cli conn %p handle except", CONN_GET_INST_LABEL(conn), conn);
  //  return;
  //}

  // buf's mem alread translated to transMsg.pCont
  transClearBuffer(&conn->readBuf);

@@ -214,12 +224,15 @@ void cliHandleResp(SCliConn* conn) {
    memcpy((char*)pCtx->pRsp, (char*)&transMsg, sizeof(transMsg));
    tsem_post(pCtx->pSem);
  }
  destroyCmsg(pMsg);

  if (cliMayContinueSendMsg(conn) == true) {
    return;
  }

  if (CONN_NO_PERSIST_BY_APP(conn)) {
    addConnToPool(pThrd->pool, conn);
  }
  destroyCmsg(conn->data);
  conn->data = NULL;

  uv_read_start((uv_stream_t*)conn->stream, cliAllocRecvBufferCb, cliRecvCb);
  // start thread's timer of conn pool if not active
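// --- Illustrative sketch (not part of this commit) ---
// With conn->data replaced by the conn->cliMsgs array, each response pops the
// message at index 0 and, if more are queued, keeps sending. A standalone
// model of that FIFO-on-a-connection shape (DemoConn is a hypothetical
// stand-in for SCliConn with its SArray of pending messages):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int msgs[8];   // stands in for SArray* cliMsgs
  int count;
} DemoConn;

static bool demoPopFront(DemoConn *conn, int *out) {
  if (conn->count == 0) return false;
  *out = conn->msgs[0];
  for (int i = 1; i < conn->count; ++i) conn->msgs[i - 1] = conn->msgs[i];
  conn->count--;
  return true;
}

int main(void) {
  DemoConn conn = {.msgs = {11, 22, 33}, .count = 3};
  int      msg;
  while (demoPopFront(&conn, &msg)) {          // handle one response, then continue sending
    printf("handled msg %d, %d still pending\n", msg, conn.count);
  }
  return 0;
}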
@ -229,7 +242,7 @@ void cliHandleResp(SCliConn* conn) {
|
|||
}
|
||||
|
||||
void cliHandleExcept(SCliConn* pConn) {
|
||||
if (pConn->data == NULL) {
|
||||
if (taosArrayGetSize(pConn->cliMsgs) == 0) {
|
||||
if (pConn->broken == true || CONN_NO_PERSIST_BY_APP(pConn)) {
|
||||
transUnrefCliHandle(pConn);
|
||||
return;
|
||||
|
@ -238,32 +251,38 @@ void cliHandleExcept(SCliConn* pConn) {
|
|||
SCliThrdObj* pThrd = pConn->hostThrd;
|
||||
STrans* pTransInst = pThrd->pTransInst;
|
||||
|
||||
SCliMsg* pMsg = pConn->data;
|
||||
STransConnCtx* pCtx = pMsg ? pMsg->ctx : NULL;
|
||||
do {
|
||||
SCliMsg* pMsg = NULL;
|
||||
if (taosArrayGetSize(pConn->cliMsgs) > 0) {
|
||||
pMsg = taosArrayGetP(pConn->cliMsgs, 0);
|
||||
taosArrayRemove(pConn->cliMsgs, 0);
|
||||
}
|
||||
|
||||
STransMsg transMsg = {0};
|
||||
transMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||
transMsg.msgType = pMsg ? pMsg->msg.msgType + 1 : 0;
|
||||
transMsg.ahandle = NULL;
|
||||
STransConnCtx* pCtx = pMsg ? pMsg->ctx : NULL;
|
||||
|
||||
if (pMsg == NULL && !CONN_NO_PERSIST_BY_APP(pConn)) {
|
||||
transMsg.ahandle = pTransInst->mfp ? (*pTransInst->mfp)(pTransInst->parent, transMsg.msgType) : NULL;
|
||||
} else {
|
||||
transMsg.ahandle = pCtx ? pCtx->ahandle : NULL;
|
||||
}
|
||||
STransMsg transMsg = {0};
|
||||
transMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||
transMsg.msgType = pMsg ? pMsg->msg.msgType + 1 : 0;
|
||||
transMsg.ahandle = NULL;
|
||||
|
||||
if (pCtx == NULL || pCtx->pSem == NULL) {
|
||||
tTrace("%s cli conn %p handle resp", pTransInst->label, pConn);
|
||||
(pTransInst->cfp)(pTransInst->parent, &transMsg, NULL);
|
||||
} else {
|
||||
tTrace("%s cli conn(sync) %p handle resp", pTransInst->label, pConn);
|
||||
memcpy((char*)(pCtx->pRsp), (char*)(&transMsg), sizeof(transMsg));
|
||||
tsem_post(pCtx->pSem);
|
||||
}
|
||||
destroyCmsg(pConn->data);
|
||||
pConn->data = NULL;
|
||||
if (pMsg == NULL && !CONN_NO_PERSIST_BY_APP(pConn)) {
|
||||
transMsg.ahandle = pTransInst->mfp ? (*pTransInst->mfp)(pTransInst->parent, transMsg.msgType) : NULL;
|
||||
} else {
|
||||
transMsg.ahandle = pCtx ? pCtx->ahandle : NULL;
|
||||
}
|
||||
|
||||
if (pCtx == NULL || pCtx->pSem == NULL) {
|
||||
tTrace("%s cli conn %p handle resp", pTransInst->label, pConn);
|
||||
(pTransInst->cfp)(pTransInst->parent, &transMsg, NULL);
|
||||
} else {
|
||||
tTrace("%s cli conn(sync) %p handle resp", pTransInst->label, pConn);
|
||||
memcpy((char*)(pCtx->pRsp), (char*)(&transMsg), sizeof(transMsg));
|
||||
tsem_post(pCtx->pSem);
|
||||
}
|
||||
destroyCmsg(pMsg);
|
||||
tTrace("%s cli conn %p start to destroy", CONN_GET_INST_LABEL(pConn), pConn);
|
||||
} while (taosArrayGetSize(pConn->cliMsgs) > 0);
|
||||
|
||||
tTrace("%s cli conn %p start to destroy", CONN_GET_INST_LABEL(pConn), pConn);
|
||||
transUnrefCliHandle(pConn);
|
||||
}
|
||||
|
||||
|
@ -398,6 +417,7 @@ static SCliConn* cliCreateConn(SCliThrdObj* pThrd) {
|
|||
|
||||
conn->writeReq.data = conn;
|
||||
conn->connReq.data = conn;
|
||||
conn->cliMsgs = taosArrayInit(2, sizeof(void*));
|
||||
|
||||
QUEUE_INIT(&conn->conn);
|
||||
conn->hostThrd = pThrd;
|
||||
|
@ -417,6 +437,7 @@ static void cliDestroy(uv_handle_t* handle) {
|
|||
SCliConn* conn = handle->data;
|
||||
free(conn->ip);
|
||||
free(conn->stream);
|
||||
taosArrayDestroy(conn->cliMsgs);
|
||||
tTrace("%s cli conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
|
||||
free(conn);
|
||||
}
|
||||
|
@ -426,11 +447,6 @@ static void cliSendCb(uv_write_t* req, int status) {
|
|||
|
||||
if (status == 0) {
|
||||
tTrace("%s cli conn %p data already was written out", CONN_GET_INST_LABEL(pConn), pConn);
|
||||
SCliMsg* pMsg = pConn->data;
|
||||
if (pMsg == NULL) {
|
||||
return;
|
||||
}
|
||||
destroyUserdata(&pMsg->msg);
|
||||
} else {
|
||||
tError("%s cli conn %p failed to write: %s", CONN_GET_INST_LABEL(pConn), pConn, uv_err_name(status));
|
||||
cliHandleExcept(pConn);
|
||||
|
@ -442,7 +458,8 @@ static void cliSendCb(uv_write_t* req, int status) {
|
|||
void cliSend(SCliConn* pConn) {
|
||||
CONN_HANDLE_BROKEN(pConn);
|
||||
|
||||
SCliMsg* pCliMsg = pConn->data;
|
||||
assert(taosArrayGetSize(pConn->cliMsgs) > 0);
|
||||
SCliMsg* pCliMsg = taosArrayGetP(pConn->cliMsgs, 0);
|
||||
STransConnCtx* pCtx = pCliMsg->ctx;
|
||||
|
||||
SCliThrdObj* pThrd = pConn->hostThrd;
|
||||
|
@ -480,6 +497,7 @@ void cliSend(SCliConn* pConn) {
|
|||
TMSG_INFO(pHead->msgType), inet_ntoa(pConn->addr.sin_addr), ntohs(pConn->addr.sin_port),
|
||||
inet_ntoa(pConn->locaddr.sin_addr), ntohs(pConn->locaddr.sin_port));
|
||||
|
||||
pConn->writeReq.data = pConn;
|
||||
uv_write(&pConn->writeReq, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
|
||||
|
||||
return;
|
||||
|
@ -502,8 +520,8 @@ void cliConnCb(uv_connect_t* req, int status) {
|
|||
uv_tcp_getsockname((uv_tcp_t*)pConn->stream, (struct sockaddr*)&pConn->locaddr, &addrlen);
|
||||
|
||||
tTrace("%s cli conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn);
|
||||
|
||||
assert(pConn->stream == req->handle);
|
||||
|
||||
cliSend(pConn);
|
||||
}
|
||||
|
||||
|
@ -521,8 +539,11 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd) {
|
|||
SCliConn* conn = pMsg->msg.handle;
|
||||
tDebug("%s cli conn %p release to inst", CONN_GET_INST_LABEL(conn), conn);
|
||||
|
||||
destroyCmsg(pMsg);
|
||||
conn->data = NULL;
|
||||
while (taosArrayGetSize(conn->cliMsgs) > 0) {
|
||||
SCliMsg* pMsg = taosArrayGetP(conn->cliMsgs, 0);
|
||||
destroyCmsg(pMsg);
|
||||
taosArrayRemove(conn->cliMsgs, 0);
|
||||
}
|
||||
|
||||
transDestroyBuffer(&conn->readBuf);
|
||||
if (conn->persist && T_REF_VAL_GET(conn) >= 2) {
|
||||
|
@ -561,14 +582,19 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) {
|
|||
|
||||
SCliConn* conn = cliGetConn(pMsg, pThrd);
|
||||
if (conn != NULL) {
|
||||
conn->data = pMsg;
|
||||
conn->hThrdIdx = pCtx->hThrdIdx;
|
||||
|
||||
if (taosArrayGetSize(conn->cliMsgs) > 0) {
|
||||
taosArrayPush(conn->cliMsgs, &pMsg);
|
||||
return;
|
||||
}
|
||||
taosArrayPush(conn->cliMsgs, &pMsg);
|
||||
transDestroyBuffer(&conn->readBuf);
|
||||
cliSend(conn);
|
||||
} else {
|
||||
conn = cliCreateConn(pThrd);
|
||||
conn->data = pMsg;
|
||||
taosArrayPush(conn->cliMsgs, &pMsg);
|
||||
|
||||
conn->hThrdIdx = pCtx->hThrdIdx;
|
||||
conn->ip = strdup(pMsg->ctx->ip);
|
||||
conn->port = pMsg->ctx->port;
|
||||
|
|
|
@@ -214,7 +214,9 @@ static void uvHandleReq(SSrvConn* pConn) {
    // pHead = rpcDecompresSTransMsg(pHead);
  } else {
    pHead->msgLen = htonl(pHead->msgLen);
    // impl later
    if (pHead->secured == 1) {
      pHead->msgLen -= sizeof(STransUserMsg);
    }
    //
  }
@@ -50,6 +50,7 @@ static void *ConstructArgForSpecificMsgType(void *parent, tmsg_t msgType) {
}
// server except
static bool handleExcept(void *parent, tmsg_t msgType) {
  //
  return msgType == TDMT_VND_QUERY || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP;
}
typedef void (*CB)(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet);
@@ -351,6 +351,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL,          "TSDB invalid tag value")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW,     "TSDB no cache last row data")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_SMA_INDEX_IN_META,  "No sma index in meta")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR,    "TDB env open error")

// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE,       "Invalid handle")
@@ -15,43 +15,43 @@

#include "tuuid.h"

static int64_t hashId = 0;
static int32_t SerialNo = 0;
static int64_t tUUIDHashId = 0;
static int32_t tUUIDSerialNo = 0;

int32_t tGenIdPI32(void) {
  if (hashId == 0) {
  if (tUUIDHashId == 0) {
    char    uid[64];
    int32_t code = taosGetSystemUUID(uid, tListLen(uid));
    if (code != TSDB_CODE_SUCCESS) {
      terrno = TAOS_SYSTEM_ERROR(errno);
    } else {
      hashId = MurmurHash3_32(uid, strlen(uid));
      tUUIDHashId = MurmurHash3_32(uid, strlen(uid));
    }
  }

  int64_t  ts = taosGetTimestampMs();
  uint64_t pid = taosGetPId();
  int32_t  val = atomic_add_fetch_32(&SerialNo, 1);
  int32_t  val = atomic_add_fetch_32(&tUUIDSerialNo, 1);

  int32_t id = ((hashId & 0x1F) << 26) | ((pid & 0x3F) << 20) | ((ts & 0xFFF) << 8) | (val & 0xFF);
  int32_t id = ((tUUIDHashId & 0x1F) << 26) | ((pid & 0x3F) << 20) | ((ts & 0xFFF) << 8) | (val & 0xFF);
  return id;
}

int64_t tGenIdPI64(void) {
  if (hashId == 0) {
  if (tUUIDHashId == 0) {
    char    uid[64];
    int32_t code = taosGetSystemUUID(uid, tListLen(uid));
    if (code != TSDB_CODE_SUCCESS) {
      terrno = TAOS_SYSTEM_ERROR(errno);
    } else {
      hashId = MurmurHash3_32(uid, strlen(uid));
      tUUIDHashId = MurmurHash3_32(uid, strlen(uid));
    }
  }

  int64_t  ts = taosGetTimestampMs();
  uint64_t pid = taosGetPId();
  int32_t  val = atomic_add_fetch_32(&SerialNo, 1);
  int32_t  val = atomic_add_fetch_32(&tUUIDSerialNo, 1);

  int64_t id = ((hashId & 0x07FF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
  int64_t id = ((tUUIDHashId & 0x07FF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
  return id;
}
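// --- Illustrative sketch (not part of this commit) ---
// As written above, tGenIdPI32 packs four fields into one id: 5 bits of the
// host-UUID hash, 6 bits of the pid, 12 bits of the millisecond timestamp and
// an 8-bit serial counter. A standalone illustration of that layout with
// fixed, hypothetical inputs (the real code derives them from the system
// UUID, taosGetPId() and taosGetTimestampMs()):

#include <stdint.h>
#include <stdio.h>

static int32_t demoPackId32(int64_t hashId, uint64_t pid, int64_t ts, int32_t serial) {
  return (int32_t)(((hashId & 0x1F) << 26) |   // 5-bit hash,    bits 26..30
                   ((pid & 0x3F) << 20) |      // 6-bit pid,     bits 20..25
                   ((ts & 0xFFF) << 8) |       // 12-bit time,   bits 8..19
                   (serial & 0xFF));           // 8-bit serial,  bits 0..7
}

int main(void) {
  int32_t id = demoPackId32(0x13, 4321, 1700000000123LL, 42);
  printf("id = 0x%08x\n", (uint32_t)id);
  return 0;
}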