merge 3.0

Author: Xiaoyu Wang
Committed: 2022-07-01 20:05:15 +08:00
Commit: 7a53bae352
93 changed files with 2019 additions and 2887 deletions

View File

@ -130,6 +130,9 @@ def pre_test(){
def pre_test_win(){
bat '''
hostname
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
taskkill /f /t /im taosd.exe
ipconfig
set
date /t

View File

@ -222,6 +222,7 @@ void blockDataCleanup(SSDataBlock* pDataBlock);
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
@ -235,6 +236,7 @@ SColumnInfoData* bdGetColumnInfoData(SSDataBlock* pBlock, int32_t index);
void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag);
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);

View File

@ -78,6 +78,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf);
// STRUCT =================
struct STColumn {

View File

@ -436,15 +436,16 @@ typedef struct {
int32_t ttl;
int32_t numOfColumns;
int32_t numOfTags;
int32_t numOfFuncs;
int32_t commentLen;
int32_t ast1Len;
int32_t ast2Len;
SArray* pColumns; // array of SField
SArray* pTags; // array of SField
char* comment;
SArray* pFuncs;
char* pComment;
char* pAst1;
char* pAst2;
SArray* pFuncs;
} SMCreateStbReq;
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);

View File

@ -233,10 +233,12 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_PING, "sync-ping", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_PING_REPLY, "sync-ping-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST, "sync-client-request", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST_BATCH, "sync-client-request-batch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST_REPLY, "sync-client-request-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_REQUEST_VOTE, "sync-request-vote", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_REQUEST_VOTE_REPLY, "sync-request-vote-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES, "sync-append-entries", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES_BATCH, "sync-append-entries-batch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES_REPLY, "sync-append-entries-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_NOOP, "sync-noop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_UNKNOWN, "sync-unknown", NULL, NULL)

View File

@ -26,6 +26,7 @@ extern "C" {
extern bool gRaftDetailLog;
#define SYNC_MAX_BATCH_SIZE 100
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF
@ -120,7 +121,7 @@ typedef struct SSyncFSM {
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
int32_t (*FpGetSnapshotInfo)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void* pReaderParam, void** ppReader);
int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader);
int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len);
@ -164,6 +165,7 @@ typedef struct SSyncLogStore {
bool (*syncLogIsEmpty)(struct SSyncLogStore* pLogStore);
int32_t (*syncLogEntryCount)(struct SSyncLogStore* pLogStore);
int32_t (*syncLogRestoreFromSnapshot)(struct SSyncLogStore* pLogStore, SyncIndex index);
bool (*syncLogExist)(struct SSyncLogStore* pLogStore, SyncIndex index);
SyncIndex (*syncLogWriteIndex)(struct SSyncLogStore* pLogStore);
SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
@ -179,6 +181,7 @@ typedef struct SSyncInfo {
bool isStandBy;
bool snapshotEnable;
SyncGroupId vgId;
int32_t batchSize;
SSyncCfg syncCfg;
char path[TSDB_FILENAME_LEN];
SWal* pWal;
@ -202,6 +205,7 @@ SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
// int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
bool syncIsRestoreFinish(int64_t rid);

View File

@ -219,6 +219,34 @@ void syncClientRequestPrint2(char* s, const SyncClientRequest* pMsg);
void syncClientRequestLog(const SyncClientRequest* pMsg);
void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg);
// ---------------------------------------------
typedef struct SOffsetAndContLen {
int32_t offset;
int32_t contLen;
} SOffsetAndContLen;
typedef struct SRaftMeta {
uint64_t seqNum;
bool isWeak;
} SRaftMeta;
// block1: SOffsetAndContLen array
// block2: SRaftMeta array
// block3: rpc msg array (with pCont)
typedef struct SyncClientRequestBatch {
uint32_t bytes;
int32_t vgId;
uint32_t msgType; // SyncClientRequestBatch msgType
uint32_t dataCount;
uint32_t dataLen; // user RpcMsg.contLen
char data[]; // user RpcMsg.pCont
} SyncClientRequestBatch;
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
int32_t vgId);
void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg);
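// Editor's sketch (not part of the patch): one plausible way to address the three
// blocks carried in data[], assuming block1 holds one SOffsetAndContLen per message,
// block2 one SRaftMeta per message, and offsets are relative to data[]. The helper
// name is illustrative only.
static void syncClientRequestBatchWalk(const SyncClientRequestBatch* pBatch) {
const SOffsetAndContLen* offArr = (const SOffsetAndContLen*)pBatch->data;
const SRaftMeta* metaArr = (const SRaftMeta*)(pBatch->data + pBatch->dataCount * sizeof(SOffsetAndContLen));
for (uint32_t i = 0; i < pBatch->dataCount; ++i) {
const char* pCont = pBatch->data + offArr[i].offset; // block3: i-th user rpc msg body
int32_t contLen = offArr[i].contLen;
bool isWeak = metaArr[i].isWeak;
(void)pCont; (void)contLen; (void)isWeak; // a real caller would rebuild an SRpcMsg from these
}
}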
// ---------------------------------------------
typedef struct SyncClientRequestReply {
uint32_t bytes;
@ -325,22 +353,53 @@ void syncAppendEntriesLog(const SyncAppendEntries* pMsg);
void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg);
// ---------------------------------------------
// define ahead
/*
typedef struct SOffsetAndContLen {
int32_t offset;
int32_t contLen;
} SOffsetAndContLen;
*/
typedef struct SyncAppendEntriesBatch {
uint32_t bytes;
int32_t vgId;
uint32_t msgType;
SRaftId srcId;
SRaftId destId;
// private data
SyncTerm term;
SyncIndex prevLogIndex;
SyncTerm prevLogTerm;
SyncIndex commitIndex;
SyncTerm privateTerm;
int32_t dataCount;
uint32_t dataLen;
char data[];
} SyncAppendEntriesBatch;
SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId);
void syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg);
void syncAppendEntriesBatchSerialize(const SyncAppendEntriesBatch* pMsg, char* buf, uint32_t bufLen);
void syncAppendEntriesBatchDeserialize(const char* buf, uint32_t len, SyncAppendEntriesBatch* pMsg);
char* syncAppendEntriesBatchSerialize2(const SyncAppendEntriesBatch* pMsg, uint32_t* len);
SyncAppendEntriesBatch* syncAppendEntriesBatchDeserialize2(const char* buf, uint32_t len);
void syncAppendEntriesBatch2RpcMsg(const SyncAppendEntriesBatch* pMsg, SRpcMsg* pRpcMsg);
void syncAppendEntriesBatchFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesBatch* pMsg);
SyncAppendEntriesBatch* syncAppendEntriesBatchFromRpcMsg2(const SRpcMsg* pRpcMsg);
cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg);
char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg);
void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize,
int32_t* pRetArrSize);
// for debug ----------------------
void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg);
void syncAppendEntriesBatchPrint2(char* s, const SyncAppendEntriesBatch* pMsg);
void syncAppendEntriesBatchLog(const SyncAppendEntriesBatch* pMsg);
void syncAppendEntriesBatchLog2(char* s, const SyncAppendEntriesBatch* pMsg);
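// Editor's sketch (illustrative, not from the patch): a receiver can expand a batch
// back into individual rpc messages with the helper declared above; SYNC_MAX_BATCH_SIZE
// (100) appears earlier in this commit in sync.h.
static void demoOnAppendEntriesBatch(SyncAppendEntriesBatch* pBatch) {
SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE];
int32_t retArrSize = 0;
syncAppendEntriesBatch2RpcMsgArray(pBatch, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize);
for (int32_t i = 0; i < retArrSize; ++i) {
(void)rpcMsgArr[i].contLen; // each element carries one replicated log entry (pCont/contLen)
}
}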
// ---------------------------------------------
typedef struct SyncAppendEntriesReply {
uint32_t bytes;
@ -542,6 +601,7 @@ int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg);
int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg);
int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg);
int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncIndex* pRetIndex);
int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* pMsg);
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg);
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg);
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg);

View File

@ -61,45 +61,23 @@ extern "C" {
} \
}
#define WAL_HEAD_VER 0
#define WAL_PROTO_VER 0
#define WAL_NOSUFFIX_LEN 20
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
#define WAL_LOG_SUFFIX "log"
#define WAL_INDEX_SUFFIX "idx"
#define WAL_REFRESH_MS 1000
#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead))
#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalCkHead))
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
#define WAL_MAGIC 0xFAFBFCFDULL
#pragma pack(push, 1)
typedef enum {
TAOS_WAL_NOLOG = 0,
TAOS_WAL_WRITE = 1,
TAOS_WAL_FSYNC = 2,
} EWalType;
// used by sync module
typedef struct {
int8_t isWeek;
uint64_t seqNum;
uint64_t term;
} SSyncLogMeta;
typedef struct SWalReadHead {
int8_t headVer;
int8_t reserved;
int16_t msgType;
int32_t bodyLen;
int64_t ingestTs; // not implemented
int64_t version;
// sync meta
SSyncLogMeta syncMeta;
char body[];
} SWalReadHead;
typedef struct {
int32_t vgId;
int32_t fsyncPeriod; // millisecond
@ -110,13 +88,6 @@ typedef struct {
EWalType level; // wal level
} SWalCfg;
typedef struct {
uint64_t magic;
uint32_t cksumHead;
uint32_t cksumBody;
SWalReadHead head;
} SWalHead;
typedef struct SWalVer {
int64_t firstVer;
int64_t verInSnapshotting;
@ -125,6 +96,35 @@ typedef struct SWalVer {
int64_t lastVer;
} SWalVer;
#pragma pack(push, 1)
// used by sync module
typedef struct {
int8_t isWeek;
uint64_t seqNum;
uint64_t term;
} SSyncLogMeta;
typedef struct {
int8_t protoVer;
int64_t version;
int16_t msgType;
int32_t bodyLen;
int64_t ingestTs; // not implemented
// sync meta
SSyncLogMeta syncMeta;
char body[];
} SWalCont;
typedef struct {
uint64_t magic;
uint32_t cksumHead;
uint32_t cksumBody;
SWalCont head;
} SWalCkHead;
#pragma pack(pop)
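// Editor's note (sketch, not in the patch): with the renamed types, a complete
// on-disk record is the checksummed header SWalCkHead followed by a payload of
// SWalCont.bodyLen bytes, which is what WAL_MAX_SIZE above accounts for.
static inline int64_t walRecordTotalSize(const SWalCkHead* pCkHead) {
return (int64_t)sizeof(SWalCkHead) + pCkHead->head.bodyLen;
}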
typedef struct SWal {
// cfg
SWalCfg cfg;
@ -134,7 +134,7 @@ typedef struct SWal {
TdFilePtr pWriteLogTFile;
TdFilePtr pWriteIdxTFile;
int32_t writeCur;
SArray *fileInfoSet;
SArray *fileInfoSet; // SArray<SWalFileInfo>
// status
int64_t totSize;
int64_t lastRollSeq;
@ -146,7 +146,7 @@ typedef struct SWal {
// path
char path[WAL_PATH_LEN];
// reusable write head
SWalHead writeHead;
SWalCkHead writeHead;
} SWal; // WAL HANDLE
typedef struct SWalReadHandle {
@ -158,11 +158,8 @@ typedef struct SWalReadHandle {
int64_t capacity;
int64_t status; // if cursor valid
TdThreadMutex mutex;
SWalHead *pHead;
SWalCkHead *pHead;
} SWalReadHandle;
#pragma pack(pop)
// typedef int32_t (*FWalWrite)(void *ahandle, void *pHead);
// module initialization
int32_t walInit();
@ -174,9 +171,9 @@ int32_t walAlter(SWal *, SWalCfg *pCfg);
void walClose(SWal *);
// write
int64_t walWriteWithSyncInfo(SWal *, int64_t index, tmsg_t msgType, SSyncLogMeta syncMeta, const void *body,
int32_t walWriteWithSyncInfo(SWal *, int64_t index, tmsg_t msgType, SSyncLogMeta syncMeta, const void *body,
int32_t bodyLen);
int64_t walWrite(SWal *, int64_t index, tmsg_t msgType, const void *body, int32_t bodyLen);
int32_t walWrite(SWal *, int64_t index, tmsg_t msgType, const void *body, int32_t bodyLen);
void walFsync(SWal *, bool force);
// apis for lifecycle management
@ -196,9 +193,9 @@ int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver);
// only for tq usage
void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity);
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead);
int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead);
int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalHead *pHead);
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReadHandle *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalCkHead *pHead);
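// Editor's sketch of the fetch protocol with the renamed SWalCkHead; the same pattern
// appears in tqFetchLog later in this commit, and the 2048-byte pre-allocation mirrors
// the tq poll path.
static void demoWalFetch(SWalReadHandle* pRead, int64_t ver) {
SWalCkHead* pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) return;
if (walFetchHead(pRead, ver, pCkHead) < 0) {
taosMemoryFree(pCkHead); // no log at this version
return;
}
if (pCkHead->head.msgType == TDMT_VND_SUBMIT) {
walFetchBody(pRead, &pCkHead); // may grow pCkHead to hold the body
} else {
walSkipFetchBody(pRead, pCkHead); // advance without reading the body
}
taosMemoryFree(pCkHead);
}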
typedef struct {
int64_t refId;

View File

@ -25,17 +25,17 @@ extern "C" {
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section.
#ifndef ALLOW_FORBID_FUNC
#define open OPEN_FUNC_TAOS_FORBID
#define fopen FOPEN_FUNC_TAOS_FORBID
#define access ACCESS_FUNC_TAOS_FORBID
#define stat STAT_FUNC_TAOS_FORBID
#define lstat LSTAT_FUNC_TAOS_FORBID
#define fstat FSTAT_FUNC_TAOS_FORBID
#define close CLOSE_FUNC_TAOS_FORBID
#define fclose FCLOSE_FUNC_TAOS_FORBID
#define fsync FSYNC_FUNC_TAOS_FORBID
#define getline GETLINE_FUNC_TAOS_FORBID
// #define fflush FFLUSH_FUNC_TAOS_FORBID
#define open OPEN_FUNC_TAOS_FORBID
#define fopen FOPEN_FUNC_TAOS_FORBID
#define access ACCESS_FUNC_TAOS_FORBID
#define stat STAT_FUNC_TAOS_FORBID
#define lstat LSTAT_FUNC_TAOS_FORBID
#define fstat FSTAT_FUNC_TAOS_FORBID
#define close CLOSE_FUNC_TAOS_FORBID
#define fclose FCLOSE_FUNC_TAOS_FORBID
#define fsync FSYNC_FUNC_TAOS_FORBID
#define getline GETLINE_FUNC_TAOS_FORBID
// #define fflush FFLUSH_FUNC_TAOS_FORBID
#endif
#ifndef PATH_MAX
@ -43,54 +43,54 @@ extern "C" {
#endif
typedef struct TdFile *TdFilePtr;
#define TD_FILE_CREATE 0x0001
#define TD_FILE_WRITE 0x0002
#define TD_FILE_READ 0x0004
#define TD_FILE_TRUNC 0x0008
#define TD_FILE_APPEND 0x0010
#define TD_FILE_TEXT 0x0020
#define TD_FILE_AUTO_DEL 0x0040
#define TD_FILE_EXCL 0x0080
#define TD_FILE_STREAM 0x0100 // Only support taosFprintfFile, taosGetLineFile, taosEOFFile
TdFilePtr taosOpenFile(const char *path,int32_t tdFileOptions);
#define TD_FILE_CREATE 0x0001
#define TD_FILE_WRITE 0x0002
#define TD_FILE_READ 0x0004
#define TD_FILE_TRUNC 0x0008
#define TD_FILE_APPEND 0x0010
#define TD_FILE_TEXT 0x0020
#define TD_FILE_AUTO_DEL 0x0040
#define TD_FILE_EXCL 0x0080
#define TD_FILE_STREAM 0x0100 // Only support taosFprintfFile, taosGetLineFile, taosEOFFile
TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions);
#define TD_FILE_ACCESS_EXIST_OK 0x1
#define TD_FILE_ACCESS_READ_OK 0x2
#define TD_FILE_ACCESS_WRITE_OK 0x4
bool taosCheckAccessFile(const char *pathname, int mode);
bool taosCheckAccessFile(const char *pathname, int mode);
int32_t taosLockFile(TdFilePtr pFile);
int32_t taosUnLockFile(TdFilePtr pFile);
int32_t taosUmaskFile(int32_t maskVal);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime);
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime);
bool taosCheckExistFile(const char *pathname);
int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence);
int32_t taosFtruncateFile(TdFilePtr pFile, int64_t length);
int32_t taosFsyncFile(TdFilePtr pFile);
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count);
int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset);
int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count);
void taosFprintfFile(TdFilePtr pFile, const char *format, ...);
int64_t taosGetLineFile(TdFilePtr pFile, char ** __restrict ptrBuf);
int64_t taosGetLineFile(TdFilePtr pFile, char **__restrict ptrBuf);
int64_t taosGetsFile(TdFilePtr pFile, int32_t maxSize, char *__restrict buf);
int32_t taosEOFFile(TdFilePtr pFile);
int64_t taosCloseFile(TdFilePtr *ppFile);
int32_t taosCloseFile(TdFilePtr *ppFile);
int32_t taosRenameFile(const char *oldName, const char *newName);
int64_t taosCopyFile(const char *from, const char *to);
int32_t taosRemoveFile(const char *path);
void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, char *dstPath);
void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, char *dstPath);
int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, int64_t size);

View File

@ -388,6 +388,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_TASK_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x0719)
#define TSDB_CODE_QRY_JOB_FREED TAOS_DEF_ERROR_CODE(0, 0x071A)
#define TSDB_CODE_QRY_TASK_STATUS_ERROR TAOS_DEF_ERROR_CODE(0, 0x071B)
//json
#define TSDB_CODE_QRY_JSON_IN_ERROR TAOS_DEF_ERROR_CODE(0, 0x071C)
#define TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x071D)
#define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x071E)
// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
@ -423,6 +427,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910)
#define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911)
#define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912)
#define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913)
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)
// tq

View File

@ -44,6 +44,8 @@ uint32_t ip2uint(const char *const ip_addr);
void taosIp2String(uint32_t ip, char *str);
void taosIpPort2String(uint32_t ip, uint16_t port, char *str);
void *tmemmem(const char *haystack, int hlen, const char *needle, int nlen);
static FORCE_INLINE void taosEncryptPass(uint8_t *inBuf, size_t inLen, char *target) {
T_MD5_CTX context;
tMD5Init(&context);
@ -59,10 +61,10 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
tMD5Final(&context);
char buf[TSDB_PASSWORD_LEN + 1];
sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5],
context.digest[6], context.digest[7], context.digest[8], context.digest[9], context.digest[10],
context.digest[11], context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0], context.digest[1],
context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
memcpy(target, buf, TSDB_PASSWORD_LEN);
}
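// Editor's sketch (not in the patch): typical use of taosEncryptPass_c above, hashing a
// plain-text password into its 32-character hex digest; the wrapper name is illustrative.
static void demoEncryptPass(const char* plain, char target[TSDB_PASSWORD_LEN + 1]) {
taosEncryptPass_c((uint8_t*)plain, strlen(plain), target);
target[TSDB_PASSWORD_LEN] = '\0';
}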

View File

@ -716,7 +716,12 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) {
void* left1 = colDataGetData(pColInfoData, left);
void* right1 = colDataGetData(pColInfoData, right);
if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
if (tTagIsJson(left1) || tTagIsJson(right1)) {
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return 0;
}
}
__compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order);
int ret = fn(left1, right1);
@ -890,7 +895,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0);
int64_t p0 = taosGetTimestampUs();
__compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order);
taosSort(pColInfoData->pData, pDataBlock->info.rows, pColInfoData->info.bytes, fn);
@ -919,6 +924,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
}
taosqsort(index, rows, sizeof(int32_t), &helper, dataBlockCompar);
if (terrno) return terrno;
int64_t p1 = taosGetTimestampUs();
@ -1431,9 +1437,39 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
}
}
static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end) {
int32_t dataOffset = -1;
int32_t dataLen = 0;
int32_t begin = start;
while (begin < end) {
int32_t offset = pColInfoData->varmeta.offset[begin];
if (offset == -1) { // null value, nothing to move
begin++;
continue;
}
if (start != 0) {
pColInfoData->varmeta.offset[begin] = dataLen;
}
char* data = pColInfoData->pData + offset;
if (dataOffset == -1) dataOffset = offset; // mark the begin of data
int32_t type = pColInfoData->info.type;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen += getJsonValueLen(data);
} else {
dataLen += varDataTLen(data);
}
begin++;
}
if (dataOffset > 0) {
memmove(pColInfoData->pData, pColInfoData->pData + dataOffset, dataLen);
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[start], (end - start) * sizeof(int32_t));
}
return dataLen;
}
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
memset(&pColInfoData->varmeta.offset[total - n], 0, n);
} else {
int32_t bytes = pColInfoData->info.bytes;
@ -1461,6 +1497,33 @@ int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n) {
return TSDB_CODE_SUCCESS;
}
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
}
}
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n) {
if (n == 0) {
blockDataCleanup(pBlock);
return TSDB_CODE_SUCCESS;
}
if (pBlock->info.rows <= n) {
return TSDB_CODE_SUCCESS;
} else {
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
colDataKeepFirstNRows(pColInfoData, n, pBlock->info.rows);
}
pBlock->info.rows = n;
}
return TSDB_CODE_SUCCESS;
}
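// Editor's sketch (illustrative pairing, not from the patch): together with
// blockDataGetCapacityInRow, the keep/trim helpers let a caller cap a block at one
// page worth of rows.
static void demoCapBlockToPage(SSDataBlock* pBlock, size_t pageSize) {
size_t cap = blockDataGetCapacityInRow(pBlock, pageSize);
if ((size_t)pBlock->info.rows > cap) {
blockDataKeepFirstNRows(pBlock, cap); // drop rows that would not fit in one page
}
}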
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
int64_t tbUid = pBlock->info.uid;
int16_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
@ -1605,6 +1668,13 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
return buf;
}
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) {
SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock));
taosArrayPush(dataBlocks, pBlock);
blockDebugShowDataBlocks(dataBlocks, flag);
taosArrayDestroy(dataBlocks);
}
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);

View File

@ -503,6 +503,7 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
if (tEncodeI32(&encoder, pReq->ttl) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfColumns) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfTags) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfFuncs) < 0) return -1;
if (tEncodeI32(&encoder, pReq->commentLen) < 0) return -1;
if (tEncodeI32(&encoder, pReq->ast1Len) < 0) return -1;
if (tEncodeI32(&encoder, pReq->ast2Len) < 0) return -1;
@ -510,21 +511,26 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
for (int32_t i = 0; i < pReq->numOfColumns; ++i) {
SField *pField = taosArrayGet(pReq->pColumns, i);
if (tEncodeI8(&encoder, pField->type) < 0) return -1;
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
}
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
SField *pField = taosArrayGet(pReq->pTags, i);
if (tEncodeI8(&encoder, pField->type) < 0) return -1;
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
}
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
const char *pFunc = taosArrayGet(pReq->pFuncs, i);
if (tEncodeCStr(&encoder, pFunc) < 0) return -1;
}
if (pReq->commentLen > 0) {
if (tEncodeCStr(&encoder, pReq->comment) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->pComment) < 0) return -1;
}
if (pReq->ast1Len > 0) {
if (tEncodeBinary(&encoder, pReq->pAst1, pReq->ast1Len) < 0) return -1;
@ -533,13 +539,6 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
if (tEncodeBinary(&encoder, pReq->pAst2, pReq->ast2Len) < 0) return -1;
}
int32_t numOfFuncs = taosArrayGetSize(pReq->pFuncs);
if (tEncodeI32(&encoder, numOfFuncs) < 0) return -1;
for (int32_t i = 0; i < numOfFuncs; ++i) {
const char *pFunc = taosArrayGet(pReq->pFuncs, i);
if (tEncodeCStr(&encoder, pFunc) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -561,13 +560,15 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
if (tDecodeI32(&decoder, &pReq->ttl) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfColumns) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfTags) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfFuncs) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->commentLen) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->ast1Len) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->ast2Len) < 0) return -1;
pReq->pColumns = taosArrayInit(pReq->numOfColumns, sizeof(SField));
pReq->pTags = taosArrayInit(pReq->numOfTags, sizeof(SField));
if (pReq->pColumns == NULL || pReq->pTags == NULL) {
pReq->pFuncs = taosArrayInit(pReq->numOfFuncs, TSDB_FUNC_NAME_LEN);
if (pReq->pColumns == NULL || pReq->pTags == NULL || pReq->pFuncs == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
@ -575,9 +576,9 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
for (int32_t i = 0; i < pReq->numOfColumns; ++i) {
SField field = {0};
if (tDecodeI8(&decoder, &field.type) < 0) return -1;
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (taosArrayPush(pReq->pColumns, &field) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -587,19 +588,28 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
SField field = {0};
if (tDecodeI8(&decoder, &field.type) < 0) return -1;
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (taosArrayPush(pReq->pTags, &field) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
char pFunc[TSDB_FUNC_NAME_LEN] = {0};
if (tDecodeCStrTo(&decoder, pFunc) < 0) return -1;
if (taosArrayPush(pReq->pFuncs, pFunc) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
if (pReq->commentLen > 0) {
pReq->comment = taosMemoryMalloc(pReq->commentLen + 1);
if (pReq->comment == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->comment) < 0) return -1;
pReq->pComment = taosMemoryMalloc(pReq->commentLen + 1);
if (pReq->pComment == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->pComment) < 0) return -1;
}
if (pReq->ast1Len > 0) {
@ -614,23 +624,7 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
if (tDecodeCStrTo(&decoder, pReq->pAst2) < 0) return -1;
}
int32_t numOfFuncs = 0;
if (tDecodeI32(&decoder, &numOfFuncs) < 0) return -1;
if (numOfFuncs > 0) {
pReq->pFuncs = taosArrayInit(numOfFuncs, TSDB_FUNC_NAME_LEN);
if (NULL == pReq->pFuncs) return -1;
}
for (int32_t i = 0; i < numOfFuncs; ++i) {
char pFunc[TSDB_FUNC_NAME_LEN] = {0};
if (tDecodeCStrTo(&decoder, pFunc) < 0) return -1;
if (taosArrayPush(pReq->pFuncs, pFunc) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
@ -638,10 +632,10 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
void tFreeSMCreateStbReq(SMCreateStbReq *pReq) {
taosArrayDestroy(pReq->pColumns);
taosArrayDestroy(pReq->pTags);
taosMemoryFreeClear(pReq->comment);
taosArrayDestroy(pReq->pFuncs);
taosMemoryFreeClear(pReq->pComment);
taosMemoryFreeClear(pReq->pAst1);
taosMemoryFreeClear(pReq->pAst2);
taosArrayDestroy(pReq->pFuncs);
}
int32_t tSerializeSMDropStbReq(void *buf, int32_t bufLen, SMDropStbReq *pReq) {

View File

@ -155,7 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
void taosVariantDestroy(SVariant *pVar) {
if (pVar == NULL) return;
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
|| pVar->nType == TSDB_DATA_TYPE_JSON) {
taosMemoryFreeClear(pVar->pz);
pVar->nLen = 0;
}
@ -184,7 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
pDst->nType = pSrc->nType;
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
|| pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
@ -976,6 +978,7 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
case TSDB_DATA_TYPE_FLOAT:
return (char *)&pVar->d;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_JSON:
return (char *)pVar->pz;
case TSDB_DATA_TYPE_NCHAR:
return (char *)pVar->ucs4;

View File

@ -341,11 +341,12 @@ typedef struct {
int32_t colVer;
int32_t smaVer;
int32_t nextColId;
int64_t watermark[2];
int64_t maxdelay[2];
int64_t watermark[2];
int32_t ttl;
int32_t numOfColumns;
int32_t numOfTags;
int32_t numOfFuncs;
int32_t commentLen;
int32_t ast1Len;
int32_t ast2Len;

View File

@ -396,6 +396,8 @@ static int32_t mndSetUpdateSmaStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
stbObj.pColumns = NULL;
stbObj.numOfTags = 0;
stbObj.pTags = NULL;
stbObj.numOfFuncs = 0;
stbObj.pFuncs = NULL;
stbObj.updateTime = taosGetTimestampMs();
stbObj.lock = 0;
stbObj.smaVer++;
@ -408,47 +410,6 @@ static int32_t mndSetUpdateSmaStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
return 0;
}
#if 0
static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
int32_t contLen;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if (pVgroup->dbUid != pDb->uid) {
sdbRelease(pSdb, pVgroup);
continue;
}
void *pReq = mndBuildVCreateSmaReq(pMnode, pVgroup, pSma, &contLen);
if (pReq == NULL) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_VND_CREATE_SMA;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
return -1;
}
sdbRelease(pSdb, pVgroup);
}
return 0;
}
#endif
static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
SSmaObj *pSma) {
SVnodeGid *pVgid = pVgroup->vnodeGid + 0;
@ -621,7 +582,6 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetUpdateSmaStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
// if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg, &smaObj) != 0) goto _OVER;
if (mndScheduleStream(pMnode, &streamObj) != 0) goto _OVER;
if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER;
@ -770,49 +730,6 @@ static int32_t mndSetDropSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVg
return 0;
}
#if 0
static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
int32_t contLen;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if (pVgroup->dbUid != pDb->uid) {
sdbRelease(pSdb, pVgroup);
continue;
}
int32_t contLen = 0;
void *pReq = mndBuildVDropSmaReq(pMnode, pVgroup, pSma, &contLen);
if (pReq == NULL) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_VND_DROP_SMA;
action.acceptableCode = TSDB_CODE_VND_SMA_NOT_EXIST;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
return -1;
}
sdbRelease(pSdb, pVgroup);
}
return 0;
}
#endif
static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
SVnodeGid *pVgid = pVgroup->vnodeGid + 0;
SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
@ -879,7 +796,6 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetUpdateSmaStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
// if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@ -909,7 +825,6 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
// if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER;
mndReleaseVgroup(pMnode, pVgroup);
pVgroup = NULL;
}

View File

@ -78,7 +78,7 @@ void mndCleanupStb(SMnode *pMnode) {}
SSdbRaw *mndStbActionEncode(SStbObj *pStb) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
int32_t size = sizeof(SStbObj) + (pStb->numOfColumns + pStb->numOfTags) * sizeof(SSchema) + +pStb->commentLen +
int32_t size = sizeof(SStbObj) + (pStb->numOfColumns + pStb->numOfTags) * sizeof(SSchema) + pStb->commentLen +
pStb->ast1Len + pStb->ast2Len + STB_RESERVE_SIZE + taosArrayGetSize(pStb->pFuncs) * TSDB_FUNC_NAME_LEN;
SSdbRaw *pRaw = sdbAllocRaw(SDB_STB, STB_VER_NUMBER, size);
if (pRaw == NULL) goto _OVER;
@ -92,6 +92,7 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) {
SDB_SET_INT64(pRaw, dataPos, pStb->dbUid, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->tagVer, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->colVer, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->smaVer, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->nextColId, _OVER)
SDB_SET_INT64(pRaw, dataPos, pStb->maxdelay[0], _OVER)
SDB_SET_INT64(pRaw, dataPos, pStb->maxdelay[1], _OVER)
@ -100,17 +101,11 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) {
SDB_SET_INT32(pRaw, dataPos, pStb->ttl, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->numOfColumns, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->numOfTags, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->numOfFuncs, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->commentLen, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->ast1Len, _OVER)
SDB_SET_INT32(pRaw, dataPos, pStb->ast2Len, _OVER)
int32_t funcNum = taosArrayGetSize(pStb->pFuncs);
SDB_SET_INT32(pRaw, dataPos, funcNum, _OVER)
for (int32_t i = 0; i < funcNum; ++i) {
char *func = taosArrayGet(pStb->pFuncs, i);
SDB_SET_BINARY(pRaw, dataPos, func, TSDB_FUNC_NAME_LEN, _OVER)
}
for (int32_t i = 0; i < pStb->numOfColumns; ++i) {
SSchema *pSchema = &pStb->pColumns[i];
SDB_SET_INT8(pRaw, dataPos, pSchema->type, _OVER)
@ -129,15 +124,23 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) {
SDB_SET_BINARY(pRaw, dataPos, pSchema->name, TSDB_COL_NAME_LEN, _OVER)
}
for (int32_t i = 0; i < pStb->numOfFuncs; ++i) {
char *func = taosArrayGet(pStb->pFuncs, i);
SDB_SET_BINARY(pRaw, dataPos, func, TSDB_FUNC_NAME_LEN, _OVER)
}
if (pStb->commentLen > 0) {
SDB_SET_BINARY(pRaw, dataPos, pStb->comment, pStb->commentLen + 1, _OVER)
}
if (pStb->ast1Len > 0) {
SDB_SET_BINARY(pRaw, dataPos, pStb->pAst1, pStb->ast1Len, _OVER)
}
if (pStb->ast2Len > 0) {
SDB_SET_BINARY(pRaw, dataPos, pStb->pAst2, pStb->ast2Len, _OVER)
}
SDB_SET_RESERVE(pRaw, dataPos, STB_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@ -180,6 +183,7 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pStb->dbUid, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->tagVer, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->colVer, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->smaVer, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->nextColId, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pStb->maxdelay[0], _OVER)
SDB_GET_INT64(pRaw, dataPos, &pStb->maxdelay[1], _OVER)
@ -188,27 +192,15 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &pStb->ttl, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->numOfColumns, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->numOfTags, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->numOfFuncs, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->commentLen, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->ast1Len, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pStb->ast2Len, _OVER)
int32_t funcNum = 0;
SDB_GET_INT32(pRaw, dataPos, &funcNum, _OVER)
if (funcNum > 0) {
pStb->pFuncs = taosArrayInit(funcNum, TSDB_FUNC_NAME_LEN);
if (NULL == pStb->pFuncs) {
goto _OVER;
}
char funcName[TSDB_FUNC_NAME_LEN];
for (int32_t i = 0; i < funcNum; ++i) {
SDB_GET_BINARY(pRaw, dataPos, funcName, TSDB_FUNC_NAME_LEN, _OVER)
taosArrayPush(pStb->pFuncs, funcName);
}
}
pStb->pColumns = taosMemoryCalloc(pStb->numOfColumns, sizeof(SSchema));
pStb->pTags = taosMemoryCalloc(pStb->numOfTags, sizeof(SSchema));
if (pStb->pColumns == NULL || pStb->pTags == NULL) {
pStb->pFuncs = taosArrayInit(pStb->numOfFuncs, TSDB_FUNC_NAME_LEN);
if (pStb->pColumns == NULL || pStb->pTags == NULL || pStb->pFuncs == NULL) {
goto _OVER;
}
@ -230,16 +222,24 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, pSchema->name, TSDB_COL_NAME_LEN, _OVER)
}
for (int32_t i = 0; i < pStb->numOfFuncs; ++i) {
char funcName[TSDB_FUNC_NAME_LEN] = {0};
SDB_GET_BINARY(pRaw, dataPos, funcName, TSDB_FUNC_NAME_LEN, _OVER)
taosArrayPush(pStb->pFuncs, funcName);
}
if (pStb->commentLen > 0) {
pStb->comment = taosMemoryCalloc(pStb->commentLen + 1, 1);
if (pStb->comment == NULL) goto _OVER;
SDB_GET_BINARY(pRaw, dataPos, pStb->comment, pStb->commentLen + 1, _OVER)
}
if (pStb->ast1Len > 0) {
pStb->pAst1 = taosMemoryCalloc(pStb->ast1Len, 1);
if (pStb->pAst1 == NULL) goto _OVER;
SDB_GET_BINARY(pRaw, dataPos, pStb->pAst1, pStb->ast1Len, _OVER)
}
if (pStb->ast2Len > 0) {
pStb->pAst2 = taosMemoryCalloc(pStb->ast2Len, 1);
if (pStb->pAst2 == NULL) goto _OVER;
@ -273,6 +273,7 @@ static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb) {
taosMemoryFreeClear(pStb->pColumns);
taosMemoryFreeClear(pStb->pTags);
taosMemoryFreeClear(pStb->comment);
taosMemoryFreeClear(pStb->pFuncs);
taosMemoryFreeClear(pStb->pAst1);
taosMemoryFreeClear(pStb->pAst2);
taosArrayDestroy(pStb->pFuncs);
@ -322,7 +323,7 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
pOld->commentLen = pNew->commentLen;
if (pOld->ast1Len < pNew->ast1Len) {
void *pAst1 = taosMemoryMalloc(pNew->ast1Len);
void *pAst1 = taosMemoryMalloc(pNew->ast1Len + 1);
if (pAst1 != NULL) {
taosMemoryFree(pOld->pAst1);
pOld->pAst1 = pAst1;
@ -334,7 +335,7 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
}
if (pOld->ast2Len < pNew->ast2Len) {
void *pAst2 = taosMemoryMalloc(pNew->ast2Len);
void *pAst2 = taosMemoryMalloc(pNew->ast2Len + 1);
if (pAst2 != NULL) {
taosMemoryFree(pOld->pAst2);
pOld->pAst2 = pAst2;
@ -361,12 +362,15 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
}
if (pNew->commentLen > 0) {
memcpy(pOld->comment, pNew->comment, pNew->commentLen + 1);
pOld->commentLen = pNew->commentLen;
}
if (pNew->ast1Len != 0) {
memcpy(pOld->pAst1, pNew->pAst1, pNew->ast1Len);
pOld->ast1Len = pNew->ast1Len;
}
if (pNew->ast2Len != 0) {
memcpy(pOld->pAst2, pNew->pAst2, pNew->ast2Len);
pOld->ast2Len = pNew->ast2Len;
}
taosWUnLockLatch(&pOld->lock);
return 0;
@ -575,7 +579,10 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate) {
static int32_t mndSetCreateStbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdbRaw *pRedoRaw = mndStbActionEncode(pStb);
if (pRedoRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) {
sdbFreeRaw(pRedoRaw);
return -1;
}
if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_CREATING) != 0) return -1;
return 0;
@ -584,7 +591,10 @@ static int32_t mndSetCreateStbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *p
static int32_t mndSetCreateStbUndoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdbRaw *pUndoRaw = mndStbActionEncode(pStb);
if (pUndoRaw == NULL) return -1;
if (mndTransAppendUndolog(pTrans, pUndoRaw) != 0) return -1;
if (mndTransAppendUndolog(pTrans, pUndoRaw) != 0) {
sdbFreeRaw(pUndoRaw);
return -1;
}
if (sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED) != 0) return -1;
return 0;
@ -593,7 +603,10 @@ static int32_t mndSetCreateStbUndoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *p
static int32_t mndSetCreateStbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdbRaw *pCommitRaw = mndStbActionEncode(pStb);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
sdbFreeRaw(pCommitRaw);
return -1;
}
if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY) != 0) return -1;
return 0;
@ -613,6 +626,11 @@ static int32_t mndSetCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
continue;
}
if (pVgroup->isTsma) {
sdbRelease(pSdb, pVgroup);
continue;
}
void *pReq = mndBuildVCreateStbReq(pMnode, pVgroup, pStb, &contLen);
if (pReq == NULL) {
sdbCancelFetch(pSdb, pIter);
@ -651,6 +669,11 @@ static int32_t mndSetCreateStbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj
continue;
}
if (pVgroup->isTsma) {
sdbRelease(pSdb, pVgroup);
continue;
}
int32_t contLen = 0;
void *pReq = mndBuildVDropStbReq(pMnode, pVgroup, pStb, &contLen);
if (pReq == NULL) {
@ -697,6 +720,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
pDst->dbUid = pDb->uid;
pDst->tagVer = 1;
pDst->colVer = 1;
pDst->smaVer = 1;
pDst->nextColId = 1;
pDst->maxdelay[0] = pCreate->delay1;
pDst->maxdelay[1] = pCreate->delay2;
@ -705,6 +729,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
pDst->ttl = pCreate->ttl;
pDst->numOfColumns = pCreate->numOfColumns;
pDst->numOfTags = pCreate->numOfTags;
pDst->numOfFuncs = pCreate->numOfFuncs;
pDst->commentLen = pCreate->commentLen;
pDst->pFuncs = pCreate->pFuncs;
pCreate->pFuncs = NULL;
@ -715,7 +740,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
memcpy(pDst->comment, pCreate->comment, pDst->commentLen + 1);
memcpy(pDst->comment, pCreate->pComment, pDst->commentLen + 1);
}
pDst->ast1Len = pCreate->ast1Len;
@ -770,20 +795,15 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCreate, SDbObj *pDb) {
SStbObj stbObj = {0};
int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER;
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
_OVER:
@ -906,7 +926,8 @@ _OVER:
}
static int32_t mndCheckAlterStbReq(SMAlterStbReq *pAlter) {
if (pAlter->commentLen >= 0 || pAlter->ttl != 0) return 0;
if (pAlter->commentLen >= 0) return 0;
if (pAlter->ttl != 0) return 0;
if (pAlter->numOfFields < 1 || pAlter->numOfFields != (int32_t)taosArrayGetSize(pAlter->pFields)) {
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
@ -969,6 +990,7 @@ static int32_t mndUpdateStbCommentAndTTL(const SStbObj *pOld, SStbObj *pNew, cha
memcpy(pNew->comment, pComment, commentLen + 1);
} else if (commentLen == 0) {
pNew->commentLen = 0;
} else {
}
if (ttl >= 0) {
@ -1245,7 +1267,10 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
static int32_t mndSetAlterStbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdbRaw *pRedoRaw = mndStbActionEncode(pStb);
if (pRedoRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) {
sdbFreeRaw(pRedoRaw);
return -1;
}
if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1;
return 0;
@ -1254,7 +1279,10 @@ static int32_t mndSetAlterStbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
static int32_t mndSetAlterStbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
SSdbRaw *pCommitRaw = mndStbActionEncode(pStb);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
sdbFreeRaw(pCommitRaw);
return -1;
}
if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY) != 0) return -1;
return 0;
@ -1274,6 +1302,11 @@ static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
continue;
}
if (pVgroup->isTsma) {
sdbRelease(pSdb, pVgroup);
continue;
}
void *pReq = mndBuildVCreateStbReq(pMnode, pVgroup, pStb, &contLen);
if (pReq == NULL) {
sdbCancelFetch(pSdb, pIter);
@ -1388,7 +1421,7 @@ static int32_t mndBuildStbCfgImp(SDbObj *pDb, SStbObj *pStb, const char *tbName,
pSchema->bytes = pSrcSchema->bytes;
}
if (pStb->pFuncs) {
if (pStb->numOfFuncs > 0) {
pRsp->pFuncs = taosArrayDup(pStb->pFuncs);
}
@ -1626,7 +1659,10 @@ _OVER:
static int32_t mndSetDropStbRedoLogs(SMnode *pMnode, STrans *pTrans, SStbObj *pStb) {
SSdbRaw *pRedoRaw = mndStbActionEncode(pStb);
if (pRedoRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) {
sdbFreeRaw(pRedoRaw);
return -1;
}
if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPING) != 0) return -1;
return 0;
@ -1635,7 +1671,10 @@ static int32_t mndSetDropStbRedoLogs(SMnode *pMnode, STrans *pTrans, SStbObj *pS
static int32_t mndSetDropStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStbObj *pStb) {
SSdbRaw *pCommitRaw = mndStbActionEncode(pStb);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
sdbFreeRaw(pCommitRaw);
return -1;
}
if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED) != 0) return -1;
return 0;
@ -1654,6 +1693,11 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
continue;
}
if (pVgroup->isTsma) {
sdbRelease(pSdb, pVgroup);
continue;
}
int32_t contLen = 0;
void *pReq = mndBuildVDropStbReq(pMnode, pVgroup, pStb, &contLen);
if (pReq == NULL) {
@ -1683,7 +1727,7 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb) {
int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq);
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);

View File

@ -117,7 +117,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
}
}
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) {
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) {
mDebug("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, NULL, NULL, NULL);

View File

@ -426,7 +426,15 @@ static int32_t mndCompareDnodeId(int32_t *dnode1Id, int32_t *dnode2Id) { return
static int32_t mndCompareDnodeVnodes(SDnodeObj *pDnode1, SDnodeObj *pDnode2) {
float d1Score = (float)pDnode1->numOfVnodes / pDnode1->numOfSupportVnodes;
float d2Score = (float)pDnode2->numOfVnodes / pDnode2->numOfSupportVnodes;
#if 0
if (d1Score == d2Score) {
return pDnode2->id - pDnode1->id;
} else {
return d1Score >= d2Score ? 1 : 0;
}
#else
return d1Score >= d2Score ? 1 : 0;
#endif
}
void mndSortVnodeGid(SVgObj *pVgroup) {

View File

@ -157,7 +157,7 @@ typedef struct {
static STqMgmt tqMgmt = {0};
// tqRead
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** pHeadWithCkSum);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp, int32_t workerId);
@ -178,6 +178,7 @@ STqOffsetStore* tqOffsetOpen();
void tqOffsetClose(STqOffsetStore*);
STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey);
int32_t tqOffsetWrite(STqOffsetStore* pStore, const STqOffset* pOffset);
int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey);
int32_t tqOffsetSnapshot(STqOffsetStore* pStore);
// tqSink

View File

@ -271,8 +271,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pReq->subKey);
if (pOffset != NULL) {
fetchOffsetNew = pOffset->val;
char formatBuf[50];
tFormatOffset(formatBuf, 50, &fetchOffsetNew);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &fetchOffsetNew);
tqDebug("tmq poll: consumer %ld, offset reset to %s", consumerId, formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
@ -302,9 +302,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {
int64_t fetchVer = fetchOffsetNew.version + 1;
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
if (pHeadWithCkSum == NULL) {
int64_t fetchVer = fetchOffsetNew.version + 1;
SWalCkHead* pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
return -1;
}
@ -318,7 +318,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
break;
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pHeadWithCkSum) < 0) {
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead) < 0) {
// TODO add push mgr
tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
@ -329,7 +329,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
goto OVER;
}
SWalReadHead* pHead = &pHeadWithCkSum->head;
SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), fetchVer, pHead->msgType);
@ -373,9 +373,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
}
}
taosMemoryFree(pHeadWithCkSum);
taosMemoryFree(pCkHead);
} else if (fetchOffsetNew.type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqInfo("retrieve using snapshot req offset: uid %ld ts %ld", dataRsp.reqOffset.uid, dataRsp.reqOffset.ts);
tqInfo("retrieve using snapshot req offset: uid %ld ts %ld, actual offset: uid %ld ts %ld", dataRsp.reqOffset.uid,
dataRsp.reqOffset.ts, fetchOffsetNew.uid, fetchOffsetNew.ts);
if (tqScanSnapshot(pTq, &pHandle->execHandle, &dataRsp, fetchOffsetNew, workerId) < 0) {
ASSERT(0);
}
@ -522,7 +523,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
break;
}
SWalReadHead* pHead = &pHeadWithCkSum->head;
SWalCont* pHead = &pHeadWithCkSum->head;
tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), fetchOffset, pHead->msgType);
@ -597,6 +598,8 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) {
ASSERT(0);
}

View File

@ -97,6 +97,10 @@ int32_t tqOffsetWrite(STqOffsetStore* pStore, const STqOffset* pOffset) {
return taosHashPut(pStore->pHash, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset));
}
int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey) {
return taosHashRemove(pStore->pHash, subscribeKey, strlen(subscribeKey));
}
int32_t tqOffsetSnapshot(STqOffsetStore* pStore) {
// open file
// TODO file name should be with a version

View File

@ -15,13 +15,13 @@
#include "tq.h"
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** ppHeadWithCkSum) {
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
int64_t offset = *fetchOffset;
while (1) {
if (walFetchHead(pHandle->pWalReader, offset, *ppHeadWithCkSum) < 0) {
if (walFetchHead(pHandle->pWalReader, offset, *ppCkHead) < 0) {
tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", pHandle->consumerId,
pHandle->epoch, TD_VID(pTq->pVnode), offset);
*fetchOffset = offset - 1;
@ -29,8 +29,8 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead*
goto END;
}
if ((*ppHeadWithCkSum)->head.msgType == TDMT_VND_SUBMIT) {
code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum);
if ((*ppCkHead)->head.msgType == TDMT_VND_SUBMIT) {
code = walFetchBody(pHandle->pWalReader, ppCkHead);
if (code < 0) {
ASSERT(0);
@ -43,9 +43,9 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead*
goto END;
} else {
if (pHandle->fetchMeta) {
SWalReadHead* pHead = &((*ppHeadWithCkSum)->head);
SWalCont* pHead = &((*ppCkHead)->head);
if (IS_META_MSG(pHead->msgType)) {
code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum);
code = walFetchBody(pHandle->pWalReader, ppCkHead);
if (code < 0) {
ASSERT(0);
@ -58,7 +58,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead*
goto END;
}
}
code = walSkipFetchBody(pHandle->pWalReader, *ppHeadWithCkSum);
code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead);
if (code < 0) {
ASSERT(0);
*fetchOffset = offset;

View File

@ -119,7 +119,7 @@ static int32_t vnodeProcessAlterReplicaReq(SVnode *pVnode, SRpcMsg *pMsg) {
}
void vnodeProposeMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnode * pVnode = pInfo->ahandle;
SVnode *pVnode = pInfo->ahandle;
int32_t vgId = pVnode->config.vgId;
int32_t code = 0;
SRpcMsg *pMsg = NULL;
@ -199,7 +199,7 @@ void vnodeProposeMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
}
void vnodeApplyMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnode * pVnode = pInfo->ahandle;
SVnode *pVnode = pInfo->ahandle;
int32_t vgId = pVnode->config.vgId;
int32_t code = 0;
SRpcMsg *pMsg = NULL;
@ -240,7 +240,7 @@ int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
STraceId *trace = &pMsg->info.traceId;
do {
char * syncNodeStr = sync2SimpleStr(pVnode->sync);
char *syncNodeStr = sync2SimpleStr(pVnode->sync);
static int64_t vndTick = 0;
if (++vndTick % 10 == 1) {
vGTrace("vgId:%d, sync trace msg:%s, %s", syncGetVgId(pVnode->sync), TMSG_INFO(pMsg->msgType), syncNodeStr);
@ -375,7 +375,7 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon
}
static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SVnode * pVnode = pFsm->data;
SVnode *pVnode = pFsm->data;
SSnapshot snapshot = {0};
SyncIndex beginIndex = SYNC_INDEX_INVALID;
char logBuf[256] = {0};
@ -409,7 +409,7 @@ static void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
}
static int32_t vnodeSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) { return 0; }
static int32_t vnodeSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) { return 0; }
static int32_t vnodeSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { return 0; }

View File

@ -297,7 +297,7 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
SArray* pTagVals = NULL;
STag* pTag = (STag*)pCfg->pTags;
if (pCfg->pTags && pTag->flags & TD_TAG_JSON) {
if (pCfg->pTags && tTagIsJson(pTag)) {
char* pJson = parseTagDatatoJson(pTag);
if (pJson) {
*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s", pJson);

View File

@ -287,6 +287,7 @@ typedef struct STableScanInfo {
} lastStatus;
int8_t scanMode;
int8_t noTable;
} STableScanInfo;
typedef struct STagScanInfo {

View File

@ -63,6 +63,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
rowSize += (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@ -112,7 +113,9 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
p->groupId = *(uint64_t*)key;
p->pos = *(SResultRowPosition*)pData;
memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t));
#ifdef BUF_PAGE_DEBUG
qDebug("page_groupRes, groupId:%"PRIu64",pageId:%d,offset:%d\n", p->groupId, p->pos.pageId, p->pos.offset);
#endif
taosArrayPush(pGroupResInfo->pRows, &p);
}
@ -271,6 +274,7 @@ static bool isTableOk(STableKeyInfo* info, SNode* pTagCond, SMeta* metaHandle) {
SNode* pNew = NULL;
int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
if (TSDB_CODE_SUCCESS != code) {
terrno = code;
nodesDestroyNode(pTagCondTmp);
return false;
}
@ -323,12 +327,19 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
}
if (pTagCond) {
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
terrno = code;
return code;
}
if(pTagCond){
int32_t i = 0;
while (i < taosArrayGetSize(pListInfo->pTableList)) {
STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);
bool isOk = isTableOk(info, pTagCond, metaHandle);
if (!isOk) {
bool isOk = isTableOk(info, pTagCond, metaHandle);
if(terrno) return terrno;
if(!isOk){
taosArrayRemove(pListInfo->pTableList, i);
continue;
}
@ -586,13 +597,16 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
}
for (int32_t i = 0; i < numOfOutput; ++i) {
if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0 ||
strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_group_key") == 0) {
pValCtx[num++] = &pCtx[i];
} else if (fmIsSelectFunc(pCtx[i].functionId)) {
p = &pCtx[i];
}
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_setSelect num:%d", num);
#endif
if (p != NULL) {
p->subsidiaries.pCtx = pValCtx;
p->subsidiaries.num = num;

View File

@ -235,9 +235,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (uid == 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
}
}
return doPrepareScan(pTaskInfo->pRoot, uid, ts);

View File

@ -274,6 +274,9 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// 1. close the currently opened time window
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) {
#ifdef BUF_PAGE_DEBUG
qDebug("page_1");
#endif
SResultRowPosition pos = pResultRowInfo->cur;
SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
releaseBufPage(pResultBuf, pPage);
@ -281,6 +284,9 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
#ifdef BUF_PAGE_DEBUG
qDebug("page_2");
#endif
ASSERT(pSup->resultRowSize > 0);
pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
@ -538,7 +544,9 @@ static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunct
if (pCtx[k].fpSet.process == NULL) {
continue;
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_process");
#endif
int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
@ -1413,7 +1421,9 @@ void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t
if (pAggInfo->groupId != INT32_MIN && pAggInfo->groupId == groupId) {
return;
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_setbuf, groupId:%"PRIu64, groupId);
#endif
doSetTableGroupOutputBuf(pOperator, pAggInfo, numOfOutput, groupId);
// record the current active group id
@ -1489,11 +1499,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
int32_t numOfExprs) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t start = pGroupResInfo->index;
#ifdef BUF_PAGE_DEBUG
qDebug("\npage_copytoblock rows:%d", numOfRows);
#endif
for (int32_t i = start; i < numOfRows; i += 1) {
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
#ifdef BUF_PAGE_DEBUG
qDebug("page_copytoblock pos pageId:%d, offset:%d", pPos->pos.pageId, pPos->pos.offset);
#endif
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
doUpdateNumOfRows(pRow, numOfExprs, rowCellOffset);
@ -1525,6 +1539,9 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
if (pCtx[j].fpSet.finalize) {
#ifdef BUF_PAGE_DEBUG
qDebug("\npage_finalize %d", numOfExprs);
#endif
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
@ -1553,9 +1570,9 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
releaseBufPage(pBuf, page);
pBlock->info.rows += pRow->numOfRows;
if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
break;
}
// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
// break;
// }
}
qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows,
@ -2373,8 +2390,7 @@ static int32_t initExchangeOperator(SExchangePhysiNode* pExNode, SExchangeInfo*
}
pInfo->pSources = taosArrayInit(numOfSources, sizeof(SDownstreamSourceNode));
pInfo->pSourceDataInfo = taosArrayInit(numOfSources, sizeof(SSourceDataInfo));
if (pInfo->pSourceDataInfo == NULL || pInfo->pSources == NULL) {
if (pInfo->pSources == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -2829,21 +2845,28 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
pScanInfo->blockType = STREAM_INPUT__DATA_SCAN;
STableScanInfo* pInfo = pScanInfo->pSnapshotReadOp->info;
if (uid == 0) {
pInfo->noTable = 1;
return TSDB_CODE_SUCCESS;
}
/*if (pSnapShotScanInfo->dataReader == NULL) {*/
/*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
/*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
/*}*/
pInfo->noTable = 0;
if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
tsdbSetTableId(pInfo->dataReader, uid);
int64_t oldSkey = pInfo->cond.twindows[0].skey;
pInfo->cond.twindows[0].skey = ts;
pInfo->cond.twindows[0].skey = ts + 1;
tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0);
pInfo->cond.twindows[0].skey = oldSkey;
pInfo->scanTimes = 0;
pInfo->curTWinIdx = 0;
}
return TSDB_CODE_SUCCESS;
} else {
if (pOperator->numOfDownstream == 1) {
@ -2856,8 +2879,6 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
return TSDB_CODE_QRY_APP_ERROR;
}
}
return TSDB_CODE_SUCCESS;
}
int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
@ -3160,8 +3181,9 @@ static int32_t handleLimitOffset(SOperatorInfo* pOperator, SSDataBlock* pBlock)
}
// check for the limitation in each group
if (pProjectInfo->limit.limit > 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
pRes->info.rows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
if (pProjectInfo->limit.limit >= 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
int32_t keepRows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
blockDataKeepFirstNRows(pRes, keepRows);
if (pProjectInfo->slimit.limit > 0 && pProjectInfo->slimit.limit <= pProjectInfo->curGroupOutput) {
pOperator->status = OP_EXEC_DONE;
}
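For a concrete sense of the new truncation path: with limit.limit = 100 and curOutput = 90, a 25-row result block trips the condition and only its first 10 rows are kept. The same arithmetic in isolation (values are illustrative):

// Minimal sketch of the keepRows computation used above.
int64_t limit     = 100;  // LIMIT clause
int64_t curOutput = 90;   // rows already emitted
int32_t blockRows = 25;   // rows in the current result block
if (limit >= 0 && curOutput + blockRows >= limit) {
  int32_t keepRows = (int32_t)(limit - curOutput);   // 10 rows survive
  // blockDataKeepFirstNRows(pRes, keepRows) then trims the block in place
}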
@ -3235,8 +3257,6 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
longjmp(pTaskInfo->env, code);
}
doFilter(pProjectInfo->pFilterNode, pBlock);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
@ -3247,6 +3267,10 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
int32_t status = handleLimitOffset(pOperator, pBlock);
// the filter shall be applied after the functions and limit/offset have been applied to the result
doFilter(pProjectInfo->pFilterNode, pInfo->pRes);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
@ -3473,11 +3497,12 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
ASSERT(numOfRows != 0);
pOperator->resultInfo.capacity = numOfRows;
pOperator->resultInfo.threshold = numOfRows * 0.75;
if (pOperator->resultInfo.threshold == 0) {
pOperator->resultInfo.capacity = numOfRows;
pOperator->resultInfo.threshold = numOfRows;
}
}
@ -3977,12 +4002,12 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum)
return TSDB_CODE_QRY_APP_ERROR;
}
if (p == NULL) {
if (taosArrayPush(sortSupport, groupId) != NULL) {
if (taosArrayPush(sortSupport, groupId) == NULL) {
qError("taos push support array error");
taosArrayDestroy(sortSupport);
return TSDB_CODE_QRY_APP_ERROR;
}
if (taosArrayPush(pTableListInfo->pGroupList, &tGroup) != NULL) {
if (taosArrayPush(pTableListInfo->pGroupList, &tGroup) == NULL) {
qError("taos push group array error");
taosArrayDestroy(sortSupport);
return TSDB_CODE_QRY_APP_ERROR;
@ -4062,6 +4087,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
} else {
taosMemoryFree(keyBuf);
nodesClearList(groupNew);
metaReaderClear(&mr);
return code;
}
@ -4074,7 +4100,14 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
} else {
isNull[index++] = 0;
char* data = nodesGetValueFromNode(pValue);
if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON) {
if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON){
if(tTagIsJson(data)){
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
taosMemoryFree(keyBuf);
nodesClearList(groupNew);
metaReaderClear(&mr);
return terrno;
}
int32_t len = getJsonValueLen(data);
memcpy(pStart, data, len);
pStart += len;
@ -4133,7 +4166,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
if (code) {
if(code){
pTaskInfo->code = code;
return NULL;
}
code = extractTableSchemaVersion(pHandle, pTableScanNode->scan.uid, pTaskInfo);
@ -4160,7 +4194,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.maxTs = INT64_MIN,
};
if (pHandle) {
createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
if(code){
pTaskInfo->code = code;
return NULL;
}
}
SOperatorInfo* pOperator =

View File

@ -141,6 +141,10 @@ static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSData
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
if (pkey->type == TSDB_DATA_TYPE_JSON) {
if(tTagIsJson(val)){
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
return;
}
int32_t dataLen = getJsonValueLen(val);
memcpy(pkey->pData, val, dataLen);
} else if (IS_VAR_DATA_TYPE(pkey->type)) {
@ -227,11 +231,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
int32_t len = 0;
STimeWindow w = TSWINDOW_INITIALIZER;
terrno = TSDB_CODE_SUCCESS;
int32_t num = 0;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
if (!pInfo->isInit) {
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
longjmp(pTaskInfo->env, terrno);
}
pInfo->isInit = true;
num++;
continue;
@ -247,6 +255,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
if (j == 0) {
num++;
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
longjmp(pTaskInfo->env, terrno);
}
continue;
}
@ -661,7 +672,11 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
}
}
terrno = TSDB_CODE_SUCCESS;
doHashPartition(pOperator, pBlock);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
longjmp(pTaskInfo->env, terrno);
}
}
SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo));

View File

@ -518,6 +518,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
// if scan table by table
if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) {
if (pInfo->noTable) return NULL;
while (1) {
SSDataBlock* result = doTableScanGroup(pOperator);
if (result) {
@ -2089,6 +2090,7 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle
qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
return TSDB_CODE_SUCCESS;
}
pTableListInfo->needSortTableByGroupId = pTableScanNode->groupSort;
code = generateGroupIdMap(pTableListInfo, pHandle, pTableScanNode->pGroupTags);
if (code != TSDB_CODE_SUCCESS) {
return code;

View File

@ -593,7 +593,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
if (size > sortBufSize) {
// Perform the in-memory sort and then flush data in the buffer into disk.
int64_t p = taosGetTimestampUs();
blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
if (code != 0) {
return code;
}
int64_t el = taosGetTimestampUs() - p;
pHandle->sortElapsed += el;
@ -608,7 +611,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
// Perform the in-memory sort and then flush data in the buffer into disk.
int64_t p = taosGetTimestampUs();
blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
int32_t code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
if (code != 0) {
return code;
}
int64_t el = taosGetTimestampUs() - p;
pHandle->sortElapsed += el;

View File

@ -634,6 +634,12 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
}
// param1 ~ param3
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
for (int32_t i = 1; i < numOfParams; ++i) {
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
@ -643,12 +649,11 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
SValueNode* pValue = (SValueNode*)pParamNode;
pValue->notReserved = true;
}
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function normalized parameter should be 0/1");
}
}
pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_BINARY};
@ -668,6 +673,12 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
}
// param1 ~ param3
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
for (int32_t i = 1; i < numOfParams; ++i) {
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
@ -677,12 +688,11 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
SValueNode* pValue = (SValueNode*)pParamNode;
pValue->notReserved = true;
}
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function normalized parameter should be 0/1");
}
}
pFunc->node.resType =
@ -1532,7 +1542,7 @@ static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BINARY].bytes, .type = TSDB_DATA_TYPE_BINARY};
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_JSON].bytes, .type = TSDB_DATA_TYPE_JSON};
return TSDB_CODE_SUCCESS;
}
@ -2610,7 +2620,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_group_key",
.type = FUNCTION_TYPE_GROUP_KEY,
.classification = FUNC_MGT_AGG_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC,
.translateFunc = translateGroupKey,
.getEnvFunc = getGroupKeyFuncEnv,
.initFunc = functionSetup,

View File

@ -16,6 +16,7 @@
#include "builtinsimpl.h"
#include "cJSON.h"
#include "function.h"
#include "query.h"
#include "querynodes.h"
#include "taggfunction.h"
#include "tcompare.h"
@ -1471,8 +1472,8 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
int32_t pageId = pTuplePos->pageId;
int32_t offset = pTuplePos->offset;
if (pTuplePos->pageId != -1) {
int32_t numOfCols = taosArrayGetSize(pCtx->pSrcBlock->pDataBlock);
if (pTuplePos->pageId != -1 && pCtx->subsidiaries.num > 0) {
int32_t numOfCols = pCtx->subsidiaries.num;
SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
bool* nullList = (bool*)((char*)pPage + offset);
@ -1483,22 +1484,21 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
int32_t ps = 0;
for (int32_t k = 0; k < srcSlotId; ++k) {
SColumnInfoData* pSrcCol = taosArrayGet(pCtx->pSrcBlock->pDataBlock, k);
ps += pSrcCol->info.bytes;
}
SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
if (nullList[srcSlotId]) {
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
if (nullList[j]) {
colDataAppendNULL(pDstCol, rowIndex);
} else {
colDataAppend(pDstCol, rowIndex, (pStart + ps), false);
colDataAppend(pDstCol, rowIndex, pStart, false);
}
pStart += pDstCol->info.bytes;
}
releaseBufPage(pCtx->pBuf, pPage);
}
}
@ -3194,7 +3194,10 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
if (pCtx->subsidiaries.num > 0) {
saveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);
#endif
// allocate the buffer and keep the data of this row into the new allocated buffer
pEntryInfo->numOfRes++;
taosheapsort((void*)pItems, sizeof(STopBotResItem), pEntryInfo->numOfRes, (const void*)&type, topBotResComparFn,
@ -3215,7 +3218,9 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
if (pCtx->subsidiaries.num > 0) {
copyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
#endif
taosheapadjust((void*)pItems, sizeof(STopBotResItem), 0, pEntryInfo->numOfRes - 1, (const void*)&type,
topBotResComparFn, NULL, !isTopQuery);
}
@ -3225,7 +3230,11 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = NULL;
int32_t completeRowSize = pSrcBlock->info.rowSize + (int32_t)taosArrayGetSize(pSrcBlock->pDataBlock) * sizeof(bool);
int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
completeRowSize += pc->pExpr->base.resSchema.bytes;
}
if (pCtx->curBufPage == -1) {
pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
@ -3243,19 +3252,22 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
// keep the current row data, extract method
int32_t offset = 0;
bool* nullList = (bool*)((char*)pPage + pPage->num);
char* pStart = (char*)(nullList + sizeof(bool) * (int32_t)taosArrayGetSize(pSrcBlock->pDataBlock));
for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pSrcBlock->pDataBlock); ++i) {
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, i);
bool isNull = colDataIsNull_s(pCol, rowIndex);
if (isNull) {
nullList[i] = true;
char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
offset += pCol->info.bytes;
continue;
}
char* p = colDataGetData(pCol, rowIndex);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
memcpy(pStart + offset, p, varDataTLen(p));
memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
} else {
memcpy(pStart + offset, p, pCol->info.bytes);
}
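The tuple buffer written here is now sized to the subsidiary (selectivity) columns only, not to the whole source row. A sketch of the resulting per-row layout and of the size computation, assuming two subsidiaries (widths are illustrative):

// Per saved tuple, at pPage + offset:
//   bool nullList[subsidiaries.num]   -- one flag per subsidiary column
//   char values[]                     -- each subsidiary's value, resSchema.bytes wide
int32_t num        = 2;
int32_t resBytes[] = {8, 16};             // pc->pExpr->base.resSchema.bytes per subsidiary
int32_t rowSize    = num * sizeof(bool);  // null flags come first
for (int32_t j = 0; j < num; ++j) {
  rowSize += resBytes[j];                 // then the packed values
}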
@ -3273,14 +3285,18 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
int32_t numOfCols = taosArrayGetSize(pSrcBlock->pDataBlock);
int32_t numOfCols = pCtx->subsidiaries.num;
bool* nullList = (bool*)((char*)pPage + pPos->offset);
char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
int32_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, i);
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
offset += pCol->info.bytes;
continue;
@ -3288,7 +3304,7 @@ void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
char* p = colDataGetData(pCol, rowIndex);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
memcpy(pStart + offset, p, varDataTLen(p));
memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
} else {
memcpy(pStart + offset, p, pCol->info.bytes);
}
@ -3302,7 +3318,7 @@ void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
STopBotRes* pRes = GET_ROWCELL_INTERBUF(pEntryInfo);
STopBotRes* pRes = getTopBotOutputInfo(pCtx);
int16_t type = pCtx->input.pData[0]->info.type;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
@ -3319,7 +3335,10 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);
#endif
setSelectivityValue(pCtx, pBlock, &pRes->pItems[i].tuplePos, currentRow);
currentRow += 1;
}
@ -5610,8 +5629,6 @@ int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
int32_t bytes = pInputCol->info.bytes;
int32_t startIndex = pInput->startRowIndex;
// skip the rest of the data blocks to avoid overwriting the first entry.
@ -5626,7 +5643,12 @@ int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
}
char* data = colDataGetData(pInputCol, startIndex);
memcpy(pInfo->data, data, bytes);
if (IS_VAR_DATA_TYPE(pInputCol->info.type)) {
memcpy(pInfo->data, data,
(pInputCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(data) : varDataTLen(data));
} else {
memcpy(pInfo->data, data, pInputCol->info.bytes);
}
pInfo->hasResult = true;
_group_key_over:

View File

@ -18,6 +18,7 @@
#include "querynodes.h"
#include "taos.h"
#include "taoserror.h"
#include "tdatablock.h"
#define COPY_SCALAR_FIELD(fldname) \
do { \
@ -164,7 +165,15 @@ static int32_t valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
}
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
int32_t len = getJsonValueLen(pSrc->datum.p);
pDst->datum.p = taosMemoryCalloc(1, len);
if (NULL == pDst->datum.p) {
return TSDB_CODE_OUT_OF_MEMORY;
}
memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
case TSDB_DATA_TYPE_MEDIUMBLOB:

View File

@ -20,6 +20,7 @@
#include "querynodes.h"
#include "taoserror.h"
#include "tjson.h"
#include "tdatablock.h"
static int32_t nodeToJson(const void* pObj, SJson* pJson);
static int32_t jsonToNode(const SJson* pJson, void* pObj);
@ -2629,7 +2630,18 @@ static int32_t datumToJson(const void* pObj, SJson* pJson) {
case TSDB_DATA_TYPE_VARBINARY:
code = tjsonAddStringToObject(pJson, jkValueDatum, varDataVal(pNode->datum.p));
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
int32_t len = getJsonValueLen(pNode->datum.p);
char* buf = taosMemoryCalloc( len * 2 + 1, sizeof(char));
code = taosHexEncode(pNode->datum.p, buf, len);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
code = tjsonAddStringToObject(pJson, jkValueDatum, buf);
taosMemoryFree(buf);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo
@ -2752,7 +2764,30 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) {
}
break;
}
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes);
if (NULL == pNode->datum.p) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
char* buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + 1);
if (NULL == buf) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
code = tjsonGetStringValue(pJson, jkValueDatum, buf);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
break;
}
code = taosHexDecode(buf, pNode->datum.p, pNode->node.resType.bytes);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
break;
}
taosMemoryFree(buf);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo
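The JSON datum is round-tripped through hex so the raw STag bytes survive the JSON text form. A self-contained sketch of the symmetric pair used above, where datum points at the tag bytes to preserve (return-code checks are elided):

// Hex round-trip for a binary datum of `len` bytes (2 hex chars per byte + NUL).
int32_t len = getJsonValueLen(datum);
char*   hex = taosMemoryCalloc(len * 2 + 1, 1);
taosHexEncode(datum, hex, len);          // binary -> "a1b2..."
// ... hex travels inside the serialized JSON plan ...
char*   out = taosMemoryCalloc(1, len);
taosHexDecode(hex, out, len);            // "a1b2..." -> original bytes
taosMemoryFree(hex);
taosMemoryFree(out);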

View File

@ -20,6 +20,7 @@
#include "taos.h"
#include "taoserror.h"
#include "thash.h"
#include "tdatablock.h"
static SNode* makeNode(ENodeType type, size_t size) {
SNode* p = taosMemoryCalloc(1, size);
@ -1675,6 +1676,10 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
pVal->pz[pVal->nLen + VARSTR_HEADER_SIZE] = 0;
break;
case TSDB_DATA_TYPE_JSON:
pVal->nLen = getJsonValueLen(pNode->datum.p);
pVal->pz = taosMemoryMalloc(pVal->nLen);
memcpy(pVal->pz, pNode->datum.p, pVal->nLen);
break;
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo

View File

@ -62,7 +62,6 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, SMsgBuf* pMsgBuf);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);

View File

@ -3634,8 +3634,8 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm
pReq->numOfColumns = LIST_LENGTH(pStmt->pCols);
pReq->numOfTags = LIST_LENGTH(pStmt->pTags);
if (pStmt->pOptions->commentNull == false) {
pReq->comment = strdup(pStmt->pOptions->comment);
if (NULL == pReq->comment) {
pReq->pComment = strdup(pStmt->pOptions->comment);
if (NULL == pReq->pComment) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pReq->commentLen = strlen(pStmt->pOptions->comment);
@ -3643,6 +3643,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm
pReq->commentLen = -1;
}
buildRollupFuncs(pStmt->pOptions->pRollupFuncs, &pReq->pFuncs);
pReq->numOfFuncs = taosArrayGetSize(pReq->pFuncs);
SName tableName;
tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tableName), pReq->name);

View File

@ -221,6 +221,7 @@ int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) {
}
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) {
if(pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
const char* msgFormat1 = "syntax error near \'%s\'";
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
const char* msgFormat3 = "%s";
@ -348,7 +349,7 @@ static bool isValidateTag(char* input) {
return true;
}
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, SMsgBuf* pMsgBuf) {
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf) {
int32_t retCode = TSDB_CODE_SUCCESS;
cJSON* root = NULL;
SHashObj* keyHash = NULL;

View File

@ -372,7 +372,7 @@ TEST_F(ParserInitialCTest, createStable) {
expect.watermark2 = watermark2;
// expect.ttl = ttl;
if (nullptr != pComment) {
expect.comment = strdup(pComment);
expect.pComment = strdup(pComment);
expect.commentLen = strlen(pComment);
}
};
@ -443,7 +443,7 @@ TEST_F(ParserInitialCTest, createStable) {
}
}
if (expect.commentLen > 0) {
ASSERT_EQ(std::string(req.comment), std::string(expect.comment));
ASSERT_EQ(std::string(req.pComment), std::string(expect.pComment));
}
if (expect.ast1Len > 0) {
ASSERT_EQ(std::string(req.pAst1), std::string(expect.pAst1));

View File

@ -476,10 +476,16 @@ static int32_t pushDownCondOptPushCondToScan(SOptimizeContext* pCxt, SScanLogicN
return pushDownCondOptAppendCond(&pScan->node.pConditions, pCond);
}
static int32_t pushDownCondOptPushCondToProject(SOptimizeContext* pCxt, SProjectLogicNode* pProject, SNode** pCond) {
return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond);
}
static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) {
switch (nodeType(pChild)) {
case QUERY_NODE_LOGIC_PLAN_SCAN:
return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond);
case QUERY_NODE_LOGIC_PLAN_PROJECT:
return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond);
default:
break;
}
@ -713,7 +719,8 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
}
// TODO: remove it after full implementation of pushing down to child
if (1 != LIST_LENGTH(pAgg->node.pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
return TSDB_CODE_SUCCESS;
}

View File

@ -305,18 +305,21 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
char* parseTagDatatoJson(void* p) {
char* string = NULL;
cJSON* json = cJSON_CreateObject();
if (json == NULL) {
goto end;
}
SArray* pTagVals = NULL;
cJSON* json = NULL;
if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
goto end;
}
int16_t nCols = taosArrayGetSize(pTagVals);
if (nCols == 0) {
goto end;
}
char tagJsonKey[256] = {0};
json = cJSON_CreateObject();
if (json == NULL) {
goto end;
}
for (int j = 0; j < nCols; ++j) {
STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
// json key encode by binary
@ -374,6 +377,10 @@ char* parseTagDatatoJson(void* p) {
string = cJSON_PrintUnformatted(json);
end:
cJSON_Delete(json);
taosArrayDestroy(pTagVals);
if(string == NULL){
string = strdup(TSDB_DATA_NULL_STR_L);
}
return string;
}
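With this restructuring the function no longer returns NULL: an empty or unreadable tag yields the literal null string, so callers only need to free the result. A usage sketch (pTag here is assumed to be an STag* taken from the table configuration):

char* json = parseTagDatatoJson(pTag);   // never NULL after this change
// ... print or copy json ...
taosMemoryFree(json);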

View File

@ -192,6 +192,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
return 18;
case TSDB_DATA_TYPE_JSON:
terrno = TSDB_CODE_QRY_JSON_IN_ERROR;
return 0;
default:
assert(0);
}
@ -215,6 +218,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
return 24;
case TSDB_DATA_TYPE_JSON:
terrno = TSDB_CODE_QRY_JSON_IN_ERROR;
return 0;
default:
assert(0);
}

View File

@ -551,7 +551,9 @@ int32_t sclExecOperator(SOperatorNode *node, SScalarCtx *ctx, SScalarParam *outp
SScalarParam* pLeft = &params[0];
SScalarParam* pRight = paramNum > 1 ? &params[1] : NULL;
terrno = TSDB_CODE_SUCCESS;
OperatorFn(pLeft, pRight, output, TSDB_ORDER_ASC);
code = terrno;
_return:
for (int32_t i = 0; i < paramNum; ++i) {
@ -693,7 +695,11 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
res->node.resType.scale = output.columnData->info.scale;
res->node.resType.precision = output.columnData->info.precision;
int32_t type = output.columnData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
if (type == TSDB_DATA_TYPE_JSON){
int32_t len = getJsonValueLen(output.columnData->pData);
res->datum.p = taosMemoryCalloc(len, 1);
memcpy(res->datum.p, output.columnData->pData, len);
} else if (IS_VAR_DATA_TYPE(type)) {
res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {

View File

@ -1152,42 +1152,30 @@ int32_t toJsonFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
char tmp[TSDB_MAX_JSON_TAG_LEN] = {0};
for (int32_t i = 0; i < pInput[0].numOfRows; ++i) {
if (colDataIsNull_s(pInput[0].columnData, i)) {
colDataAppendNULL(pOutput->columnData, i);
continue;
}
char *input = pInput[0].columnData->pData + pInput[0].columnData->varmeta.offset[i];
SArray* pTagVals = taosArrayInit(8, sizeof(STagVal));
STag* pTag = NULL;
if(type == TSDB_DATA_TYPE_NCHAR){
if (varDataTLen(input) > TSDB_MAX_JSON_TAG_LEN){
colDataAppendNULL(pOutput->columnData, i);
continue;
}
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), tmp);
if (len < 0) {
colDataAppendNULL(pOutput->columnData, i);
continue;
}
tmp[len] = 0;
if (colDataIsNull_s(pInput[0].columnData, i)) {
tTagNew(pTagVals, 1, true, &pTag);
}else{
char *input = pInput[0].columnData->pData + pInput[0].columnData->varmeta.offset[i];
if (varDataLen(input) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE){
colDataAppendNULL(pOutput->columnData, i);
continue;
taosArrayDestroy(pTagVals);
return TSDB_CODE_FAILED;
}
memcpy(tmp, varDataVal(input), varDataLen(input));
tmp[varDataLen(input)] = 0;
if(parseJsontoTagData(tmp, pTagVals, &pTag, NULL)){
tTagNew(pTagVals, 1, true, &pTag);
}
}
if(!tjsonValidateJson(tmp)){
colDataAppendNULL(pOutput->columnData, i);
continue;
}
colDataAppend(pOutput->columnData, i, input, false);
colDataAppend(pOutput->columnData, i, (const char*)pTag, false);
tTagFree(pTag);
taosArrayDestroy(pTagVals);
}
pOutput->numOfRows = pInput->numOfRows;
return TSDB_CODE_SUCCESS;
}
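The function now emits a binary STag rather than echoing the JSON text. A condensed sketch of the per-row conversion it performs for one non-null input, with jsonText standing in for the converted NCHAR payload (error handling elided):

SArray* pTagVals = taosArrayInit(8, sizeof(STagVal));
STag*   pTag     = NULL;
if (parseJsontoTagData(jsonText, pTagVals, &pTag, NULL) != 0) {
  tTagNew(pTagVals, 1, true, &pTag);               // fall back to an empty JSON tag
}
colDataAppend(pOutput->columnData, i, (const char*)pTag, false);
tTagFree(pTag);
taosArrayDestroy(pTagVals);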

View File

@ -160,6 +160,9 @@ int64_t getVectorBigintValue_JSON(void *src, int32_t index){
return 0;
} else if(*data == TSDB_DATA_TYPE_NCHAR) { // json inner type can not be BINARY
convertNcharToDouble(data+CHAR_BYTES, &out);
} else if(tTagIsJson(data)){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return 0;
} else {
convertNumberToNumber(data+CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE);
}
@ -416,6 +419,9 @@ int32_t vectorConvertFromVarData(const SScalarParam* pIn, SScalarParam* pOut, in
else if(*data == TSDB_DATA_TYPE_NCHAR) {
data += CHAR_BYTES;
convertType = TSDB_DATA_TYPE_NCHAR;
} else if(tTagIsJson(data)){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return terrno;
} else {
convertNumberToNumber(data+CHAR_BYTES, colDataGetNumData(pOut->columnData, i), *data, outType);
continue;
@ -461,7 +467,10 @@ double getVectorDoubleValue_JSON(void *src, int32_t index){
return out;
} else if(*data == TSDB_DATA_TYPE_NCHAR) { // json inner type can not be BINARY
convertNcharToDouble(data+CHAR_BYTES, &out);
} else {
} else if(tTagIsJson(data)){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return 0;
} else{
convertNumberToNumber(data+CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE);
}
return out;
@ -493,10 +502,18 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t
}
if(typeLeft == TSDB_DATA_TYPE_JSON){
if(tTagIsJson(*pLeftData)){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return false;
}
typeLeft = **pLeftData;
(*pLeftData) ++;
}
if(typeRight == TSDB_DATA_TYPE_JSON){
if(tTagIsJson(*pRightData)){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return false;
}
typeRight = **pRightData;
(*pRightData) ++;
}
@ -1576,7 +1593,11 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
__compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
if(terrno != TSDB_CODE_SUCCESS){
return;
}
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
@ -1709,6 +1730,7 @@ void vectorIsTrue(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
STagVal getJsonValue(char *json, char *key, bool *isExist) {
STagVal val = {.pKey = key};
if (tTagIsJson((const STag *)json) == false){
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
if(isExist){
*isExist = false;
}

View File

@ -88,3 +88,12 @@ SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit) {
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit));
return pSubmitClone;
}
void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) {
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
ASSERT(ref >= 0);
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
}
}
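streamSubmitRefClone and streamDataSubmitRefDec form a small shared-ownership pair around dataRef; the sketch below shows the intended pairing, assuming the clone path takes its own reference (as the symmetric decrement suggests):

SStreamDataSubmit* pClone = streamSubmitRefClone(pSubmit);
if (pClone != NULL) {
  // ... hand pClone to another task or queue ...
  streamDataSubmitRefDec(pClone);   // last decrement frees data and dataRef
}
streamDataSubmitRefDec(pSubmit);    // release the original owner's reference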

View File

@ -63,7 +63,6 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
continue;
}
// TODO: do we need to free this memory?
SSDataBlock block = {0};
assignOneDataBlock(&block, output);
block.info.childId = pTask->selfChildId;

View File

@ -41,12 +41,3 @@ void streamQueueClose(SStreamQueue* queue) {
return;
}
}
void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) {
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
ASSERT(ref >= 0);
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
}
}

View File

@ -94,6 +94,7 @@ extern "C" {
//
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg);
int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg);
int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg);
#ifdef __cplusplus
}

View File

@ -42,6 +42,12 @@ extern "C" {
//
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg);
int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg);
int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg);
typedef struct SReaderParam {
SyncIndex start;
SyncIndex end;
} SReaderParam;
#ifdef __cplusplus
}

View File

@ -67,6 +67,7 @@ typedef struct SSyncNode {
char path[TSDB_FILENAME_LEN];
char raftStorePath[TSDB_FILENAME_LEN * 2];
char configPath[TSDB_FILENAME_LEN * 2];
int32_t batchSize;
// sync io
SWal* pWal;
@ -170,6 +171,7 @@ void syncNodeStart(SSyncNode* pSyncNode);
void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak);
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
// option
bool syncNodeSnapshotEnable(SSyncNode* pSyncNode);

View File

@ -53,8 +53,10 @@ extern "C" {
//
int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode);
int32_t syncNodeReplicate(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg);
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg);
#ifdef __cplusplus
}

View File

@ -40,8 +40,8 @@ typedef struct SSyncSnapshotSender {
bool start;
int32_t seq;
int32_t ack;
void *pReader;
void *pCurrentBlock;
void * pReader;
void * pCurrentBlock;
int32_t blockLen;
SSnapshot snapshot;
SSyncCfg lastConfig;
@ -62,14 +62,14 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender);
int32_t snapshotReSend(SSyncSnapshotSender *pSender);
cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender);
char *snapshotSender2Str(SSyncSnapshotSender *pSender);
char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event);
char * snapshotSender2Str(SSyncSnapshotSender *pSender);
char * snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event);
//---------------------------------------------------
typedef struct SSyncSnapshotReceiver {
bool start;
int32_t ack;
void *pWriter;
void * pWriter;
SyncTerm term;
SyncTerm privateTerm;
SSnapshot snapshot;
@ -85,8 +85,8 @@ bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);
void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver);
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event);
char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
char * snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event);
//---------------------------------------------------
// on message

View File

@ -719,6 +719,8 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries
return false;
}
int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) { return 0; }
int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t ret = 0;
int32_t code = 0;

View File

@ -108,48 +108,82 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
return ret;
}
#if 0
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
// only start once
static void syncNodeStartSnapshot(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, SyncTerm lastApplyTerm,
SyncAppendEntriesReply* pMsg) {
// get sender
SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId));
ASSERT(pSender != NULL);
SSnapshot snapshot = {
.data = NULL, .lastApplyIndex = endIndex, .lastApplyTerm = lastApplyTerm, .lastConfigIndex = SYNC_INDEX_INVALID};
void* pReader = NULL;
SReaderParam readerParam = {.start = beginIndex, .end = endIndex};
ths->pFsm->FpSnapshotStartRead(ths->pFsm, &readerParam, &pReader);
if (!snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) {
ASSERT(pReader != NULL);
snapshotSenderStart(pSender, snapshot, pReader);
} else {
if (pReader != NULL) {
ths->pFsm->FpSnapshotStopRead(ths->pFsm, pReader);
}
}
}
int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesReplyLog2(logBuf, pMsg);
if (pMsg->term < ths->pRaftStore->currentTerm) {
sTrace("DropStaleResponse, receive term:%" PRIu64 ", current term:%" PRIu64 "", pMsg->term,
ths->pRaftStore->currentTerm);
return ret;
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
syncNodeEventLog(ths, "recv sync-append-entries-reply, maybe replica already dropped");
return -1;
}
syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== before pNextIndex", ths->pNextIndex);
syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== before pMatchIndex", ths->pMatchIndex);
// this code is not needed: if we receive reply.term, we must already have sent a request for that term.
// if (pMsg->term > ths->pRaftStore->currentTerm) {
// syncNodeUpdateTerm(ths, pMsg->term);
// }
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, recv-term:%lu, drop stale response", pMsg->term);
syncNodeEventLog(ths, logBuf);
return -1;
}
// error term
if (pMsg->term > ths->pRaftStore->currentTerm) {
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodeLog2(logBuf, ths);
sError("%s", logBuf);
return ret;
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, error term, recv-term:%lu", pMsg->term);
syncNodeErrorLog(ths, logBuf);
return -1;
}
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
if (pMsg->success) {
// nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1);
SyncIndex newNextIndex = pMsg->matchIndex + 1;
SyncIndex newMatchIndex = pMsg->matchIndex;
// matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex);
if (ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex) &&
ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex - 1)) {
// nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), newNextIndex);
// maybe commit
syncMaybeAdvanceCommitIndex(ths);
// matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex);
// maybe commit
if (ths->state == TAOS_SYNC_STATE_LEADER) {
syncMaybeAdvanceCommitIndex(ths);
}
} else {
// start snapshot <match+1, old snapshot.end>
SSnapshot snapshot;
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
syncNodeStartSnapshot(ths, newMatchIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pMsg);
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), snapshot.lastApplyIndex + 1);
syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex);
}
} else {
SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
@ -157,18 +191,35 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
// notice! int64, uint64
if (nextIndex > SYNC_INDEX_BEGIN) {
--nextIndex;
if (ths->pLogStore->syncLogExist(ths->pLogStore, nextIndex) &&
ths->pLogStore->syncLogExist(ths->pLogStore, nextIndex - 1)) {
// do nothing
} else {
SSyncRaftEntry* pEntry;
int32_t code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, nextIndex, &pEntry);
ASSERT(code == 0);
syncNodeStartSnapshot(ths, SYNC_INDEX_BEGIN, nextIndex, pEntry->term, pMsg);
// get sender
SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId));
ASSERT(pSender != NULL);
SyncIndex sentryIndex = pSender->snapshot.lastApplyIndex + 1;
// update nextIndex to sentryIndex
if (nextIndex <= sentryIndex) {
nextIndex = sentryIndex;
}
}
} else {
nextIndex = SYNC_INDEX_BEGIN;
}
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex);
}
syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== after pNextIndex", ths->pNextIndex);
syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== after pMatchIndex", ths->pMatchIndex);
return ret;
return 0;
}
#endif
int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;

View File

@ -50,7 +50,6 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths);
// process message ----
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg);
int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg);
int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncIndex* pRetIndex);
// life cycle
static void syncFreeNode(void* param);
@ -627,6 +626,94 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
return ret;
}
int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
if (arrSize < 0) {
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
}
int32_t ret = 0;
SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
}
ASSERT(rid == pSyncNode->rid);
ret = syncNodeProposeBatch(pSyncNode, pMsgArr, pIsWeakArr, arrSize);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return ret;
}
static bool syncNodeBatchOK(SRpcMsg* pMsgArr, int32_t arrSize) {
for (int32_t i = 0; i < arrSize; ++i) {
if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE) {
return false;
}
if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
return false;
}
}
return true;
}
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
if (!syncNodeBatchOK(pMsgArr, arrSize)) {
syncNodeErrorLog(pSyncNode, "sync propose batch error");
terrno = TSDB_CODE_SYN_BATCH_ERROR;
return -1;
}
if (arrSize > SYNC_MAX_BATCH_SIZE) {
syncNodeErrorLog(pSyncNode, "sync propose match batch error");
terrno = TSDB_CODE_SYN_BATCH_ERROR;
return -1;
}
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
syncNodeErrorLog(pSyncNode, "sync propose not leader");
terrno = TSDB_CODE_SYN_NOT_LEADER;
return -1;
}
if (pSyncNode->changing) {
syncNodeErrorLog(pSyncNode, "sync propose not ready");
terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
return -1;
}
SRaftMeta raftArr[SYNC_MAX_BATCH_SIZE];
for (int i = 0; i < arrSize; ++i) {
SRespStub stub;
stub.createTime = taosGetTimestampMs();
stub.rpcMsg = pMsgArr[i];
uint64_t seqNum = syncRespMgrAdd(pSyncNode->pSyncRespMgr, &stub);
raftArr[i].isWeak = pIsWeakArr[i];
raftArr[i].seqNum = seqNum;
}
SyncClientRequestBatch* pSyncMsg = syncClientRequestBatchBuild(pMsgArr, raftArr, arrSize, pSyncNode->vgId);
ASSERT(pSyncMsg != NULL);
SRpcMsg rpcMsg;
syncClientRequestBatch2RpcMsg(pSyncMsg, &rpcMsg);
taosMemoryFree(pSyncMsg); // only free msg body, do not free rpc msg content
if (pSyncNode->FpEqMsg != NULL && (*pSyncNode->FpEqMsg)(pSyncNode->msgcb, &rpcMsg) == 0) {
// enqueue msg ok
} else {
sError("enqueue msg error, FpEqMsg is NULL");
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
}
return 0;
}
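A caller-side sketch of the new batch propose path. It assumes the caller already holds the node's rid and has built its SRpcMsg requests (msg0/msg1 are illustrative); arrSize must stay within SYNC_MAX_BATCH_SIZE and the batch may not contain config-change messages:

SRpcMsg msgArr[2];
bool    isWeakArr[2] = {false, false};
msgArr[0] = msg0;
msgArr[1] = msg1;
if (syncProposeBatch(rid, msgArr, isWeakArr, 2) != 0) {
  // terrno explains the failure: not leader, node changing, batch too large, ...
}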
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
int32_t ret = 0;
@ -2362,6 +2449,49 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
return ret;
}
int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* pMsg) {
int32_t code = 0;
if (ths->state != TAOS_SYNC_STATE_LEADER) {
// call FpCommitCb, delete resp mgr
return -1;
}
SyncIndex index = ths->pLogStore->syncLogWriteIndex(ths->pLogStore);
SyncTerm term = ths->pRaftStore->currentTerm;
int32_t raftMetaArrayLen = sizeof(SRaftMeta) * pMsg->dataCount;
int32_t rpcArrayLen = sizeof(SRpcMsg) * pMsg->dataCount;
SRaftMeta* raftMetaArr = (SRaftMeta*)(pMsg->data);
SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + raftMetaArrayLen);
for (int32_t i = 0; i < pMsg->dataCount; ++i) {
SSyncRaftEntry* pEntry = syncEntryBuild(msgArr[i].contLen);
ASSERT(pEntry != NULL);
pEntry->originalRpcType = msgArr[i].msgType;
pEntry->seqNum = raftMetaArr[i].seqNum;
pEntry->isWeak = raftMetaArr[i].isWeak;
pEntry->term = term;
pEntry->index = index;
memcpy(pEntry->data, msgArr[i].pCont, msgArr[i].contLen);
ASSERT(msgArr[i].contLen == pEntry->dataLen);
code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
if (code != 0) {
// del resp mgr, call FpCommitCb
ASSERT(0);
return -1;
}
}
// fsync once
SSyncLogStoreData* pData = ths->pLogStore->data;
SWal* pWal = pData->pWal;
walFsync(pWal, true);
return 0;
}
static void syncFreeNode(void* param) {
SSyncNode* pNode = param;
// inner object already free

View File

@ -591,7 +591,7 @@ void syncPingReplySerialize(const SyncPingReply* pMsg, char* buf, uint32_t bufLe
void syncPingReplyDeserialize(const char* buf, uint32_t len, SyncPingReply* pMsg) {
memcpy(pMsg, buf, len);
ASSERT(len == pMsg->bytes);
ASSERT(pMsg->bytes == sizeof(SyncPing) + pMsg->dataLen);
ASSERT(pMsg->bytes == sizeof(SyncPingReply) + pMsg->dataLen);
}
char* syncPingReplySerialize2(const SyncPingReply* pMsg, uint32_t* len) {
@ -956,6 +956,48 @@ void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg) {
}
}
// ---- message process SyncClientRequestBatch----
// block1:
// block2: SRaftMeta array
// block3: rpc msg array (with pCont)
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
int32_t vgId) {
ASSERT(rpcMsgArr != NULL);
ASSERT(arrSize > 0);
int32_t dataLen = 0;
int32_t raftMetaArrayLen = sizeof(SRaftMeta) * arrSize;
int32_t rpcArrayLen = sizeof(SRpcMsg) * arrSize;
dataLen += (raftMetaArrayLen + rpcArrayLen);
uint32_t bytes = sizeof(SyncClientRequestBatch) + dataLen;
SyncClientRequestBatch* pMsg = taosMemoryMalloc(bytes);
memset(pMsg, 0, bytes);
pMsg->bytes = bytes;
pMsg->vgId = vgId;
pMsg->msgType = TDMT_SYNC_CLIENT_REQUEST_BATCH;
pMsg->dataCount = arrSize;
pMsg->dataLen = dataLen;
SRaftMeta* raftMetaArr = (SRaftMeta*)(pMsg->data);
SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + raftMetaArrayLen);
for (int i = 0; i < arrSize; ++i) {
// init raftMetaArr
raftMetaArr[i].isWeak = raftArr[i].isWeak;
raftMetaArr[i].seqNum = raftArr[i].seqNum;
// init msgArr
msgArr[i] = rpcMsgArr[i];
}
return pMsg;
}
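The receiver recovers the two arrays from data[] with the same arithmetic; syncNodeOnClientRequestBatchCb does exactly this. The decode side in isolation:

// data[] layout: SRaftMeta[dataCount] followed by SRpcMsg[dataCount].
int32_t    raftMetaArrayLen = sizeof(SRaftMeta) * pMsg->dataCount;
SRaftMeta* raftMetaArr      = (SRaftMeta*)(pMsg->data);
SRpcMsg*   msgArr           = (SRpcMsg*)((char*)(pMsg->data) + raftMetaArrayLen);
for (int32_t i = 0; i < pMsg->dataCount; ++i) {
  // raftMetaArr[i].seqNum / .isWeak pair up with msgArr[i].pCont / .contLen
}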
void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg) {}
// ---- message process SyncRequestVote----
SyncRequestVote* syncRequestVoteBuild(int32_t vgId) {
uint32_t bytes = sizeof(SyncRequestVote);
@ -1426,6 +1468,279 @@ void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg) {
}
}
// ---- message process SyncAppendEntriesBatch----
// block1: SOffsetAndContLen
// block2: SOffsetAndContLen Array
// block3: SRpcMsg Array
// block4: SRpcMsg pCont Array
SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId) {
ASSERT(rpcMsgArr != NULL);
ASSERT(arrSize > 0);
int32_t dataLen = 0;
int32_t metaArrayLen = sizeof(SOffsetAndContLen) * arrSize; // <offset, contLen>
int32_t rpcArrayLen = sizeof(SRpcMsg) * arrSize; // SRpcMsg
int32_t contArrayLen = 0;
for (int i = 0; i < arrSize; ++i) { // SRpcMsg pCont
contArrayLen += rpcMsgArr[i].contLen;
}
dataLen += (metaArrayLen + rpcArrayLen + contArrayLen);
uint32_t bytes = sizeof(SyncAppendEntriesBatch) + dataLen;
SyncAppendEntriesBatch* pMsg = taosMemoryMalloc(bytes);
memset(pMsg, 0, bytes);
pMsg->bytes = bytes;
pMsg->vgId = vgId;
pMsg->msgType = TDMT_SYNC_APPEND_ENTRIES_BATCH;
pMsg->dataCount = arrSize;
pMsg->dataLen = dataLen;
SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data);
SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + metaArrayLen);
char* pData = pMsg->data;
for (int i = 0; i < arrSize; ++i) {
// init <offset, contLen>
if (i == 0) {
metaArr[i].offset = metaArrayLen + rpcArrayLen;
metaArr[i].contLen = rpcMsgArr[i].contLen;
} else {
metaArr[i].offset = metaArr[i - 1].offset + metaArr[i - 1].contLen;
metaArr[i].contLen = rpcMsgArr[i].contLen;
}
// init msgArr
msgArr[i] = rpcMsgArr[i];
// init data
ASSERT(rpcMsgArr[i].contLen == metaArr[i].contLen);
memcpy(pData + metaArr[i].offset, rpcMsgArr[i].pCont, rpcMsgArr[i].contLen);
}
return pMsg;
}
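As a worked example of the offset bookkeeping above (the entry sizes are illustrative only):
// For a 2-entry batch with contLen 16 and 24:
//   metaArr[0].offset  = metaArrayLen + rpcArrayLen           // first pCont starts right after both arrays
//   metaArr[0].contLen = 16
//   metaArr[1].offset  = metaArr[0].offset + 16               // pCont blobs are packed back to back
//   metaArr[1].contLen = 24
//   pMsg->dataLen      = metaArrayLen + rpcArrayLen + 16 + 24
// Entry i's payload therefore lives at pMsg->data + metaArr[i].offset, which is how
// syncAppendEntriesBatch2RpcMsgArray recovers it further below.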
void syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg) {
if (pMsg != NULL) {
taosMemoryFree(pMsg);
}
}
void syncAppendEntriesBatchSerialize(const SyncAppendEntriesBatch* pMsg, char* buf, uint32_t bufLen) {
ASSERT(pMsg->bytes <= bufLen);
memcpy(buf, pMsg, pMsg->bytes);
}
void syncAppendEntriesBatchDeserialize(const char* buf, uint32_t len, SyncAppendEntriesBatch* pMsg) {
memcpy(pMsg, buf, len);
ASSERT(len == pMsg->bytes);
ASSERT(pMsg->bytes == sizeof(SyncAppendEntriesBatch) + pMsg->dataLen);
}
char* syncAppendEntriesBatchSerialize2(const SyncAppendEntriesBatch* pMsg, uint32_t* len) {
char* buf = taosMemoryMalloc(pMsg->bytes);
ASSERT(buf != NULL);
syncAppendEntriesBatchSerialize(pMsg, buf, pMsg->bytes);
if (len != NULL) {
*len = pMsg->bytes;
}
return buf;
}
SyncAppendEntriesBatch* syncAppendEntriesBatchDeserialize2(const char* buf, uint32_t len) {
uint32_t bytes = *((uint32_t*)buf);
SyncAppendEntriesBatch* pMsg = taosMemoryMalloc(bytes);
ASSERT(pMsg != NULL);
syncAppendEntriesBatchDeserialize(buf, len, pMsg);
ASSERT(len == pMsg->bytes);
return pMsg;
}
void syncAppendEntriesBatch2RpcMsg(const SyncAppendEntriesBatch* pMsg, SRpcMsg* pRpcMsg) {
memset(pRpcMsg, 0, sizeof(*pRpcMsg));
pRpcMsg->msgType = pMsg->msgType;
pRpcMsg->contLen = pMsg->bytes;
pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
syncAppendEntriesBatchSerialize(pMsg, pRpcMsg->pCont, pRpcMsg->contLen);
}
void syncAppendEntriesBatchFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesBatch* pMsg) {
syncAppendEntriesBatchDeserialize(pRpcMsg->pCont, pRpcMsg->contLen, pMsg);
}
SyncAppendEntriesBatch* syncAppendEntriesBatchFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
ASSERT(pMsg != NULL);
return pMsg;
}
cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg) {
char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId);
cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
cJSON* pSrcId = cJSON_CreateObject();
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->srcId.addr);
cJSON_AddStringToObject(pSrcId, "addr", u64buf);
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
cJSON_AddNumberToObject(pTmp, "addr_port", port);
}
cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
cJSON* pDestId = cJSON_CreateObject();
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->destId.addr);
cJSON_AddStringToObject(pDestId, "addr", u64buf);
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
cJSON_AddNumberToObject(pTmp, "addr_port", port);
}
cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
cJSON_AddItemToObject(pRoot, "destId", pDestId);
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->term);
cJSON_AddStringToObject(pRoot, "term", u64buf);
snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->prevLogIndex);
cJSON_AddStringToObject(pRoot, "prevLogIndex", u64buf);
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->prevLogTerm);
cJSON_AddStringToObject(pRoot, "prevLogTerm", u64buf);
snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->commitIndex);
cJSON_AddStringToObject(pRoot, "commitIndex", u64buf);
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->privateTerm);
cJSON_AddStringToObject(pRoot, "privateTerm", u64buf);
cJSON_AddNumberToObject(pRoot, "dataCount", pMsg->dataCount);
cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen);
int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pMsg->dataCount; // <offset, contLen>
int32_t rpcArrayLen = sizeof(SRpcMsg) * pMsg->dataCount; // SRpcMsg
int32_t contArrayLen = pMsg->dataLen - metaArrayLen - rpcArrayLen;
cJSON_AddNumberToObject(pRoot, "metaArrayLen", metaArrayLen);
cJSON_AddNumberToObject(pRoot, "rpcArrayLen", rpcArrayLen);
cJSON_AddNumberToObject(pRoot, "contArrayLen", contArrayLen);
SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data);
SRpcMsg* msgArr = (SRpcMsg*)(pMsg->data + metaArrayLen);
void* pData = (void*)(pMsg->data + metaArrayLen + rpcArrayLen);
cJSON* pMetaArr = cJSON_CreateArray();
cJSON_AddItemToObject(pRoot, "metaArr", pMetaArr);
for (int i = 0; i < pMsg->dataCount; ++i) {
cJSON* pMeta = cJSON_CreateObject();
cJSON_AddNumberToObject(pMeta, "offset", metaArr[i].offset);
cJSON_AddNumberToObject(pMeta, "contLen", metaArr[i].contLen);
cJSON_AddItemToArray(pMetaArr, pMeta);
}
cJSON* pMsgArr = cJSON_CreateArray();
cJSON_AddItemToObject(pRoot, "msgArr", pMsgArr);
for (int i = 0; i < pMsg->dataCount; ++i) {
cJSON* pRpcMsgJson = cJSON_CreateObject();
cJSON_AddNumberToObject(pRpcMsgJson, "code", msgArr[i].code);
cJSON_AddNumberToObject(pRpcMsgJson, "contLen", msgArr[i].contLen);
cJSON_AddNumberToObject(pRpcMsgJson, "msgType", msgArr[i].msgType);
cJSON_AddItemToArray(pMsgArr, pRpcMsgJson);
}
char* s;
s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen);
cJSON_AddStringToObject(pRoot, "data", s);
taosMemoryFree(s);
s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen);
cJSON_AddStringToObject(pRoot, "data2", s);
taosMemoryFree(s);
}
cJSON* pJson = cJSON_CreateObject();
cJSON_AddItemToObject(pJson, "SyncAppendEntriesBatch", pRoot);
return pJson;
}
char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg) {
cJSON* pJson = syncAppendEntriesBatch2Json(pMsg);
char* serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize,
int32_t* pRetArrSize) {
if (pRetArrSize != NULL) {
*pRetArrSize = pSyncMsg->dataCount;
}
int32_t arrSize = pSyncMsg->dataCount;
if (arrSize > maxArrSize) {
arrSize = maxArrSize;
}
int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pSyncMsg->dataCount; // <offset, contLen>
int32_t rpcArrayLen = sizeof(SRpcMsg) * pSyncMsg->dataCount; // SRpcMsg
int32_t contArrayLen = pSyncMsg->dataLen - metaArrayLen - rpcArrayLen;
SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pSyncMsg->data);
SRpcMsg* msgArr = (SRpcMsg*)(pSyncMsg->data + metaArrayLen);
void* pData = pSyncMsg->data + metaArrayLen + rpcArrayLen;
for (int i = 0; i < arrSize; ++i) {
rpcMsgArr[i] = msgArr[i];
rpcMsgArr[i].pCont = rpcMallocCont(msgArr[i].contLen);
void* pRpcCont = pSyncMsg->data + metaArr[i].offset;
memcpy(rpcMsgArr[i].pCont, pRpcCont, rpcMsgArr[i].contLen);
}
}
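Taken together, the helpers above give a complete sender/receiver round trip. A hedged sketch of that flow (the function name is illustrative; error handling omitted):
// Sketch only: build a batch, turn it into a transport message, then unpack it on the
// receiving side and release everything that was allocated along the way.
static void syncAppendEntriesBatchRoundTripSketch(SRpcMsg* inArr, int32_t n, int32_t vgId) {
  SyncAppendEntriesBatch* pBatch = syncAppendEntriesBatchBuild(inArr, n, vgId);
  SRpcMsg wire;  // what would travel over the transport
  syncAppendEntriesBatch2RpcMsg(pBatch, &wire);
  syncAppendEntriesBatchDestroy(pBatch);
  // receiver side
  SyncAppendEntriesBatch* pRecv = syncAppendEntriesBatchFromRpcMsg2(&wire);
  SRpcMsg outArr[SYNC_MAX_BATCH_SIZE];
  int32_t retSize = 0;
  syncAppendEntriesBatch2RpcMsgArray(pRecv, outArr, SYNC_MAX_BATCH_SIZE, &retSize);
  // 2RpcMsgArray reports the full dataCount but fills at most maxArrSize slots,
  // and it allocates a fresh pCont for every slot it does fill.
  for (int32_t i = 0; i < retSize && i < SYNC_MAX_BATCH_SIZE; ++i) {
    rpcFreeCont(outArr[i].pCont);
  }
  syncAppendEntriesBatchDestroy(pRecv);
  rpcFreeCont(wire.pCont);
}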
// for debug ----------------------
void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg) {
char* serialized = syncAppendEntriesBatch2Str(pMsg);
printf("syncAppendEntriesBatchPrint | len:%lu | %s \n", strlen(serialized), serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void syncAppendEntriesBatchPrint2(char* s, const SyncAppendEntriesBatch* pMsg) {
char* serialized = syncAppendEntriesBatch2Str(pMsg);
printf("syncAppendEntriesBatchPrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void syncAppendEntriesBatchLog(const SyncAppendEntriesBatch* pMsg) {
char* serialized = syncAppendEntriesBatch2Str(pMsg);
sTrace("syncAppendEntriesBatchLog | len:%lu | %s", strlen(serialized), serialized);
taosMemoryFree(serialized);
}
void syncAppendEntriesBatchLog2(char* s, const SyncAppendEntriesBatch* pMsg) {
if (gRaftDetailLog) {
char* serialized = syncAppendEntriesBatch2Str(pMsg);
sTraceLong("syncAppendEntriesBatchLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
taosMemoryFree(serialized);
}
}
// ---- message process SyncAppendEntriesReply----
SyncAppendEntriesReply* syncAppendEntriesReplyBuild(int32_t vgId) {
uint32_t bytes = sizeof(SyncAppendEntriesReply);

View File

@ -101,7 +101,7 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
char *syncCfg2Str(SSyncCfg *pSyncCfg) {
cJSON *pJson = syncCfg2Json(pSyncCfg);
char *serialized = cJSON_Print(pJson);
char * serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
@ -109,7 +109,7 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
char *syncCfg2SimpleStr(SSyncCfg *pSyncCfg) {
if (pSyncCfg != NULL) {
int32_t len = 512;
char *s = taosMemoryMalloc(len);
char * s = taosMemoryMalloc(len);
memset(s, 0, len);
snprintf(s, len, "{replica-num:%d, my-index:%d, ", pSyncCfg->replicaNum, pSyncCfg->myIndex);
@ -205,7 +205,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
char *raftCfg2Str(SRaftCfg *pRaftCfg) {
cJSON *pJson = raftCfg2Json(pRaftCfg);
char *serialized = cJSON_Print(pJson);
char * serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
@ -280,7 +280,7 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
(pRaftCfg->configIndexArr)[i] = atoll(pIndex->valuestring);
}
cJSON *pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
cJSON * pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg));
ASSERT(code == 0);

View File

@ -116,6 +116,73 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {
return ret;
}
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
return -1;
}
int32_t ret = 0;
for (int i = 0; i < pSyncNode->peersNum; ++i) {
SRaftId* pDestId = &(pSyncNode->peersId[i]);
// next index
SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId);
// pre index, pre term
SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex);
SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex);
if (preLogTerm == SYNC_TERM_INVALID) {
SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(pSyncNode, pDestId);
ASSERT(pSender != NULL);
ASSERT(!snapshotSenderIsStart(pSender));
SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex);
syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID);
sError("vgId:%d sync get pre term error, nextIndex:%ld, update next-index:%ld, match-index:%d, raftid:%ld",
pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr);
return -1;
}
SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE];
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
int32_t getCount = 0;
for (int32_t i = 0; i < pSyncNode->batchSize; ++i) {
SSyncRaftEntry* pEntry;
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry);
if (code == 0) {
ASSERT(pEntry != NULL);
// TODO: rebuild rpcMsgArr[getCount] from pEntry here (one possible sketch follows this function)
syncEntryDestory(pEntry);
getCount++;
} else {
break;
}
}
SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, getCount, pSyncNode->vgId);
ASSERT(pMsg != NULL);
// prepare msg
pMsg->srcId = pSyncNode->myRaftId;
pMsg->destId = *pDestId;
pMsg->term = pSyncNode->pRaftStore->currentTerm;
pMsg->prevLogIndex = preLogIndex;
pMsg->prevLogTerm = preLogTerm;
pMsg->commitIndex = pSyncNode->commitIndex;
pMsg->privateTerm = 0;
pMsg->dataCount = getCount;
// send msg
syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
syncAppendEntriesBatchDestroy(pMsg);
}
return 0;
}
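The rpc slots gathered above are still left empty by the inner loop. One hypothetical way the marked step could be filled in, using only the entry fields the batch receive path already relies on (originalRpcType, data, dataLen); everything else here is illustrative, not the committed implementation:
// Sketch: turn a raft log entry back into the SRpcMsg that rpcMsgArr[getCount] expects.
static void syncEntryToRpcMsgSketch(const SSyncRaftEntry* pEntry, SRpcMsg* pRpcMsg) {
  memset(pRpcMsg, 0, sizeof(*pRpcMsg));
  pRpcMsg->msgType = pEntry->originalRpcType;  // original client msg type saved in the entry
  pRpcMsg->contLen = pEntry->dataLen;
  pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
  memcpy(pRpcMsg->pCont, pEntry->data, pEntry->dataLen);
}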
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
@ -234,4 +301,24 @@ int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, c
syncAppendEntries2RpcMsg(pMsg, &rpcMsg);
syncNodeSendMsgById(destRaftId, pSyncNode, &rpcMsg);
return ret;
}
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId,
const SyncAppendEntriesBatch* pMsg) {
do {
char host[128];
uint16_t port;
syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
sDebug(
"vgId:%d, send sync-append-entries-batch to %s:%d, {term:%lu, pre-index:%ld, pre-term:%lu, pterm:%lu, "
"commit:%ld, "
"datalen:%d, dataCount:%d}",
pSyncNode->vgId, host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm,
pMsg->commitIndex, pMsg->dataLen, pMsg->dataCount);
} while (0);
SRpcMsg rpcMsg;
syncAppendEntriesBatch2RpcMsg(pMsg, &rpcMsg);
syncNodeSendMsgById(destRaftId, pSyncNode, &rpcMsg);
return 0;
}

View File

@ -349,14 +349,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
char *snapshotSender2Str(SSyncSnapshotSender *pSender) {
cJSON *pJson = snapshotSender2Json(pSender);
char *serialized = cJSON_Print(pJson);
char * serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
int32_t len = 256;
char *s = taosMemoryMalloc(len);
char * s = taosMemoryMalloc(len);
SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
char host[64];
@ -604,7 +604,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
cJSON_AddStringToObject(pFromId, "addr", u64buf);
{
uint64_t u64 = pReceiver->fromId.addr;
cJSON *pTmp = pFromId;
cJSON * pTmp = pFromId;
char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
@ -637,14 +637,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) {
cJSON *pJson = snapshotReceiver2Json(pReceiver);
char *serialized = cJSON_Print(pJson);
char * serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) {
int32_t len = 256;
char *s = taosMemoryMalloc(len);
char * s = taosMemoryMalloc(len);
SRaftId fromId = pReceiver->fromId;
char host[128];

View File

@ -21,6 +21,7 @@ add_executable(syncEntryCacheTest "")
add_executable(syncRequestVoteTest "")
add_executable(syncRequestVoteReplyTest "")
add_executable(syncAppendEntriesTest "")
add_executable(syncAppendEntriesBatchTest "")
add_executable(syncAppendEntriesReplyTest "")
add_executable(syncClientRequestTest "")
add_executable(syncTimeoutTest "")
@ -146,6 +147,10 @@ target_sources(syncAppendEntriesTest
PRIVATE
"syncAppendEntriesTest.cpp"
)
target_sources(syncAppendEntriesBatchTest
PRIVATE
"syncAppendEntriesBatchTest.cpp"
)
target_sources(syncAppendEntriesReplyTest
PRIVATE
"syncAppendEntriesReplyTest.cpp"
@ -387,6 +392,11 @@ target_include_directories(syncAppendEntriesTest
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(syncAppendEntriesBatchTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(syncAppendEntriesReplyTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/sync"
@ -636,6 +646,10 @@ target_link_libraries(syncAppendEntriesTest
sync
gtest_main
)
target_link_libraries(syncAppendEntriesBatchTest
sync
gtest_main
)
target_link_libraries(syncAppendEntriesReplyTest
sync
gtest_main

View File

@ -0,0 +1,139 @@
//#include <gtest/gtest.h>
#include <stdio.h>
#include "syncIO.h"
#include "syncInt.h"
#include "syncMessage.h"
#include "syncUtil.h"
#include "trpc.h"
void logTest() {
sTrace("--- sync log test: trace");
sDebug("--- sync log test: debug");
sInfo("--- sync log test: info");
sWarn("--- sync log test: warn");
sError("--- sync log test: error");
sFatal("--- sync log test: fatal");
}
SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) {
SRpcMsg *pRpcMsg = (SRpcMsg *)taosMemoryMalloc(sizeof(SRpcMsg));
memset(pRpcMsg, 0, sizeof(SRpcMsg));
pRpcMsg->msgType = TDMT_SYNC_PING;
pRpcMsg->contLen = dataLen;
pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
pRpcMsg->code = 10 * i;
snprintf((char *)pRpcMsg->pCont, pRpcMsg->contLen, "value_%d", i);
return pRpcMsg;
}
SyncAppendEntriesBatch *createMsg() {
SRpcMsg rpcMsgArr[5];
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
for (int32_t i = 0; i < 5; ++i) {
SRpcMsg *pRpcMsg = createRpcMsg(i, 20);
rpcMsgArr[i] = *pRpcMsg;
taosMemoryFree(pRpcMsg);
}
SyncAppendEntriesBatch *pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, 5, 1234);
pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234);
pMsg->srcId.vgId = 100;
pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678);
pMsg->destId.vgId = 100;
pMsg->prevLogIndex = 11;
pMsg->prevLogTerm = 22;
pMsg->commitIndex = 33;
pMsg->privateTerm = 44;
return pMsg;
}
void test1() {
SyncAppendEntriesBatch *pMsg = createMsg();
syncAppendEntriesBatchLog2((char *)"test1:", pMsg);
SRpcMsg rpcMsgArr[5];
int32_t retArrSize;
syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, 5, &retArrSize);
for (int i = 0; i < retArrSize; ++i) {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "==test1 decode rpc msg %d: msgType:%d, code:%d, contLen:%d, pCont:%s \n", i,
rpcMsgArr[i].msgType, rpcMsgArr[i].code, rpcMsgArr[i].contLen, (char *)rpcMsgArr[i].pCont);
sTrace("%s", logBuf);
}
syncAppendEntriesBatchDestroy(pMsg);
}
/*
void test2() {
SyncAppendEntries *pMsg = createMsg();
uint32_t len = pMsg->bytes;
char * serialized = (char *)taosMemoryMalloc(len);
syncAppendEntriesSerialize(pMsg, serialized, len);
SyncAppendEntries *pMsg2 = syncAppendEntriesBuild(pMsg->dataLen, 1000);
syncAppendEntriesDeserialize(serialized, len, pMsg2);
syncAppendEntriesLog2((char *)"test2: syncAppendEntriesSerialize -> syncAppendEntriesDeserialize ", pMsg2);
taosMemoryFree(serialized);
syncAppendEntriesDestroy(pMsg);
syncAppendEntriesDestroy(pMsg2);
}
void test3() {
SyncAppendEntries *pMsg = createMsg();
uint32_t len;
char * serialized = syncAppendEntriesSerialize2(pMsg, &len);
SyncAppendEntries *pMsg2 = syncAppendEntriesDeserialize2(serialized, len);
syncAppendEntriesLog2((char *)"test3: syncAppendEntriesSerialize3 -> syncAppendEntriesDeserialize2 ", pMsg2);
taosMemoryFree(serialized);
syncAppendEntriesDestroy(pMsg);
syncAppendEntriesDestroy(pMsg2);
}
void test4() {
SyncAppendEntries *pMsg = createMsg();
SRpcMsg rpcMsg;
syncAppendEntries2RpcMsg(pMsg, &rpcMsg);
SyncAppendEntries *pMsg2 = (SyncAppendEntries *)taosMemoryMalloc(rpcMsg.contLen);
syncAppendEntriesFromRpcMsg(&rpcMsg, pMsg2);
syncAppendEntriesLog2((char *)"test4: syncAppendEntries2RpcMsg -> syncAppendEntriesFromRpcMsg ", pMsg2);
rpcFreeCont(rpcMsg.pCont);
syncAppendEntriesDestroy(pMsg);
syncAppendEntriesDestroy(pMsg2);
}
void test5() {
SyncAppendEntries *pMsg = createMsg();
SRpcMsg rpcMsg;
syncAppendEntries2RpcMsg(pMsg, &rpcMsg);
SyncAppendEntries *pMsg2 = syncAppendEntriesFromRpcMsg2(&rpcMsg);
syncAppendEntriesLog2((char *)"test5: syncAppendEntries2RpcMsg -> syncAppendEntriesFromRpcMsg2 ", pMsg2);
rpcFreeCont(rpcMsg.pCont);
syncAppendEntriesDestroy(pMsg);
syncAppendEntriesDestroy(pMsg2);
}
*/
int main() {
gRaftDetailLog = true;
tsAsyncLog = 0;
sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
logTest();
test1();
/*
test2();
test3();
test4();
test5();
*/
return 0;
}

View File

@ -77,7 +77,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
return 0;
}
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void** ppReader) {
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) {
*ppReader = (void*)0xABCD;
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader);

View File

@ -25,7 +25,7 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
int32_t GetSnapshot(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; }
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void** ppReader) { return 0; }
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { return 0; }
int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; }
int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; }

View File

@ -74,7 +74,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
return 0;
}
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void** ppReader) {
int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) {
*ppReader = (void*)0xABCD;
char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader);

View File

@ -27,7 +27,7 @@ extern "C" {
#endif
// meta section begin
typedef struct WalFileInfo {
typedef struct {
int64_t firstVer;
int64_t lastVer;
int64_t createTs;
@ -98,20 +98,20 @@ static inline int walBuildIdxName(SWal* pWal, int64_t fileFirstVer, char* buf) {
return sprintf(buf, "%s/%020" PRId64 "." WAL_INDEX_SUFFIX, pWal->path, fileFirstVer);
}
static inline int walValidHeadCksum(SWalHead* pHead) {
return taosCheckChecksum((uint8_t*)&pHead->head, sizeof(SWalReadHead), pHead->cksumHead);
static inline int walValidHeadCksum(SWalCkHead* pHead) {
return taosCheckChecksum((uint8_t*)&pHead->head, sizeof(SWalCont), pHead->cksumHead);
}
static inline int walValidBodyCksum(SWalHead* pHead) {
static inline int walValidBodyCksum(SWalCkHead* pHead) {
return taosCheckChecksum((uint8_t*)pHead->head.body, pHead->head.bodyLen, pHead->cksumBody);
}
static inline int walValidCksum(SWalHead* pHead, void* body, int64_t bodyLen) {
static inline int walValidCksum(SWalCkHead* pHead, void* body, int64_t bodyLen) {
return walValidHeadCksum(pHead) && walValidBodyCksum(pHead);
}
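For orientation, the calc/valid helpers are two halves of one contract: a writer stamps both checksums into the SWalCkHead before the record reaches disk (as walWriteWithSyncInfo does further below), and a reader re-checks them the way walScanLogGetLastVer does:
//   pWal->writeHead.cksumHead = walCalcHeadCksum(&pWal->writeHead);   // over the SWalCont header
//   pWal->writeHead.cksumBody = walCalcBodyCksum(body, bodyLen);      // over the payload
//   ...
//   if (walValidHeadCksum(pHead) == 0 && walValidBodyCksum(pHead) == 0) {
//     // record is intact
//   }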
static inline uint32_t walCalcHeadCksum(SWalHead* pHead) {
return taosCalcChecksum(0, (uint8_t*)&pHead->head, sizeof(SWalReadHead));
static inline uint32_t walCalcHeadCksum(SWalCkHead* pHead) {
return taosCalcChecksum(0, (uint8_t*)&pHead->head, sizeof(SWalCont));
}
static inline uint32_t walCalcBodyCksum(const void* body, uint32_t len) {

View File

@ -16,7 +16,7 @@
#include "cJSON.h"
#include "os.h"
#include "taoserror.h"
#include "tref.h"
#include "tutil.h"
#include "walInt.h"
bool FORCE_INLINE walLogExist(SWal* pWal, int64_t ver) {
@ -37,26 +37,9 @@ static FORCE_INLINE int walBuildMetaName(SWal* pWal, int metaVer, char* buf) {
return sprintf(buf, "%s/meta-ver%d", pWal->path, metaVer);
}
void* tmemmem(char* haystack, int hlen, char* needle, int nlen) {
char* limit;
if (nlen == 0 || hlen < nlen) {
return NULL;
}
limit = haystack + hlen - nlen + 1;
while ((haystack = (char*)memchr(haystack, needle[0], limit - haystack)) != NULL) {
if (memcmp(haystack, needle, nlen) == 0) {
return haystack;
}
haystack++;
}
return NULL;
}
static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
ASSERT(pWal->fileInfoSet != NULL);
int sz = taosArrayGetSize(pWal->fileInfoSet);
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
ASSERT(sz > 0);
#if 0
for (int i = 0; i < sz; i++) {
@ -101,14 +84,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
char* candidate;
while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(uint64_t))) != NULL) {
// read and validate
SWalHead* logContent = (SWalHead*)candidate;
SWalCkHead* logContent = (SWalCkHead*)candidate;
if (walValidHeadCksum(logContent) == 0 && walValidBodyCksum(logContent) == 0) {
found = candidate;
}
haystack = candidate + 1;
}
if (found == buf) {
SWalHead* logContent = (SWalHead*)found;
SWalCkHead* logContent = (SWalCkHead*)found;
if (walValidHeadCksum(logContent) != 0 || walValidBodyCksum(logContent) != 0) {
// file has to be deleted
taosMemoryFree(buf);
@ -118,7 +101,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
}
}
taosCloseFile(&pFile);
SWalHead* lastEntry = (SWalHead*)found;
SWalCkHead* lastEntry = (SWalCkHead*)found;
return lastEntry->head.version;
}

View File

@ -117,8 +117,8 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
pWal->lastRollSeq = -1;
// init write buffer
memset(&pWal->writeHead, 0, sizeof(SWalHead));
pWal->writeHead.head.headVer = WAL_HEAD_VER;
memset(&pWal->writeHead, 0, sizeof(SWalCkHead));
pWal->writeHead.head.protoVer = WAL_PROTO_VER;
pWal->writeHead.magic = WAL_MAGIC;
if (taosThreadMutexInit(&pWal->mutex, NULL) < 0) {

View File

@ -33,7 +33,7 @@ SWalReadHandle *walOpenReadHandle(SWal *pWal) {
taosThreadMutexInit(&pRead->mutex, NULL);
pRead->pHead = taosMemoryMalloc(sizeof(SWalHead));
pRead->pHead = taosMemoryMalloc(sizeof(SWalCkHead));
if (pRead->pHead == NULL) {
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
taosMemoryFree(pRead);
@ -155,7 +155,7 @@ static int32_t walReadSeekVer(SWalReadHandle *pRead, int64_t ver) {
void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity) { pRead->capacity = capacity; }
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalCkHead *pHead) {
int64_t code;
// TODO: valid ver
@ -170,8 +170,8 @@ int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
ASSERT(taosValidFile(pRead->pReadLogTFile) == true);
code = taosReadFile(pRead->pReadLogTFile, pHead, sizeof(SWalHead));
if (code != sizeof(SWalHead)) {
code = taosReadFile(pRead->pReadLogTFile, pHead, sizeof(SWalCkHead));
if (code != sizeof(SWalCkHead)) {
return -1;
}
@ -186,7 +186,7 @@ int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
return 0;
}
int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalHead *pHead) {
int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalCkHead *pHead) {
int64_t code;
ASSERT(pRead->curVersion == pHead->head.version);
@ -203,12 +203,12 @@ int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalHead *pHead) {
return 0;
}
int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead) {
SWalReadHead *pReadHead = &((*ppHead)->head);
int64_t ver = pReadHead->version;
int32_t walFetchBody(SWalReadHandle *pRead, SWalCkHead **ppHead) {
SWalCont *pReadHead = &((*ppHead)->head);
int64_t ver = pReadHead->version;
if (pRead->capacity < pReadHead->bodyLen) {
void *ptr = taosMemoryRealloc(*ppHead, sizeof(SWalHead) + pReadHead->bodyLen);
void *ptr = taosMemoryRealloc(*ppHead, sizeof(SWalCkHead) + pReadHead->bodyLen);
if (ptr == NULL) {
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
return -1;
@ -241,18 +241,18 @@ int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead) {
return 0;
}
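A hedged usage sketch of the fetch-style read path above (the helper name and the wantBody flag are illustrative; the buffer is the handle's own pHead allocated in walOpenReadHandle):
// Sketch: read the record at `ver`, then either skip its body or pull it into the handle's buffer.
static int32_t walReadOneSketch(SWalReadHandle* pRead, int64_t ver, bool wantBody) {
  if (walFetchHead(pRead, ver, pRead->pHead) < 0) return -1;
  if (!wantBody) {
    // not interested in this record: just advance past the body
    return walSkipFetchBody(pRead, pRead->pHead);
  }
  // walFetchBody may grow the buffer to fit the body, hence the SWalCkHead** parameter
  if (walFetchBody(pRead, &pRead->pHead) < 0) return -1;
  // payload is now at pRead->pHead->head.body, length pRead->pHead->head.bodyLen
  return 0;
}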
int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead) {
int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalCont **ppHead) {
taosThreadMutexLock(&pRead->mutex);
if (walReadWithHandle(pRead, ver) < 0) {
taosThreadMutexUnlock(&pRead->mutex);
return -1;
}
*ppHead = taosMemoryMalloc(sizeof(SWalReadHead) + pRead->pHead->head.bodyLen);
*ppHead = taosMemoryMalloc(sizeof(SWalCont) + pRead->pHead->head.bodyLen);
if (*ppHead == NULL) {
taosThreadMutexUnlock(&pRead->mutex);
return -1;
}
memcpy(*ppHead, &pRead->pHead->head, sizeof(SWalReadHead) + pRead->pHead->head.bodyLen);
memcpy(*ppHead, &pRead->pHead->head, sizeof(SWalCont) + pRead->pHead->head.bodyLen);
taosThreadMutexUnlock(&pRead->mutex);
return 0;
}
@ -282,8 +282,8 @@ int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver) {
ASSERT(taosValidFile(pRead->pReadLogTFile) == true);
code = taosReadFile(pRead->pReadLogTFile, pRead->pHead, sizeof(SWalHead));
if (code != sizeof(SWalHead)) {
code = taosReadFile(pRead->pReadLogTFile, pRead->pHead, sizeof(SWalCkHead));
if (code != sizeof(SWalCkHead)) {
if (code < 0)
terrno = TAOS_SYSTEM_ERROR(errno);
else {
@ -301,7 +301,7 @@ int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver) {
}
if (pRead->capacity < pRead->pHead->head.bodyLen) {
void *ptr = taosMemoryRealloc(pRead->pHead, sizeof(SWalHead) + pRead->pHead->head.bodyLen);
void *ptr = taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pRead->pHead->head.bodyLen);
if (ptr == NULL) {
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
return -1;

View File

@ -142,10 +142,10 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
return -1;
}
// validate offset
SWalHead head;
SWalCkHead head;
ASSERT(taosValidFile(pLogTFile));
int64_t size = taosReadFile(pLogTFile, &head, sizeof(SWalHead));
if (size != sizeof(SWalHead)) {
int64_t size = taosReadFile(pLogTFile, &head, sizeof(SWalCkHead));
if (size != sizeof(SWalCkHead)) {
return -1;
}
code = walValidHeadCksum(&head);
@ -261,7 +261,7 @@ int32_t walEndSnapshot(SWal *pWal) {
}
int walRoll(SWal *pWal) {
int code = 0;
int32_t code = 0;
if (pWal->pWriteIdxTFile != NULL) {
code = taosCloseFile(&pWal->pWriteIdxTFile);
if (code != 0) {
@ -321,12 +321,13 @@ static int walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) {
return 0;
}
int64_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLogMeta syncMeta, const void *body,
int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLogMeta syncMeta, const void *body,
int32_t bodyLen) {
int code = 0;
int32_t code = 0;
// no wal
if (pWal->cfg.level == TAOS_WAL_NOLOG) return 0;
if (bodyLen > TSDB_MAX_WAL_SIZE) {
terrno = TSDB_CODE_WAL_SIZE_LIMIT;
return -1;
@ -356,6 +357,7 @@ int64_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog
terrno = TSDB_CODE_WAL_INVALID_VER;
return -1;
}
/*if (!tfValid(pWal->pWriteLogTFile)) return -1;*/
ASSERT(pWal->writeCur >= 0);
@ -380,7 +382,7 @@ int64_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog
pWal->writeHead.cksumHead = walCalcHeadCksum(&pWal->writeHead);
pWal->writeHead.cksumBody = walCalcBodyCksum(body, bodyLen);
if (taosWriteFile(pWal->pWriteLogTFile, &pWal->writeHead, sizeof(SWalHead)) != sizeof(SWalHead)) {
if (taosWriteFile(pWal->pWriteLogTFile, &pWal->writeHead, sizeof(SWalCkHead)) != sizeof(SWalCkHead)) {
// TODO ftruncate
terrno = TAOS_SYSTEM_ERROR(errno);
wError("vgId:%d, file:%" PRId64 ".log, failed to write since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal),
@ -405,19 +407,19 @@ int64_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog
// set status
if (pWal->vers.firstVer == -1) pWal->vers.firstVer = index;
pWal->vers.lastVer = index;
pWal->totSize += sizeof(SWalHead) + bodyLen;
pWal->totSize += sizeof(SWalCkHead) + bodyLen;
if (walGetCurFileInfo(pWal)->firstVer == -1) {
walGetCurFileInfo(pWal)->firstVer = index;
}
walGetCurFileInfo(pWal)->lastVer = index;
walGetCurFileInfo(pWal)->fileSize += sizeof(SWalHead) + bodyLen;
walGetCurFileInfo(pWal)->fileSize += sizeof(SWalCkHead) + bodyLen;
taosThreadMutexUnlock(&pWal->mutex);
return 0;
}
int64_t walWrite(SWal *pWal, int64_t index, tmsg_t msgType, const void *body, int32_t bodyLen) {
int32_t walWrite(SWal *pWal, int64_t index, tmsg_t msgType, const void *body, int32_t bodyLen) {
SSyncLogMeta syncMeta = {
.isWeek = -1,
.seqNum = UINT64_MAX,
@ -435,27 +437,3 @@ void walFsync(SWal *pWal, bool forceFsync) {
}
}
}
/*static int walValidateOffset(SWal* pWal, int64_t ver) {*/
/*int code = 0;*/
/*SWalHead *pHead = NULL;*/
/*code = (int)walRead(pWal, &pHead, ver);*/
/*if(pHead->head.version != ver) {*/
/*return -1;*/
/*}*/
/*return 0;*/
/*}*/
/*static int64_t walGetOffset(SWal* pWal, int64_t ver) {*/
/*int code = walSeekVer(pWal, ver);*/
/*if(code != 0) {*/
/*return -1;*/
/*}*/
/*code = walValidateOffset(pWal, ver);*/
/*if(code != 0) {*/
/*return -1;*/
/*}*/
/*return 0;*/
/*}*/

View File

@ -148,7 +148,7 @@ TEST_F(WalCleanEnv, createNew) {
walRollFileInfo(pWal);
ASSERT(pWal->fileInfoSet != NULL);
ASSERT_EQ(pWal->fileInfoSet->size, 1);
WalFileInfo* pInfo = (WalFileInfo*)taosArrayGetLast(pWal->fileInfoSet);
SWalFileInfo* pInfo = (SWalFileInfo*)taosArrayGetLast(pWal->fileInfoSet);
ASSERT_EQ(pInfo->firstVer, 0);
ASSERT_EQ(pInfo->lastVer, -1);
ASSERT_EQ(pInfo->closeTs, -1);

View File

@ -300,16 +300,14 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
return pFile;
}
int64_t taosCloseFile(TdFilePtr *ppFile) {
int32_t taosCloseFile(TdFilePtr *ppFile) {
int32_t code = 0;
if (ppFile == NULL || *ppFile == NULL) {
return 0;
}
#if FILE_WITH_LOCK
taosThreadRwlockWrlock(&((*ppFile)->rwlock));
#endif
if (ppFile == NULL || *ppFile == NULL) {
return 0;
}
if ((*ppFile)->fp != NULL) {
fflush((*ppFile)->fp);
fclose((*ppFile)->fp);
@ -320,9 +318,10 @@ int64_t taosCloseFile(TdFilePtr *ppFile) {
HANDLE h = (HANDLE)_get_osfhandle((*ppFile)->fd);
!FlushFileBuffers(h);
#else
fsync((*ppFile)->fd);
// warning: never fsync silently in base lib
/*fsync((*ppFile)->fd);*/
#endif
close((*ppFile)->fd);
code = close((*ppFile)->fd);
(*ppFile)->fd = -1;
}
(*ppFile)->refId = 0;
@ -332,7 +331,7 @@ int64_t taosCloseFile(TdFilePtr *ppFile) {
#endif
taosMemoryFree(*ppFile);
*ppFile = NULL;
return 0;
return code;
}
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
@ -560,6 +559,8 @@ int32_t taosFsyncFile(TdFilePtr pFile) {
return 0;
}
// this implementation is WRONG
// fflush is not a replacement for fsync
if (pFile->fp != NULL) return fflush(pFile->fp);
if (pFile->fd >= 0) {
#ifdef WINDOWS

View File

@ -202,7 +202,7 @@ int32_t taosHexEncode(const char *src, char *dst, int32_t len) {
}
for (int32_t i = 0; i < len; ++i) {
sprintf(dst + i * 2, "%02x", src[i] & 0xff);
sprintf(dst + i * 2, "%02x", src[i]);
}
return 0;
@ -213,10 +213,10 @@ int32_t taosHexDecode(const char *src, char *dst, int32_t len) {
return -1;
}
uint16_t hn, ln, out;
uint8_t hn, ln, out;
for (int i = 0, j = 0; i < len * 2; i += 2, ++j ) {
hn = src[i] > '9' ? src[i] - 'A' + 10 : src[i] - '0';
ln = src[i + 1] > '9' ? src[i + 1] - 'A' + 10 : src[i + 1] - '0';
hn = src[i] > '9' ? src[i] - 'a' + 10 : src[i] - '0';
ln = src[i + 1] > '9' ? src[i + 1] - 'a' + 10 : src[i + 1] - '0';
out = (hn << 4) | ln;
memcpy(dst + j, &out, 1);

View File

@ -394,6 +394,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUPLICATTED_OPERATION, "Duplicatted operation
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_MSG_ERROR, "Task message error")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_FREED, "Job already freed")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_STATUS_ERROR, "Task status error")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_ERROR, "Json not support in in/notin operator")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR, "Json not support in this place")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_GROUP_ERROR, "Json not support in group/partition by")
// grant
TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired")
@ -429,6 +432,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NEW_CONFIG_ERROR, "Sync new config error
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_RECONFIG_NOT_READY, "Sync not ready for reconfig")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_PROPOSE_NOT_READY, "Sync not ready for propose")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_STANDBY_NOT_READY, "Sync not ready for standby")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_BATCH_ERROR, "Sync batch error")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INTERNAL_ERROR, "Sync internal error")
// wal

View File

@ -193,7 +193,9 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
char* pDataBuf = pg->pData;
memset(pDataBuf, 0, getAllocPageSize(pBuf->pageSize));
#ifdef BUF_PAGE_DEBUG
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, pg->offset);
#endif
pg->length = size; // on disk size
return pDataBuf;
}
@ -440,6 +442,9 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
}
((void**)pi->pData)[0] = pi;
#ifdef BUF_PAGE_DEBUG
uDebug("page_getNewBufPage , pi->pData:%p, pageId:%d, offset:%"PRId64, pi->pData, pi->pageId, pi->offset);
#endif
return (void*)(GET_DATA_PAYLOAD(pi));
}
@ -462,7 +467,9 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
lruListMoveToFront(pBuf->lruList, (*pi));
(*pi)->used = true;
#ifdef BUF_PAGE_DEBUG
uDebug("page_getBufPage1 pageId:%d, offset:%"PRId64, (*pi)->pageId, (*pi)->offset);
#endif
return (void*)(GET_DATA_PAYLOAD(*pi));
} else { // not in memory
assert((*pi)->pData == NULL && (*pi)->pn == NULL && (((*pi)->length >= 0 && (*pi)->offset >= 0) || ((*pi)->length == -1 && (*pi)->offset == -1)));
@ -494,7 +501,9 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
return NULL;
}
}
#ifdef BUF_PAGE_DEBUG
uDebug("page_getBufPage2 pageId:%d, offset:%"PRId64, (*pi)->pageId, (*pi)->offset);
#endif
return (void*)(GET_DATA_PAYLOAD(*pi));
}
}
@ -506,8 +515,11 @@ void releaseBufPage(SDiskbasedBuf* pBuf, void* page) {
}
void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
#ifdef BUF_PAGE_DEBUG
uDebug("page_releaseBufPageInfo pageId:%d, used:%d, offset:%"PRId64, pi->pageId, pi->used, pi->offset);
#endif
assert(pi->pData != NULL && pi->used == true);
// assert(pi->pData != NULL);
pi->used = false;
pBuf->statis.releasePages += 1;
}

View File

@ -16,6 +16,23 @@
#define _DEFAULT_SOURCE
#include "tutil.h"
void *tmemmem(const char *haystack, int32_t hlen, const char *needle, int32_t nlen) {
const char *limit;
if (nlen == 0 || hlen < nlen) {
return NULL;
}
limit = haystack + hlen - nlen + 1;
while ((haystack = (char *)memchr(haystack, needle[0], limit - haystack)) != NULL) {
if (memcmp(haystack, needle, nlen) == 0) {
return (void *)haystack;
}
haystack++;
}
return NULL;
}
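A small usage sketch (illustrative): scanning a buffer for the 8-byte WAL magic, the way walScanLogGetLastVer uses tmemmem above:
// Sketch: return a pointer to the first occurrence of `magic` inside buf, or NULL.
static const char* findMagicSketch(const char* buf, int32_t bufLen, uint64_t magic) {
  return (const char*)tmemmem(buf, bufLen, (const char*)&magic, (int32_t)sizeof(magic));
}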
int32_t strdequote(char *z) {
if (z == NULL) {
return 0;

View File

@ -60,7 +60,7 @@ class TDTestCase:
def build_db(precision, start_time):
tdSql.execute("drop database if exists timedb1")
tdSql.execute(
"create database timedb1 duration 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
"create database timedb1 duration 10 keep 36500 blocks 8 precision "+"\""+precision+"\"")
tdSql.execute("use timedb1")
tdSql.execute(

View File

@ -30,13 +30,6 @@
./test.sh -f tsim/dnode/balance2.sim
./test.sh -f tsim/dnode/balance3.sim
./test.sh -f tsim/dnode/balancex.sim
#./test.sh -f tsim/dnode/cluster_alter.sim
#./test.sh -f tsim/dnode/cluster_balance1.sim
#./test.sh -f tsim/dnode/cluster_balance2.sim
#./test.sh -f tsim/dnode/cluster_balance3.sim
#./test.sh -f tsim/dnode/cluster_cache.sim
#./test.sh -f tsim/dnode/cluster_flowctrl.sim
#./test.sh -f tsim/dnode/cluster_vgroup100.sim
./test.sh -f tsim/dnode/create_dnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim

View File

@ -1,174 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c role -v 1
system sh/cfg.sh -n dnode2 -c role -v 2
system sh/cfg.sh -n dnode3 -c role -v 2
system sh/cfg.sh -n dnode4 -c role -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c balance -v 0
system sh/cfg.sh -n dnode2 -c balance -v 0
system sh/cfg.sh -n dnode3 -c balance -v 0
system sh/cfg.sh -n dnode4 -c balance -v 0
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 2000
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sleep 2000
print ========== step2
sql create database d1
sql create table d1.t1 (t timestamp, i int)
sql insert into d1.t1 values(now+1s, 15)
sql insert into d1.t1 values(now+2s, 14)
sql insert into d1.t1 values(now+3s, 13)
sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)
print ========== step3
sleep 2000
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_1 != 0 then
return -1
endi
if $data2_2 != 1 then
return -1
endi
if $data2_3 != 0 then
return -1
endi
if $data2_4 != 0 then
return -1
endi
print ========== step4
sql alter dnode 2 balance "vnode:2-dnode:3"
$x = 0
show4:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show4
endi
if $data2_3 != 1 then
goto show4
endi
if $data2_4 != 0 then
goto show4
endi
print ========== step5
sql alter dnode 3 balance "vnode:2-dnode:4"
$x = 0
show5:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show5
endi
if $data2_3 != 0 then
goto show5
endi
if $data2_4 != 1 then
goto show5
endi
print ========== step6
sql alter dnode 4 balance "vnode:2-dnode:2"
$x = 0
show6:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 1 then
goto show6
endi
if $data2_3 != 0 then
goto show6
endi
if $data2_4 != 0 then
goto show6
endi
print ========== step7
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
print ========== step8
sql_error sql alter dnode 4 balance "vnode:2-dnode:5"
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT

View File

@ -1,590 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/deploy.sh -n dnode6 -i 6
system sh/deploy.sh -n dnode7 -i 7
system sh/deploy.sh -n dnode8 -i 8
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode6 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode7 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode8 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode6 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode7 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode8 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode5 -c wallevel -v 1
system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode7 -c wallevel -v 1
system sh/cfg.sh -n dnode8 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
sql connect
sql create database c_b1_d1
sql use c_b1_d1
sql create table c_b1_t1 (t timestamp, i int)
sql insert into c_b1_t1 values(1520000020015, 15)
sql insert into c_b1_t1 values(1520000021014, 14)
sql insert into c_b1_t1 values(1520000022013, 13)
sql insert into c_b1_t1 values(1520000023012, 12)
sql insert into c_b1_t1 values(1520000024011, 11)
sql create database c_b1_d2
sql use c_b1_d2
sql create table c_b1_t2 (t timestamp, i int)
sql insert into c_b1_t2 values(1520000020025, 25)
sql insert into c_b1_t2 values(1520000021024, 24)
sql insert into c_b1_t2 values(1520000022023, 23)
sql insert into c_b1_t2 values(1520000023022, 22)
sql insert into c_b1_t2 values(1520000024021, 21)
sql show dnodes
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 2 then
return -1
endi
if $dnode2Vnodes != null then
return -1
endi
print ============================== step2
print ========= start dnode2
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show2
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 1 then
goto show2
endi
if $dnode2Vnodes != 1 then
goto show2
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print ============================== step3
print ========= add db3
sql create database c_b1_d3
sql use c_b1_d3
sql create table c_b1_t3 (t timestamp, i int)
sql insert into c_b1_t3 values(1520000020035, 35)
sql insert into c_b1_t3 values(1520000021034, 34)
sql insert into c_b1_t3 values(1520000022033, 33)
sql insert into c_b1_t3 values(1520000023032, 32)
sql insert into c_b1_t3 values(1520000024031, 31)
print ============================== step4
print ========= drop dnode2
sql drop dnode $hostname2
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show4
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
if $dnode1Vnodes != 3 then
goto show4
endi
if $dnode2Vnodes != null then
goto show4
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
print ============================== step5
print ========= add dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
$x = 0
show5:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show5
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode3Vnodes = $data2_3
print dnode2 $dnode3Vnodes
if $dnode1Vnodes != 2 then
goto show5
endi
if $dnode3Vnodes != 1 then
goto show5
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
return -1
endi
if $dnode3Role != slave then
return -1
endi
print ============================== step6
sql_error drop dnode $hostname1
print ============================== step7
sql_error create dnode $hostname1
print ============================== step8
sql drop dnode $hostname3
$x = 0
show8:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show8
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 3 then
goto show8
endi
if $dnode3Vnodes != null then
goto show8
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
return -1
endi
if $dnode3Role != null then
return -1
endi
print ============================== step9
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
$x = 0
show9:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show9
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode1Vnodes != 2 then
goto show9
endi
if $dnode4Vnodes != 1 then
goto show9
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
return -1
endi
if $dnode4Role != slave then
return -1
endi
print ============================== step10
sql create database c_b1_d4
sql use c_b1_d4
sql create table c_b1_t4 (t timestamp, i int)
sql insert into c_b1_t4 values(1520000020045, 45)
sql insert into c_b1_t4 values(1520000021044, 44)
sql insert into c_b1_t4 values(1520000022043, 43)
sql insert into c_b1_t4 values(1520000023042, 42)
sql insert into c_b1_t4 values(1520000024041, 41)
$x = 0
show10:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show10
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode1Vnodes != 2 then
goto show10
endi
if $dnode4Vnodes != 2 then
goto show10
endi
sql reset query cache
sql use c_b1_d3
sql insert into c_b1_t3 values(1520000025036, 36)
sql use c_b1_d2
sql insert into c_b1_t2 values(1520000025026, 26)
print ============================== step12
sql create database c_b1_d5
sql use c_b1_d5
sql_error create table c_b1_t5 (t timestamp, i int) -x error3
print ============================== step13
sql create dnode $hostname5
system sh/exec.sh -n dnode5 -s start
$x = 0
step13:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
print dnode4 $data4_4
print dnode5 $data4_5
if $data4_5 != ready then
goto step13
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
$dnode5Role = $data2_5
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
print dnode5 ==> $dnode5Role
sql use c_b1_d5;
sql create table c_b1_t5 (t timestamp, i int)
sql insert into c_b1_t5 values(1520000020055, 55)
sql insert into c_b1_t5 values(1520000021054, 54)
sql insert into c_b1_t5 values(1520000022053, 53)
sql insert into c_b1_t5 values(1520000023052, 52)
sql insert into c_b1_t5 values(1520000024051, 51)
sql create database c_b1_d6
sql use c_b1_d6
sql create table c_b1_t6 (t timestamp, i int)
sql insert into c_b1_t6 values(1520000020065, 65)
sql insert into c_b1_t6 values(1520000021064, 64)
sql insert into c_b1_t6 values(1520000022063, 63)
sql insert into c_b1_t6 values(1520000023062, 62)
sql insert into c_b1_t6 values(1520000024061, 61)
sql show dnodes
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode2Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode2Vnodes
if $dnode1Vnodes != 2 then
goto step13
endi
if $dnode4Vnodes != 2 then
goto step13
endi
if $dnode5Vnodes != 2 then
goto step13
endi
print ============================== step14
sql create dnode $hostname6
system sh/exec.sh -n dnode6 -s start
$x = 0
step14:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
print dnode4 $data4_4
print dnode4 $data4_5
if $data4_6 != ready then
goto step14
endi
sql create database c_b1_d7
sql use c_b1_d7
sql create table c_b1_t7 (t timestamp, i int)
sql insert into c_b1_t7 values(1520000020075, 75)
sql insert into c_b1_t7 values(1520000021074, 74)
sql insert into c_b1_t7 values(1520000022073, 73)
sql insert into c_b1_t7 values(1520000023072, 72)
sql insert into c_b1_t7 values(1520000024071, 71)
sql create database c_b1_d8
sql use c_b1_d8
sql create table c_b1_t8 (t timestamp, i int)
sql insert into c_b1_t8 values(1520000020085, 85)
sql insert into c_b1_t8 values(1520000021084, 84)
sql insert into c_b1_t8 values(1520000022083, 83)
sql insert into c_b1_t8 values(1520000023082, 82)
sql insert into c_b1_t8 values(1520000024081, 81)
$x = 0
show14:
$x = $x + 1
sleep 1000
if $x == 30 then
return -1
endi
sql show dnodes -x show14
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
if $dnode1Vnodes != 2 then
goto show14
endi
if $dnode4Vnodes != 2 then
goto show14
endi
if $dnode5Vnodes != 2 then
goto show14
endi
if $dnode6Vnodes != 2 then
goto show14
endi
sql reset query cache
sleep 1000
print ============================== step17
print ========= check data
sql reset query cache
sleep 100
sql use c_b1_d1
sql select * from c_b1_d1.c_b1_t1
print $rows
print $data01 $data11 $data21 $data31 $data41
if $rows != 5 then
return -1
endi
sql use c_b1_d2
sql select * from c_b1_d2.c_b1_t2
print $rows
print $data01 $data11 $data21 $data31 $data41
if $rows != 6 then
return -1
endi
sql use c_b1_d3
sql select * from c_b1_d3.c_b1_t3 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
if $rows != 6 then
return -1
endi
if $data01 != 36 then
return -1
endi
sql use c_b1_d4
sql select * from c_b1_d4.c_b1_t4 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
sql use c_b1_d5
sql select * from c_b1_d5.c_b1_t5 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
if $data01 != 51 then
return -1
endi
if $data11 != 52 then
return -1
endi
if $data21 != 53 then
return -1
endi
if $data31 != 54 then
return -1
endi
if $data41 != 55 then
return -1
endi
sql use c_b1_d6
sql select * from c_b1_d6.c_b1_t6 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
if $data01 != 61 then
return -1
endi
if $data11 != 62 then
return -1
endi
if $data21 != 63 then
return -1
endi
if $data31 != 64 then
return -1
endi
if $data41 != 65 then
return -1
endi
sql use c_b1_d7
sql select * from c_b1_d7.c_b1_t7 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
if $data01 != 71 then
return -1
endi
if $data11 != 72 then
return -1
endi
if $data21 != 73 then
return -1
endi
if $data31 != 74 then
return -1
endi
if $data41 != 75 then
return -1
endi
sql use c_b1_d8
sql select * from c_b1_d8.c_b1_t8 order by t desc
print $rows
print $data01 $data11 $data21 $data31 $data41
if $data01 != 81 then
return -1
endi
if $data11 != 82 then
return -1
endi
if $data21 != 83 then
return -1
endi
if $data31 != 84 then
return -1
endi
if $data41 != 85 then
return -1
endi
print ============================================ over=
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,479 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/deploy.sh -n dnode6 -i 6
system sh/deploy.sh -n dnode7 -i 7
system sh/deploy.sh -n dnode8 -i 8
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode6 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode7 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode8 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode5 -c wallevel -v 1
system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode7 -c wallevel -v 1
system sh/cfg.sh -n dnode8 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode6 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode7 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode8 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
print dnode4 $data4_4
if $data4_1 != ready then
goto step1
endi
if $data4_2 != ready then
goto step1
endi
if $data4_3 != ready then
goto step1
endi
sql show mnodes
print mnode1 $data2_1
print mnode2 $data2_2
print mnode3 $data2_3
if $data2_1 != master then
goto step1
endi
if $data2_2 != slave then
goto step1
endi
if $data2_3 != slave then
goto step1
endi
sql create database c_b2_d1 replica 2
sql use c_b2_d1
sql create table c_b2_t1 (t timestamp, i int)
sql insert into c_b2_t1 values(1520000020015, 15)
sql insert into c_b2_t1 values(1520000021014, 14)
sql insert into c_b2_t1 values(1520000022013, 13)
sql insert into c_b2_t1 values(1520000023012, 12)
sql insert into c_b2_t1 values(1520000024011, 11)
sql create database c_b2_d2 replica 2
sql use c_b2_d2
sql create table c_b2_t2 (t timestamp, i int)
sql insert into c_b2_t2 values(1520000020025, 25)
sql insert into c_b2_t2 values(1520000021024, 24)
sql insert into c_b2_t2 values(1520000022023, 23)
sql insert into c_b2_t2 values(1520000023022, 22)
sql insert into c_b2_t2 values(1520000024021, 21)
sql create database c_b2_d3 replica 2
sql use c_b2_d3
sql create table c_b2_t3 (t timestamp, i int)
sql insert into c_b2_t3 values(1520000020035, 35)
sql insert into c_b2_t3 values(1520000021034, 34)
sql insert into c_b2_t3 values(1520000022033, 33)
sql insert into c_b2_t3 values(1520000023032, 32)
sql insert into c_b2_t3 values(1520000024031, 31)
$x = 0
show1:
$x = $x + 1
sleep 3000
if $x == 20 then
return -1
endi
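# -x show1: if the statement fails, jump back to the show1 label and retry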
sql show dnodes -x show1
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 2 then
goto show1
endi
if $dnode2Vnodes != 2 then
goto show1
endi
if $dnode3Vnodes != 2 then
goto show1
endi
print ============================== step2
print ========= drop dnode2
sql drop dnode $hostname2
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 30 then
return -1
endi
sql show dnodes -x show2
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 3 then
goto show2
endi
if $dnode2Vnodes != null then
goto show2
endi
if $dnode3Vnodes != 3 then
goto show2
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============================== step3
print ========= start dnode4
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
$x = 0
show3:
$x = $x + 1
sleep 1000
if $x == 30 then
return -1
endi
sql show dnodes -x show3
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 2 then
goto show3
endi
if $dnode4Vnodes != 2 then
goto show3
endi
if $dnode3Vnodes != 2 then
goto show3
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != slave then
return -1
endi
if $dnode4Role != slave then
return -1
endi
print ============================== step4
print ========= drop dnode3
sql drop dnode $hostname3
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show4
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 3 then
goto show4
endi
if $dnode4Vnodes != 3 then
goto show4
endi
if $dnode3Vnodes != null then
goto show4
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
$dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
return -1
endi
if $dnode2Role != null then
return -1
endi
if $dnode3Role != null then
return -1
endi
if $dnode4Role != slave then
return -1
endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT
print ============================== step5
print ========= start dnode5
sql create dnode $hostname5
system sh/exec.sh -n dnode5 -s start
$x = 0
show5:
$x = $x + 1
sleep 1000
if $x == 30 then
return -1
endi
sql show dnodes -x show5
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
if $dnode1Vnodes != 2 then
goto show5
endi
if $dnode4Vnodes != 2 then
goto show5
endi
if $dnode5Vnodes != 2 then
goto show5
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
$dnode5Role = $data2_5
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
print dnode5 ==> $dnode5Role
print ============================== step6
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print stop dnode1 and sleep 3000
sleep 3000
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
$dnode5Role = $data2_5
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
print dnode5 ==> $dnode5Role
if $dnode1Role != offline then
return -1
endi
print ============================== step6.1
system sh/exec.sh -n dnode1 -s start
$x = 0
step6.1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
if $data4_1 != ready then
goto step6.1
endi
sql drop dnode $hostname1
print drop dnode1 and sleep 9000
sleep 9000
$x = 0
show6:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes -x show6
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
if $dnode1Vnodes != null then
goto show6
endi
if $dnode4Vnodes != 3 then
goto show6
endi
if $dnode5Vnodes != 3 then
goto show6
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode4Role = $data2_4
$dnode5Role = $data2_5
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
print dnode5 ==> $dnode5Role
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
print ============================== step12
print ========= check data
sql reset query cache
sleep 100
sql select * from c_b2_d1.c_b2_t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
sql select * from c_b2_d2.c_b2_t2 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 21 then
return -1
endi
if $data11 != 22 then
return -1
endi
if $data21 != 23 then
return -1
endi
if $data31 != 24 then
return -1
endi
if $data41 != 25 then
return -1
endi
sql select * from c_b2_d3.c_b2_t3 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 31 then
return -1
endi
if $data11 != 32 then
return -1
endi
if $data21 != 33 then
return -1
endi
if $data31 != 34 then
return -1
endi
if $data41 != 35 then
return -1
endi
print ============================================ over
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGKILL
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -1,643 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/deploy.sh -n dnode6 -i 6
system sh/deploy.sh -n dnode7 -i 7
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode6 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode7 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode6 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode7 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode5 -c wallevel -v 1
system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode7 -c wallevel -v 1
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
if $data4_1 != ready then
goto step1
endi
if $data4_2 != ready then
goto step1
endi
if $data4_3 != ready then
goto step1
endi
sql show mnodes
print mnode1 $data2_1
print mnode2 $data2_2
print mnode3 $data2_3
if $data2_1 != master then
goto step1
endi
if $data2_2 != slave then
goto step1
endi
if $data2_3 != slave then
goto step1
endi
sql create database c_b3_d1 replica 3
sql use c_b3_d1
sql create table c_b3_t1 (t timestamp, i int)
sql insert into c_b3_t1 values(1520000020015, 15)
sql insert into c_b3_t1 values(1520000021014, 14)
sql insert into c_b3_t1 values(1520000022013, 13)
sql insert into c_b3_t1 values(1520000023012, 12)
sql insert into c_b3_t1 values(1520000024011, 11)
sql create database c_b3_d2 replica 3
sql use c_b3_d2
sql create table c_b3_t2 (t timestamp, i int)
sql insert into c_b3_t2 values(1520000020025, 25)
sql insert into c_b3_t2 values(1520000021024, 24)
sql insert into c_b3_t2 values(1520000022023, 23)
sql insert into c_b3_t2 values(1520000023022, 22)
sql insert into c_b3_t2 values(1520000024021, 21)
sql create database c_b3_d3 replica 3
sql use c_b3_d3
sql create table c_b3_t3 (t timestamp, i int)
sql insert into c_b3_t3 values(1520000020035, 35)
sql insert into c_b3_t3 values(1520000021034, 34)
sql insert into c_b3_t3 values(1520000022033, 33)
sql insert into c_b3_t3 values(1520000023032, 32)
sql insert into c_b3_t3 values(1520000024031, 31)
$x = 0
show1:
$x = $x + 1
sleep 2000
if $x == 20 then
return -1
endi
sql show dnodes -x show1
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
if $dnode1Vnodes != 3 then
goto show1
endi
if $dnode2Vnodes != 3 then
goto show1
endi
if $dnode3Vnodes != 3 then
goto show1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print ============================== step2
print ========= start dnode4
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
$x = 0
show2:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show2
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode4Vnodes != 2 then
goto show2
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print ============================== step3
print ========= drop dnode2
sql drop dnode $hostname2
$x = 0
show3:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show3
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode2Vnodes = $data2_2
print dnode2 $dnode2Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode1Vnodes != 3 then
goto show3
endi
if $dnode2Vnodes != null then
goto show3
endi
if $dnode3Vnodes != 3 then
goto show3
endi
if $dnode4Vnodes != 3 then
goto show3
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============================== step4
sql create dnode $hostname5
system sh/exec.sh -n dnode5 -s start
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show4
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode5Vnodes != 2 then
goto show4
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
if $data2_4 != slave then
goto show4
endi
print ============================== step5
print ========= drop dnode3
sql drop dnode $hostname3
sleep 9000
$x = 0
show5:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show5
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode3Vnodes = $data2_3
print dnode3 $dnode3Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
if $dnode1Vnodes != 3 then
goto show5
endi
if $dnode5Vnodes != 3 then
goto show5
endi
if $dnode3Vnodes != null then
goto show5
endi
if $dnode4Vnodes != 3 then
goto show5
endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
if $data2_5 != slave then
goto show5
endi
print ============================== step6
sql create dnode $hostname6
system sh/exec.sh -n dnode6 -s start
$x = 0
show6:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show6
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
if $dnode1Vnodes != 2 then
goto show6
endi
if $dnode4Vnodes != 2 then
goto show6
endi
if $dnode5Vnodes != 3 then
goto show6
endi
if $dnode6Vnodes != 2 then
goto show6
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step7
print ========= drop dnode4
sql drop dnode $hostname4
$x = 0
show7:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show7
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode4Vnodes = $data2_4
print dnode4 $dnode4Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
if $dnode1Vnodes != 3 then
goto show7
endi
if $dnode5Vnodes != 3 then
goto show7
endi
if $dnode6Vnodes != 3 then
goto show7
endi
if $dnode4Vnodes != null then
goto show7
endi
system sh/exec.sh -n dnode4 -s stop -x SIGINT
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
if $data2_6 != slave then
goto show7
endi
print ============================== step8
sql create dnode $hostname7
system sh/exec.sh -n dnode7 -s start
$x = 0
show8:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show8
$dnode1Vnodes = $data2_1
print dnode1 $dnode1Vnodes
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
$dnode7Vnodes = $data2_7
print dnode7 $dnode7Vnodes
if $dnode1Vnodes != 2 then
goto show8
endi
if $dnode5Vnodes != 2 then
goto show8
endi
if $dnode6Vnodes != 3 then
goto show8
endi
if $dnode7Vnodes != 2 then
goto show8
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$x = 0
show9:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show mnodes -x show9
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
if $data2_1 != offline then
goto show9
endi
if $data2_5 != master then
goto show9
endi
if $data2_6 != slave then
goto show9
endi
print ============================== step10
sql drop dnode $hostname1
$x = 0
show10:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show mnodes -x show10
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
if $data2_1 != null then
goto show10
endi
if $data2_5 != master then
goto show10
endi
if $data2_6 != slave then
goto show10
endi
if $data2_7 != slave then
goto show10
endi
print ============================== step11
system sh/exec.sh -n dnode1 -s start
$x = 0
show11:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show11
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
$dnode7Vnodes = $data2_7
print dnode7 $dnode7Vnodes
if $dnode5Vnodes != 3 then
goto show11
endi
if $dnode6Vnodes != 3 then
goto show11
endi
if $dnode7Vnodes != 3 then
goto show11
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data2_3
print dnode4 ==> $data2_4
print dnode5 ==> $data2_5
print dnode6 ==> $data2_6
print dnode7 ==> $data2_7
print ============================== step12
sql create database c_b3_d4 replica 3
sql use c_b3_d4
$x = 0
create4:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql create table c_b3_t4 (t timestamp, i int) -x create4
sql insert into c_b3_t4 values(1520000020045, 45)
sql insert into c_b3_t4 values(1520000021044, 44)
sql insert into c_b3_t4 values(1520000022043, 43)
sql insert into c_b3_t4 values(1520000023042, 42)
sql insert into c_b3_t4 values(1520000024041, 41)
$x = 0
show12:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes -x show12
$dnode5Vnodes = $data2_5
print dnode5 $dnode5Vnodes
$dnode6Vnodes = $data2_6
print dnode6 $dnode6Vnodes
$dnode7Vnodes = $data2_7
print dnode7 $dnode7Vnodes
if $dnode5Vnodes != 4 then
goto show12
endi
if $dnode6Vnodes != 4 then
goto show12
endi
if $dnode7Vnodes != 4 then
goto show12
endi
print ============================== step13
sql reset query cache
sleep 200
print ========= check data
sql select * from c_b3_d1.c_b3_t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
sql select * from c_b3_d2.c_b3_t2 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 21 then
return -1
endi
if $data11 != 22 then
return -1
endi
if $data21 != 23 then
return -1
endi
if $data31 != 24 then
return -1
endi
if $data41 != 25 then
return -1
endi
sql select * from c_b3_d3.c_b3_t3 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 31 then
return -1
endi
if $data11 != 32 then
return -1
endi
if $data21 != 33 then
return -1
endi
if $data31 != 34 then
return -1
endi
if $data41 != 35 then
return -1
endi
print ============================================ over
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT

View File

@ -1,65 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c httpMaxThreads -v 2
system sh/cfg.sh -n dnode2 -c httpMaxThreads -v 2
system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode1 -c monitor -v 2
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode1 -c enableHttp -v 1
system sh/cfg.sh -n dnode2 -c monitor -v 1
system sh/cfg.sh -n dnode1 -c monitorInterval -v 1
system sh/cfg.sh -n dnode2 -c monitorInterval -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
sql create database testdb
sql use testdb
sql create table meter1 (ts timestamp, val int)
$x = 0
$v = -100
while $x < 30
$v = $v + 1
$ms = $v . m
sql insert into meter1 values (now $ms , $x )
$x = $x + 1
endw
sleep 2000
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 10000
sql show log.tables;
if $rows > 6 then
return -1
endi
sql select * from log.dn1
print ===>rows $rows
print $data00 $data01 $data02
print $data10 $data11 $data12
print $data20 $data21 $data22
if $rows < 10 then
return -1
endi
#sql create table sys.st as select avg(taosd), avg(system) from sys.cpu interval(30s)
sql show log.vgroups
if $data05 != master then
return -1
endi
if $data15 != master then
return -1
endi

View File

@ -1,131 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c http -v 0
system sh/cfg.sh -n dnode2 -c http -v 0
system sh/cfg.sh -n dnode3 -c http -v 0
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode1 -c replica -v 3
system sh/cfg.sh -n dnode2 -c replica -v 3
system sh/cfg.sh -n dnode3 -c replica -v 3
print ============== deploy
system sh/exec.sh -n dnode1 -s start
sleep 5001
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print =============== step1
$x = 0
show1:
$x = $x + 1
sleep 2000
if $x == 5 then
return -1
endi
sql show mnodes -x show1
$mnode1Role = $data2_1
print mnode1Role $mnode1Role
$mnode2Role = $data2_2
print mnode2Role $mnode2Role
$mnode3Role = $data2_3
print mnode3Role $mnode3Role
if $mnode1Role != master then
goto show1
endi
if $mnode2Role != slave then
goto show1
endi
if $mnode3Role != slave then
goto show1
endi
print =============== step2
sql create database db replica 3
sql use db
sql create table tb (ts timestamp, test int)
$x = 0
while $x < 100
$ms = $x . s
sql insert into tb values (now + $ms , $x )
$x = $x + 1
endw
print =============== step3
sleep 2000
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
print =============== step4
sleep 3000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print =============== step5
sleep 8000
while $x < 200
$ms = $x . s
sql insert into tb values (now + $ms , $x )
$x = $x + 1
endw
print =============== step6
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 2000
while $x < 300
$ms = $x . s
sql insert into tb values (now + $ms , $x )
$x = $x + 1
endw
system sh/exec.sh -n dnode2 -s start
sleep 6000
print =============== step7
while $x < 400
$ms = $x . s
sql insert into tb values (now + $ms , $x )
$x = $x + 1
sleep 1
endw
print =============== step8
sql select * from tb
print rows $rows
if $rows != 400 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -1,146 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c maxTables -v 4
system sh/cfg.sh -n dnode2 -c maxTables -v 4
system sh/cfg.sh -n dnode3 -c maxTables -v 4
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
print ============================== step1
system sh/exec.sh -n dnode1 -s start
sql connect
print ============================== step2
print ========= start dnode2 and dnode3
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
$maxNum = 102
$maxNum = 12
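# the second assignment takes effect, so the loops below only create databases d2 .. d11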
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print $dnode1Role
print $dnode2Role
print $dnode3Role
if $dnode1Role != master then
goto show2
endi
if $dnode2Role != slave then
goto show2
endi
if $dnode3Role != slave then
goto show2
endi
print ============================== step3
$count = 2
while $count < $maxNum
$db = d . $count
$tb = $db . .t
$tb2 = $db . .t2
sql create database $db replica 3 cache 1 blocks 3
sql create table $tb (ts timestamp, i int)
sql insert into $tb values(now, 1)
sql create table $tb2 as select count(*) from $tb interval(10s)
$count = $count + 1
print insert into $tb values(now, 1) ==> finished
endw
print ============================== step4
$count = 2
while $count < $maxNum
$db = d . $count
$tb = $db . .t
sql select * from $tb
if $rows != 1 then
print select * from $tb
return -1
endi
$count = $count + 1
print select * from $tb ==> rows: $rows
endw
print ============================== step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
print ============================== step6
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print ============================== step7
$x = 0
show7:
$x = $x + 1
sleep 2000
if $x == 50 then
return -1
endi
sql show mnodes -x show7
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
if $dnode1Role != master then
goto show7
endi
if $dnode2Role != slave then
goto show7
endi
if $dnode3Role != slave then
goto show7
endi
print ============================== step8
$x = 0
show8:
$x = $x + 1
sleep 2000
if $x == 20 then
return -1
endi
$count = 2
while $count < $maxNum
$db = d . $count
$tb = $db . .t
sql select * from $tb
if $rows != 1 then
print select * from $tb
goto show8
endi
$count = $count + 1
print select * from $tb ==> rows: $rows
endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -0,0 +1,75 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/cfg.sh -n dnode2 -c supportVnodes -v 4
system sh/cfg.sh -n dnode3 -c supportVnodes -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
print ========== step2
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
return -1
endi
if $data(1)[4] != ready then
goto step2
endi
if $data(2)[4] != ready then
goto step2
endi
if $data(3)[4] != ready then
goto step2
endi
print ========== step3
sql create database d1 vgroups 1
sql use d1;
print --> create stb
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
print --> create sma
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);
print --> drop stb
sql drop table stb;
print ========== step4 repeat
print --> create stb
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
print --> create sma
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);
print --> drop stb
sql drop table stb;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT
system sh/exec.sh -n dnode6 -s stop -x SIGINT
system sh/exec.sh -n dnode7 -s stop -x SIGINT
system sh/exec.sh -n dnode8 -s stop -x SIGINT

View File

@ -184,17 +184,17 @@ class TDTestCase:
#query: string Functions
querystmt3=conn.statement("select CHAR_LENGTH(?) from log ")
queryparam3=new_bind_params(1)
print(type(queryparam3))
queryparam3[0].binary('中文字符')
querystmt3.bind_param(queryparam3)
querystmt3.execute()
result3=querystmt3.use_result()
rows3=result3.fetch_all()
print("4",rows3)
assert rows3[0][0] == 12, 'fourth case is failed'
assert rows3[1][0] == 12, 'fourth case is failed'
querystmt9=conn.statement("select CHAR_LENGTH(?) from log ")
queryparam9=new_bind_params(1)
print(type(queryparam9))
queryparam9[0].binary('中文字符')
querystmt9.bind_param(queryparam9)
querystmt9.execute()
result9=querystmt9.use_result()
rows9=result9.fetch_all()
print("9",rows9)
assert rows9[0][0] == 12, 'fourth case is failed'
assert rows9[1][0] == 12, 'fourth case is failed'
# #query: conversion Functions
@ -259,4 +259,4 @@ class TDTestCase:
# add case with filename
#
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -38,7 +38,10 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
# tdSql.prepare()
tdSql.execute('drop database if exists db')
tdSql.execute('create database db vgroups 1')
tdSql.execute('use db')
print("============== STEP 1 ===== prepare data & validate json string")
tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)")
@ -56,6 +59,22 @@ class TDTestCase:
tdSql.query("select jtag from jsons1_8")
tdSql.checkData(0, 0, '{" ":90,"1tag$":2,"tag1":null}')
tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
tdSql.checkData(1, 1, '{"tag1":false,"tag2":"beijing"}')
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
tdSql.checkData(2, 1, '{"tag1":null,"tag2":"shanghai","tag3":"hello"}')
tdSql.query("select ts,jtag->'tag1' from jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '5.000000000')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
tdSql.checkData(1, 1, 'false')
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
tdSql.checkData(2, 1, 'null')
# test empty json string; it is saved as a NULL jtag
tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')")
@ -218,9 +237,19 @@ class TDTestCase:
# test where with json tag
tdSql.query("select * from jsons1_1 where jtag is not null")
# tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
tdSql.error("select * from jsons1 where jtag->'tag1'={}")
# test json error
tdSql.error("select jtag + 1 from jsons1")
tdSql.error("select jtag > 1 from jsons1")
tdSql.error("select jtag like \"1\" from jsons1")
tdSql.error("select jtag in (\"1\") from jsons1")
tdSql.error("select jtag from jsons1 where jtag > 1")
tdSql.error("select jtag from jsons1 where jtag like 'fsss'")
tdSql.error("select jtag from jsons1 where jtag in (1)")
# where json value is string
tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'")
tdSql.checkRows(2)
@ -369,7 +398,7 @@ class TDTestCase:
tdSql.checkRows(2)
# test where condition with 'in' (not supported)
# tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
# test where condition match/nmatch
tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'")
@ -387,8 +416,8 @@ class TDTestCase:
tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
tdSql.query("select distinct jtag->'tag1' from jsons1")
tdSql.checkRows(8)
tdSql.query("select distinct jtag from jsons1")
tdSql.checkRows(9)
# tdSql.query("select distinct jtag from jsons1")
# tdSql.checkRows(9)
#test duplicate key with normal column
tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
@ -424,62 +453,56 @@ class TDTestCase:
tdSql.checkData(7, 1, "false")
# tdSql.error("select count(*) from jsons1 group by jtag")
# tdSql.error("select count(*) from jsons1 partition by jtag")
# tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
tdSql.error("select count(*) from jsons1 group by jtag")
tdSql.error("select count(*) from jsons1 partition by jtag")
tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'")
tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag")
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
# tdSql.checkRows(8)
# tdSql.checkData(0, 0, 2)
# tdSql.checkData(0, 1, '"femail"')
# tdSql.checkData(1, 0, 2)
# tdSql.checkData(1, 1, '"收到货"')
# tdSql.checkData(2, 0, 1)
# tdSql.checkData(2, 1, "11.000000000")
# tdSql.checkData(5, 0, 1)
# tdSql.checkData(5, 1, "false")
# tdSql.checkData(6, 0, 1)
# tdSql.checkData(6, 1, "null")
# tdSql.checkData(7, 0, 2)
# tdSql.checkData(7, 1, None)
tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
tdSql.checkRows(8)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, '"femail"')
tdSql.checkData(1, 0, 2)
tdSql.checkData(1, 1, '"收到货"')
tdSql.checkData(2, 0, 1)
tdSql.checkData(2, 1, "11.000000000")
tdSql.checkData(5, 0, 1)
tdSql.checkData(5, 1, "false")
tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(2, 0, 1)
tdSql.checkData(2, 1, "false")
tdSql.checkData(5, 0, 1)
tdSql.checkData(5, 1, "11.000000000")
tdSql.checkData(7, 0, 2)
tdSql.checkData(7, 1, '"femail"')
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
# tdSql.checkRows(8)
# tdSql.checkData(0, 0, 2)
# tdSql.checkData(0, 1, None)
# tdSql.checkData(2, 0, 1)
# tdSql.checkData(2, 1, "false")
# tdSql.checkData(5, 0, 1)
# tdSql.checkData(5, 1, "11.000000000")
# tdSql.checkData(7, 0, 2)
# tdSql.checkData(7, 1, '"femail"')
#
# test stddev with group by json tag
# tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
# tdSql.checkRows(8)
# tdSql.checkData(0, 0, 10)
# tdSql.checkData(0, 1, None)
# tdSql.checkData(4, 0, 0)
# tdSql.checkData(4, 1, "5.000000000")
# tdSql.checkData(7, 0, 11)
# tdSql.checkData(7, 1, '"femail"')
#
# res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
# cname_list = []
# cname_list.append("stddev(dataint)")
# cname_list.append("jsons1.jtag->'tag1'")
# tdSql.checkColNameList(res, cname_list)
tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(4, 0, 0)
tdSql.checkData(4, 1, "5.000000000")
tdSql.checkData(7, 0, 11)
tdSql.checkData(7, 1, '"femail"')
res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
cname_list = []
cname_list.append("stddev(dataint)")
cname_list.append("jsons1.jtag->'tag1'")
tdSql.checkColNameList(res, cname_list)
# test top/bottom with group by json tag
# tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
# tdSql.checkRows(11)
# tdSql.checkData(0, 1, None)
# tdSql.checkData(2, 0, 4)
# tdSql.checkData(3, 0, 3)
# tdSql.checkData(3, 1, "false")
# tdSql.checkData(8, 0, 2)
# tdSql.checkData(10, 1, '"femail"')
tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(11)
tdSql.checkData(0, 1, None)
tdSql.checkData(2, 0, 4)
tdSql.checkData(3, 0, 3)
tdSql.checkData(3, 1, "false")
tdSql.checkData(8, 0, 2)
tdSql.checkData(10, 1, '"femail"')
# test having
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
@ -492,6 +515,7 @@ class TDTestCase:
tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}')
tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
tdSql.error("select t->'tag1' from (select jtag->'tag1' as t, dataint from jsons1)")
# tdSql.query("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
# tdSql.checkRows(11)
# tdSql.checkData(1, 1, "jsons1_1")
@ -519,9 +543,10 @@ class TDTestCase:
tdSql.checkData(0, 0, 10)
tdSql.query("select avg(dataint) from jsons1 where jtag is not null")
tdSql.checkData(0, 0, 5.3)
# tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
# tdSql.checkData(0, 0, 36)
# tdSql.error("select irate(dataint) from jsons1 where jtag is not null")
tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
tdSql.checkData(0, 0, 28.386363636363637)
tdSql.query("select irate(dataint) from jsons1 where jtag is not null")
tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
tdSql.checkData(0, 0, 45)
tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1")
@ -549,9 +574,9 @@ class TDTestCase:
#test calculation function:diff/derivative/spread/ceil/floor/round/
tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1")
# tdSql.checkRows(2)
# tdSql.checkData(0, 0, -1)
# tdSql.checkData(1, 0, 10)
tdSql.checkRows(2)
tdSql.checkData(0, 0, -1)
tdSql.checkData(1, 0, 10)
tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, -2)
tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1")
@ -608,14 +633,14 @@ class TDTestCase:
tdSql.checkRows(1)
# function not ready
# tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
# tdSql.checkRows(3)
# tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
# tdSql.checkRows(3)
# tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
# tdSql.checkRows(3)
# tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
# tdSql.checkRows(1)
tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
#str function
tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;")
@ -659,13 +684,26 @@ class TDTestCase:
tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
#
# #test TD-12077
# to_json()
tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
tdSql.checkRows(2)
# tdSql.checkData(0, 0, '{"abc":123}')
# tdSql.checkData(1, 0, '{"abc":123}')
tdSql.query("select to_json('null') from jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
tdSql.query("select to_json('{\"key\"}') from jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
#test TD-12077
tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')")
tdSql.query("select jtag->'tag3' from jsons1_16")
tdSql.checkData(0, 0, '-2.111000000')
# # test TD-12452
# test TD-12452
tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL")
tdSql.query("select jtag from jsons1_1")
tdSql.checkData(0, 0, None)

View File

@ -5,6 +5,7 @@ import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
@ -127,10 +128,14 @@ class TDTestCase:
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
batchRows = 0
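# batchRows tracks how many rows have been appended to the current INSERT statement; it is reset after each execute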
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
batchRows += 1
# if (j > 0) and ((j%(batchNum-1) == 0) or (j == rowsPerTbl - 1)):
if (j > 0) and ((batchRows == batchNum) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
batchRows = 0
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
@ -171,8 +176,8 @@ class TDTestCase:
'dbName': 'db8', \
'vgroups': 4, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'ctbNum': 1, \
'rowsPerTbl': 1000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
@ -189,7 +194,7 @@ class TDTestCase:
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2)
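# math.ceil keeps the expected row count an integer when rowsPerTbl * ctbNum is odd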
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 0
@ -217,7 +222,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
if not (totalConsumeRows >= expectrowcnt):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@ -267,7 +272,7 @@ class TDTestCase:
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2)
topicList = topicName1
ifcheckdata = 0
ifManualCommit = 1

View File

@ -5,6 +5,7 @@ import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
@ -15,16 +16,21 @@ sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 10
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), False)
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
@ -35,15 +41,19 @@ class TDTestCase:
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
@ -52,13 +62,12 @@ class TDTestCase:
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdDnodes.stop(1)
time.sleep(2)
tdDnodes.start(1)
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
# sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
@ -71,31 +80,211 @@ class TDTestCase:
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
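# the first consumer targets roughly one third of the inserted rows; the remainder is picked up by the second consumer started below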
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': -1,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
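# two consumers in the same group consume the topic concurrently; their combined row count should equal the total inserted rows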
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def run(self):
tdSql.prepare()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()

View File

@ -482,6 +482,7 @@ int32_t shellReadCommand(char *command) {
#endif
break;
case 4: // EOF or Ctrl+D
taosResetTerminalMode();
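// restore the original terminal mode before the shell exits on EOF / Ctrl+D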
printf("\n");
return -1;
case 5: // ctrl E