Merge pull request #6531 from taosdata/feature/TD-4352
[TD-4352] compact tsdb meta data feature
commit 9f96559a01
@@ -284,3 +284,5 @@ keepColumnName 1
# 0 no query allowed, queries are disabled
# queryBufferSize -1

# percent of redundant data in tsdb meta that triggers a meta-data compaction, 0 means do not compact
# tsdbMetaCompactRatio 0
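For context, the new option is a percentage: the commit path divides it by 100 and compares the result against the meta file's redundancy ratios (see tsdbCompactMetaFile further down). A minimal, self-contained sketch of that arithmetic, with file statistics invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the threshold math used in tsdbCompactMetaFile.
   The file statistics below are made up for the example. */
int main(void) {
  int     tsTsdbMetaCompactRatio = 30;        /* value from taos.cfg, in percent */
  int64_t nDels = 120, nRecords = 1000;       /* dropped vs. live meta records   */
  int64_t tombSize = 400000, size = 1000000;  /* dead bytes vs. total file size  */

  float compactRatio = (float)tsTsdbMetaCompactRatio / 100;  /* 0.30 */
  float delPercent   = (float)nDels / (float)nRecords;       /* 0.12 */
  float tombPercent  = (float)tombSize / (float)size;        /* 0.40 */

  /* compaction is skipped only if BOTH ratios stay under the threshold */
  printf("compact: %s\n",
         (delPercent < compactRatio && tombPercent < compactRatio) ? "no" : "yes");  /* yes */
  return 0;
}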
@@ -150,6 +150,7 @@ int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;

// tsdb config
@@ -1581,6 +1582,16 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);

  cfg.option = "tsdbMetaCompactRatio";
  cfg.ptr = &tsTsdbMetaCompactRatio;
  cfg.valType = TAOS_CFG_VTYPE_INT32;
  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
  cfg.minValue = 0;
  cfg.maxValue = 100;
  cfg.ptrLength = 0;
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);

  assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM);
#ifdef TD_TSZ
  // lossy compress
@@ -277,6 +277,7 @@ do { \
#define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 1000
#define TSDB_META_COMPACT_RATIO 0 // disable tsdb meta compact by default

#define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650
@@ -44,6 +44,7 @@ typedef struct {

  SFSStatus* cstatus; // current status
  SHashObj* metaCache; // meta cache
  SHashObj* metaCacheComp; // meta cache for compact
  bool intxn;
  SFSStatus* nstatus; // new status
} STsdbFS;
@@ -14,6 +14,8 @@
 */
#include "tsdbint.h"

extern int32_t tsTsdbMetaCompactRatio;

#define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
  if (key < 0) {
@@ -55,8 +57,9 @@ typedef struct {
#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))

static int tsdbCommitMeta(STsdbRepo *pRepo);
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen);
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact);
static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid);
static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile);
static int tsdbCommitTSData(STsdbRepo *pRepo);
static void tsdbStartCommit(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
@@ -261,6 +264,35 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {

// =================== Commit Meta Data
static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
  STsdbFS * pfs = REPO_FS(pRepo);
  SMFile * pOMFile = pfs->cstatus->pmf;
  SDiskID did;

  // Create/Open a meta file or open the existing file
  if (pOMFile == NULL) {
    // Create a new meta file
    did.level = TFS_PRIMARY_LEVEL;
    did.id = TFS_PRIMARY_ID;
    tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));

    if (open && tsdbCreateMFile(pMf, true) < 0) {
      tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
      return -1;
    }

    tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf));
  } else {
    tsdbInitMFileEx(pMf, pOMFile);
    if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) {
      tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
      return -1;
    }
  }

  return 0;
}

static int tsdbCommitMeta(STsdbRepo *pRepo) {
  STsdbFS * pfs = REPO_FS(pRepo);
  SMemTable *pMem = pRepo->imem;
@@ -269,35 +301,26 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
  SActObj * pAct = NULL;
  SActCont * pCont = NULL;
  SListNode *pNode = NULL;
  SDiskID did;

  ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0);

  if (listNEles(pMem->actList) <= 0) {
    // no meta data to commit, just keep the old meta file
    tsdbUpdateMFile(pfs, pOMFile);
    if (tsTsdbMetaCompactRatio > 0) {
      if (tsdbInitCommitMetaFile(pRepo, &mf, false) < 0) {
        return -1;
      }
      int ret = tsdbCompactMetaFile(pRepo, pfs, &mf);
      if (ret < 0) tsdbError("compact meta file error");

      return ret;
    }
    return 0;
  } else {
    // Create/Open a meta file or open the existing file
    if (pOMFile == NULL) {
      // Create a new meta file
      did.level = TFS_PRIMARY_LEVEL;
      did.id = TFS_PRIMARY_ID;
      tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));

      if (tsdbCreateMFile(&mf, true) < 0) {
        tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
    if (tsdbInitCommitMetaFile(pRepo, &mf, true) < 0) {
      return -1;
    }

      tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));
    } else {
      tsdbInitMFileEx(&mf, pOMFile);
      if (tsdbOpenMFile(&mf, O_WRONLY) < 0) {
        tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
        return -1;
      }
    }
  }

  // Loop to write
@@ -305,7 +328,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
    pAct = (SActObj *)pNode->data;
    if (pAct->act == TSDB_UPDATE_META) {
      pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
      if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
      if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, false) < 0) {
        tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
                  tstrerror(terrno));
        tsdbCloseMFile(&mf);
@@ -338,6 +361,10 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
  tsdbCloseMFile(&mf);
  tsdbUpdateMFile(pfs, &mf);

  if (tsTsdbMetaCompactRatio > 0 && tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) {
    tsdbError("compact meta file error");
  }

  return 0;
}
@@ -375,7 +402,7 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
           pRtn->minFid, pRtn->midFid, pRtn->maxFid);
}

static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) {
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact) {
  char buf[64] = "\0";
  void * pBuf = buf;
  SKVRecord rInfo;
@@ -401,13 +428,18 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void
  }

  tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)));
  SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid));

  SHashObj* cache = compact ? pfs->metaCacheComp : pfs->metaCache;

  pMFile->info.nRecords++;

  SKVRecord *pRecord = taosHashGet(cache, (void *)&uid, sizeof(uid));
  if (pRecord != NULL) {
    pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord));
  } else {
    pMFile->info.nRecords++;
  }
  taosHashPut(pfs->metaCache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
  taosHashPut(cache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));

  return 0;
}
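The extra compact argument only selects which uid-to-record map the write is tracked in: normal commits keep using pfs->metaCache, while records rewritten into the temporary file during compaction go into pfs->metaCacheComp, since their offsets in the new file differ from those in the live file. The same call also maintains the per-file statistics that later drive the compaction decision. A simplified, self-contained sketch of that bookkeeping (not a line-for-line copy of the function; field names shortened, sizes invented):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: per-file statistics in the spirit of the SMFile info block. */
typedef struct { int64_t nRecords, tombSize, size; } MetaStats;

/* Writing a record for a uid that already exists turns the old copy into dead bytes;
   the file itself only grows until it is compacted. */
static void on_update(MetaStats *st, int64_t newLen, int64_t oldLen /* 0 if uid is new */) {
  if (oldLen > 0) {
    st->tombSize += oldLen;   /* superseded record: reclaimable only by compaction */
  } else {
    st->nRecords++;           /* genuinely new record */
  }
  st->size += newLen;
}

int main(void) {
  MetaStats st = {0};
  on_update(&st, 128, 0);     /* first version of a table's meta          */
  on_update(&st, 256, 128);   /* same table updated: 128 bytes now "tomb" */
  printf("records=%lld tomb=%lld size=%lld\n",
         (long long)st.nRecords, (long long)st.tombSize, (long long)st.size);
  return 0;
}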
@@ -442,6 +474,129 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
  return 0;
}

static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) {
  float delPercent = (float)(pMFile->info.nDels) / (float)(pMFile->info.nRecords);
  float tombPercent = (float)(pMFile->info.tombSize) / (float)(pMFile->info.size);
  float compactRatio = (float)(tsTsdbMetaCompactRatio)/100;

  if (delPercent < compactRatio && tombPercent < compactRatio) {
    return 0;
  }

  if (tsdbOpenMFile(pMFile, O_RDONLY) < 0) {
    tsdbError("open meta file %s compact fail", pMFile->f.rname);
    return -1;
  }

  tsdbInfo("begin compact tsdb meta file, ratio:%d, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64,
           tsTsdbMetaCompactRatio, pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size);

  SMFile mf;
  SDiskID did;

  // first, create the tmp meta file
  did.level = TFS_PRIMARY_LEVEL;
  did.id = TFS_PRIMARY_ID;
  tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)) + 1);

  if (tsdbCreateMFile(&mf, true) < 0) {
    tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
    return -1;
  }

  tsdbInfo("vgId:%d meta file %s is created to compact meta data", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));

  // second, iterate over metaCache
  int code = -1;
  int64_t maxBufSize = 1024;
  SKVRecord *pRecord;
  void *pBuf = NULL;

  pBuf = malloc((size_t)maxBufSize);
  if (pBuf == NULL) {
    goto _err;
  }

  // init the compact meta cache
  assert(pfs->metaCacheComp == NULL);
  pfs->metaCacheComp = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
  if (pfs->metaCacheComp == NULL) {
    goto _err;
  }

  pRecord = taosHashIterate(pfs->metaCache, NULL);
  while (pRecord) {
    if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) {
      tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
                tstrerror(terrno));
      goto _err;
    }
    if (pRecord->size > maxBufSize) {
      maxBufSize = pRecord->size;
      void* tmp = realloc(pBuf, (size_t)maxBufSize);
      if (tmp == NULL) {
        goto _err;
      }
      pBuf = tmp;
    }
    int nread = (int)tsdbReadMFile(pMFile, pBuf, pRecord->size);
    if (nread < 0) {
      tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
                tstrerror(terrno));
      goto _err;
    }

    if (nread < pRecord->size) {
      tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d",
                REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread);
      goto _err;
    }

    if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, true) < 0) {
      tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid,
                tstrerror(terrno));
      goto _err;
    }

    pRecord = taosHashIterate(pfs->metaCache, pRecord);
  }
  code = 0;

_err:
  if (code == 0) TSDB_FILE_FSYNC(&mf);
  tsdbCloseMFile(&mf);
  tsdbCloseMFile(pMFile);

  if (code == 0) {
    // rename meta.tmp -> meta
    tsdbInfo("vgId:%d meta file rename %s -> %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf), TSDB_FILE_FULL_NAME(pMFile));
    taosRename(mf.f.aname,pMFile->f.aname);
    tstrncpy(mf.f.aname, pMFile->f.aname, TSDB_FILENAME_LEN);
    tstrncpy(mf.f.rname, pMFile->f.rname, TSDB_FILENAME_LEN);
    // update current meta file info
    pfs->nstatus->pmf = NULL;
    tsdbUpdateMFile(pfs, &mf);

    taosHashCleanup(pfs->metaCache);
    pfs->metaCache = pfs->metaCacheComp;
    pfs->metaCacheComp = NULL;
  } else {
    // remove the meta.tmp file
    remove(mf.f.aname);
    taosHashCleanup(pfs->metaCacheComp);
    pfs->metaCacheComp = NULL;
  }

  tfree(pBuf);

  ASSERT(mf.info.nDels == 0);
  ASSERT(mf.info.tombSize == 0);

  tsdbInfo("end compact tsdb meta file,code:%d,nRecords:%" PRId64 ",size:%" PRId64,
           code,mf.info.nRecords,mf.info.size);
  return code;
}

// =================== Commit Time-Series Data
static int tsdbCommitTSData(STsdbRepo *pRepo) {
  SMemTable *pMem = pRepo->imem;
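tsdbCompactMetaFile follows the usual copy-and-swap recipe: only the live records are rewritten into a temporary meta file, the file is fsynced, and only then is it renamed over the old one, so a crash at any point leaves either the complete old file or the complete new file on disk. A generic POSIX sketch of that pattern (this is not the TDengine API; the file name and helper are invented for illustration):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Generic atomic-replace sketch: write a fresh copy, flush it, then rename it
   over the original. rename() is atomic on POSIX file systems, so readers see
   either the old file or the whole new one, never a half-written mix. */
static int replace_file(const char *path, const void *buf, size_t len) {
  char tmp[1024];
  snprintf(tmp, sizeof(tmp), "%s.tmp", path);

  int fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644);
  if (fd < 0) return -1;
  if (write(fd, buf, len) != (ssize_t)len || fsync(fd) != 0) {
    close(fd);
    unlink(tmp);               /* on failure, drop the temporary copy */
    return -1;
  }
  close(fd);
  return rename(tmp, path);    /* on success, promote it in one step  */
}

int main(void) {
  const char payload[] = "compacted meta records";
  return replace_file("meta.demo", payload, sizeof(payload) - 1) == 0 ? 0 : 1;
}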
@@ -215,6 +215,7 @@ STsdbFS *tsdbNewFS(STsdbCfg *pCfg) {
  }

  pfs->intxn = false;
  pfs->metaCacheComp = NULL;

  pfs->nstatus = tsdbNewFSStatus(maxFSet);
  if (pfs->nstatus == NULL) {
|
|||
"data", // TSDB_FILE_DATA
|
||||
"last", // TSDB_FILE_LAST
|
||||
"", // TSDB_FILE_MAX
|
||||
"meta" // TSDB_FILE_META
|
||||
"meta", // TSDB_FILE_META
|
||||
};
|
||||
|
||||
static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname);
|
||||
|
|
|
@@ -43,6 +43,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable);
static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
static void tsdbFreeTableSchema(STable *pTable);
@@ -128,21 +129,16 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
  tsdbUnlockRepoMeta(pRepo);

  // Write to memtable action
  // TODO: refactor duplicate codes
  int tlen = 0;
  void *pBuf = NULL;
  if (newSuper || superChanged) {
    tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super);
    pBuf = tsdbAllocBytes(pRepo, tlen);
    if (pBuf == NULL) goto _err;
    void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, super);
    ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
    // add insert new super table action
    if (tsdbInsertNewTableAction(pRepo, super) != 0) {
      goto _err;
    }
  }
  // add insert new table action
  if (tsdbInsertNewTableAction(pRepo, table) != 0) {
    goto _err;
  }
  tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table);
  pBuf = tsdbAllocBytes(pRepo, tlen);
  if (pBuf == NULL) goto _err;
  void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, table);
  ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);

  if (tsdbCheckCommit(pRepo) < 0) return -1;
@@ -383,7 +379,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
    tdDestroyTSchemaBuilder(&schemaBuilder);
  }

  // Chage in memory
  // Change in memory
  if (pNewSchema != NULL) { // change super table tag schema
    TSDB_WLOCK_TABLE(pTable->pSuper);
    STSchema *pOldSchema = pTable->pSuper->tagSchema;
@@ -426,6 +422,21 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
}

// ------------------ INTERNAL FUNCTIONS ------------------
static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable) {
  int tlen = 0;
  void *pBuf = NULL;

  tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
  pBuf = tsdbAllocBytes(pRepo, tlen);
  if (pBuf == NULL) {
    return -1;
  }
  void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, pTable);
  ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);

  return 0;
}

STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
  STsdbMeta *pMeta = (STsdbMeta *)calloc(1, sizeof(*pMeta));
  if (pMeta == NULL) {
@@ -617,6 +628,7 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
  if (pTable->lastCols == NULL) {
    return -1;
  }
  // TODO: use binary search instead
  for (int16_t i = 0; i < pTable->maxColNum; ++i) {
    if (pTable->lastCols[i].colId == colId) {
      return i;
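The new TODO in tsdbGetLastColumnsIndexByColId notes that the linear scan could become a binary search; that only pays off if lastCols is kept sorted by colId. A standalone sketch of what such a lookup could look like (the array and helper name are illustrative, not the actual STable layout):

#include <stdint.h>
#include <stdio.h>

/* Sketch: binary search over column ids, assuming the array is sorted by colId. */
static int16_t find_col_index(const int16_t *colIds, int16_t n, int16_t colId) {
  int16_t lo = 0, hi = (int16_t)(n - 1);
  while (lo <= hi) {
    int16_t mid = (int16_t)(lo + (hi - lo) / 2);
    if (colIds[mid] == colId) return mid;
    if (colIds[mid] < colId) lo = (int16_t)(mid + 1);
    else                     hi = (int16_t)(mid - 1);
  }
  return -1;  /* same "not found" convention as the original loop */
}

int main(void) {
  int16_t ids[] = {1, 3, 4, 7, 9};
  printf("%d %d\n", find_col_index(ids, 5, 7), find_col_index(ids, 5, 2));  /* 3 -1 */
  return 0;
}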
@@ -734,10 +746,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
  TSDB_WUNLOCK_TABLE(pCTable);

  if (insertAct) {
    int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable);
    void *buf = tsdbAllocBytes(pRepo, tlen);
    ASSERT(buf != NULL);
    tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pCTable);
    if (tsdbInsertNewTableAction(pRepo, pCTable) != 0) {
      tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " tsdbInsertNewTableAction fail", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
                TABLE_TID(pTable), TABLE_UID(pTable));
    }
  }
}
@ -0,0 +1,97 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.getcwd())
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
import taos
|
||||
import threading
|
||||
import subprocess
|
||||
from random import choice
|
||||
|
||||
class TwoClients:
|
||||
def initConnection(self):
|
||||
self.host = "chr03"
|
||||
self.user = "root"
|
||||
self.password = "taosdata"
|
||||
self.config = "/etc/taos/"
|
||||
self.port =6030
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
|
||||
# new taos client
|
||||
conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config )
|
||||
print(conn1)
|
||||
cur1 = conn1.cursor()
|
||||
tdSql.init(cur1, True)
|
||||
|
||||
tdSql.execute("drop database if exists db3")
|
||||
|
||||
# insert data with taosc
|
||||
for i in range(10):
|
||||
os.system("taosdemo -f manualTest/TD-5114/insertDataDb3Replica2.json -y ")
|
||||
# check that the data is correct
|
||||
tdSql.execute("show databases")
|
||||
tdSql.execute("use db3")
|
||||
tdSql.query("select count (tbname) from stb0")
|
||||
tdSql.checkData(0, 0, 20000)
|
||||
tdSql.query("select count (*) from stb0")
|
||||
tdSql.checkData(0, 0, 4000000)
|
||||
|
||||
# insert data with the python connector; to use this case, uncomment the block below.
|
||||
|
||||
# for x in range(10):
|
||||
# dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"]
|
||||
# tdSql.execute("drop database if exists db3")
|
||||
# tdSql.execute("create database db3 keep 3650 replica 2 ")
|
||||
# tdSql.execute("use db3")
|
||||
# tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
# col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(3000), tag1 int)''')
|
||||
# rowNum2= 988
|
||||
# for i in range(rowNum2):
|
||||
# tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) )
|
||||
# rowNum3= 988
|
||||
# for i in range(rowNum3):
|
||||
# tdSql.execute("alter table test drop column col%d ;" %( i+14) )
|
||||
# self.rowNum = 50
|
||||
# self.rowNum2 = 2000
|
||||
# self.ts = 1537146000000
|
||||
# for j in range(self.rowNum2):
|
||||
# tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) )
|
||||
# for i in range(self.rowNum):
|
||||
# tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
# % (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
# # check data correct
|
||||
# tdSql.execute("show databases")
|
||||
# tdSql.execute("use db3")
|
||||
# tdSql.query("select count (tbname) from test")
|
||||
# tdSql.checkData(0, 0, 200)
|
||||
# tdSql.query("select count (*) from test")
|
||||
# tdSql.checkData(0, 0, 200000)
|
||||
|
||||
|
||||
# delete useless file
|
||||
testcaseFilename = os.path.split(__file__)[-1]
|
||||
os.system("rm -rf ./insert_res.txt")
|
||||
os.system("rm -rf manualTest/TD-5114/%s.sql" % testcaseFilename )
|
||||
|
||||
clients = TwoClients()
|
||||
clients.initConnection()
|
||||
# clients.getBuildPath()
|
||||
clients.run()
|
|
@ -0,0 +1,61 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db3",
|
||||
"drop": "yes",
|
||||
"replica": 2,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 20000,
|
||||
"childtable_prefix": "stb0_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 1000,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 2000,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,275 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from sys import version
|
||||
from fabric import Connection
|
||||
import random
|
||||
import time
|
||||
import datetime
|
||||
import logging
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
|
||||
class Node:
|
||||
def __init__(self, index, username, hostIP, password, version):
|
||||
self.index = index
|
||||
self.username = username
|
||||
self.hostIP = hostIP
|
||||
# self.hostName = hostName
|
||||
# self.homeDir = homeDir
|
||||
self.version = version
|
||||
self.verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % self.version
|
||||
self.installPath = "TDengine-enterprise-server-%s" % self.version
|
||||
# self.corePath = '/coredump'
|
||||
self.conn = Connection("{}@{}".format(username, hostIP), connect_kwargs={"password": "{}".format(password)})
|
||||
|
||||
|
||||
def buildTaosd(self):
|
||||
try:
|
||||
print(self.conn)
|
||||
# self.conn.run('echo "1234" > /home/chr/installtest/test.log')
|
||||
self.conn.run("cd /home/chr/installtest/ && tar -xvf %s " %self.verName)
|
||||
self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath)
|
||||
except Exception as e:
|
||||
print("Build Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
pass
|
||||
|
||||
def rebuildTaosd(self):
|
||||
try:
|
||||
print(self.conn)
|
||||
# self.conn.run('echo "1234" > /home/chr/installtest/test.log')
|
||||
self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath)
|
||||
except Exception as e:
|
||||
print("Build Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
pass
|
||||
|
||||
def startTaosd(self):
|
||||
try:
|
||||
self.conn.run("sudo systemctl start taosd")
|
||||
except Exception as e:
|
||||
print("Start Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
|
||||
def restartTarbi(self):
|
||||
try:
|
||||
self.conn.run("sudo systemctl restart tarbitratord ")
|
||||
except Exception as e:
|
||||
print("Start Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
|
||||
def clearData(self):
|
||||
timeNow = datetime.datetime.now()
|
||||
# timeYes = datetime.datetime.now() + datetime.timedelta(days=-1)
|
||||
timStr = timeNow.strftime('%Y%m%d%H%M%S')
|
||||
# timStr = timeNow.strftime('%Y%m%d%H%M%S')
|
||||
try:
|
||||
# self.conn.run("mv /var/lib/taos/ /var/lib/taos%s " % timStr)
|
||||
self.conn.run("rm -rf /home/chr/data/taos*")
|
||||
except Exception as e:
|
||||
print("rm -rf /var/lib/taos error %d " % self.index)
|
||||
logging.exception(e)
|
||||
|
||||
def stopTaosd(self):
|
||||
try:
|
||||
self.conn.run("sudo systemctl stop taosd")
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
|
||||
def restartTaosd(self):
|
||||
try:
|
||||
self.conn.run("sudo systemctl restart taosd")
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % self.index)
|
||||
logging.exception(e)
|
||||
|
||||
class oneNode:
|
||||
|
||||
def FirestStartNode(self, id, username, IP, passwd, version):
|
||||
# get installPackage
|
||||
verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version
|
||||
# installPath = "TDengine-enterprise-server-%s" % self.version
|
||||
node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0')
|
||||
node131.conn.run('sshpass -p tbase125! scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id))
|
||||
node131.conn.close()
|
||||
# install TDengine at 192.168.103/104/141
|
||||
try:
|
||||
node = Node(id, username, IP, passwd, version)
|
||||
node.conn.run('echo "start taosd"')
|
||||
node.buildTaosd()
|
||||
# clear DataPath, if the data needs to be cleared
|
||||
node.clearData()
|
||||
node.startTaosd()
|
||||
if id == 103 :
|
||||
node.restartTarbi()
|
||||
print("start taosd ver:%s node:%d successfully " % (version,id))
|
||||
node.conn.close()
|
||||
|
||||
# query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
|
||||
# assert query_pid == 1 , "node %d: start taosd failed " % id
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % id)
|
||||
logging.exception(e)
|
||||
|
||||
def startNode(self, id, username, IP, passwd, version):
|
||||
# start TDengine
|
||||
try:
|
||||
node = Node(id, username, IP, passwd, version)
|
||||
node.conn.run('echo "restart taosd"')
|
||||
# clear DataPath, if the data needs to be cleared
|
||||
node.clearData()
|
||||
node.restartTaosd()
|
||||
time.sleep(5)
|
||||
if id == 103 :
|
||||
node.restartTarbi()
|
||||
print("start taosd ver:%s node:%d successfully " % (version,id))
|
||||
node.conn.close()
|
||||
|
||||
# query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
|
||||
# assert query_pid == 1 , "node %d: start taosd failed " % id
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % id)
|
||||
logging.exception(e)
|
||||
|
||||
def firstUpgradeNode(self, id, username, IP, passwd, version):
|
||||
# get installPackage
|
||||
verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version
|
||||
# installPath = "TDengine-enterprise-server-%s" % self.version
|
||||
node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0')
|
||||
node131.conn.run('echo "upgrade cluster"')
|
||||
node131.conn.run('sshpass -p tbase125! scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id))
|
||||
node131.conn.close()
|
||||
# upgrade TDengine at 192.168.103/104/141
|
||||
try:
|
||||
node = Node(id, username, IP, passwd, version)
|
||||
node.conn.run('echo "start taosd"')
|
||||
node.conn.run('echo "1234" > /home/chr/test.log')
|
||||
node.buildTaosd()
|
||||
time.sleep(5)
|
||||
node.startTaosd()
|
||||
if id == 103 :
|
||||
node.restartTarbi()
|
||||
print("start taosd ver:%s node:%d successfully " % (version,id))
|
||||
node.conn.close()
|
||||
|
||||
# query_pid = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
|
||||
# assert query_pid == 1 , "node %d: start taosd failed " % id
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % id)
|
||||
logging.exception(e)
|
||||
|
||||
def upgradeNode(self, id, username, IP, passwd, version):
|
||||
|
||||
# backCluster TDengine at 192.168.103/104/141
|
||||
try:
|
||||
node = Node(id, username, IP, passwd, version)
|
||||
node.conn.run('echo "rollback taos"')
|
||||
node.rebuildTaosd()
|
||||
time.sleep(5)
|
||||
node.startTaosd()
|
||||
if id == 103 :
|
||||
node.restartTarbi()
|
||||
print("start taosd ver:%s node:%d successfully " % (version,id))
|
||||
node.conn.close()
|
||||
except Exception as e:
|
||||
print("Stop Taosd error for node %d " % id)
|
||||
logging.exception(e)
|
||||
|
||||
|
||||
# how to use: cd TDinternal/community/test/pytest && python3 manualTest/rollingUpgrade.py ; while data is being inserted, we can start "python3 manualTest/rollingUpgrade.py" again. Example: oneNode().FirestStartNode(103,'root','192.168.1.103','tbase125!','2.0.20.0')
|
||||
|
||||
|
||||
# node103=oneNode().FirestStartNode(103,'root','192.168.1.103','tbase125!','2.0.20.0')
|
||||
# node104=oneNode().FirestStartNode(104,'root','192.168.1.104','tbase125!','2.0.20.0')
|
||||
# node141=oneNode().FirestStartNode(141,'root','192.168.1.141','tbase125!','2.0.20.0')
|
||||
|
||||
# node103=oneNode().startNode(103,'root','192.168.1.103','tbase125!','2.0.20.0')
|
||||
# time.sleep(30)
|
||||
# node141=oneNode().startNode(141,'root','192.168.1.141','tbase125!','2.0.20.0')
|
||||
# time.sleep(30)
|
||||
# node104=oneNode().startNode(104,'root','192.168.1.104','tbase125!','2.0.20.0')
|
||||
# time.sleep(30)
|
||||
|
||||
# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.5')
|
||||
# time.sleep(30)
|
||||
# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.5')
|
||||
# time.sleep(30)
|
||||
# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.5')
|
||||
# time.sleep(30)
|
||||
|
||||
# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10')
|
||||
# time.sleep(30)
|
||||
# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10')
|
||||
# time.sleep(30)
|
||||
# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10')
|
||||
# time.sleep(30)
|
||||
|
||||
# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12')
|
||||
# time.sleep(30)
|
||||
# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12')
|
||||
# time.sleep(30)
|
||||
# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12')
|
||||
# time.sleep(30)
|
||||
|
||||
|
||||
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.0')
|
||||
# time.sleep(120)
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.0')
|
||||
# time.sleep(180)
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.0')
|
||||
# time.sleep(240)
|
||||
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.5')
|
||||
# time.sleep(120)
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.5')
|
||||
# time.sleep(120)
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.5')
|
||||
# time.sleep(180)
|
||||
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10')
|
||||
# time.sleep(120)
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10')
|
||||
# time.sleep(120)
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10')
|
||||
# time.sleep(180)
|
||||
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12')
|
||||
# time.sleep(180)
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12')
|
||||
# time.sleep(180)
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12')
|
||||
|
||||
|
||||
# node141=oneNode().firstUpgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.9')
|
||||
# time.sleep(5)
|
||||
# node103=oneNode().firstUpgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.9')
|
||||
# time.sleep(5)
|
||||
# node104=oneNode().firstUpgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.9')
|
||||
# time.sleep(30)
|
||||
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.10')
|
||||
# time.sleep(12)
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.10')
|
||||
# time.sleep(12)
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.10')
|
||||
# time.sleep(180)
|
||||
|
||||
# node103=oneNode().upgradeNode(103,'root','192.168.1.103','tbase125!','2.0.20.12')
|
||||
# time.sleep(120)
|
||||
# node141=oneNode().upgradeNode(141,'root','192.168.1.141','tbase125!','2.0.20.12')
|
||||
# time.sleep(120)
|
||||
# node104=oneNode().upgradeNode(104,'root','192.168.1.104','tbase125!','2.0.20.12')
|
|
@ -0,0 +1,87 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 10,
|
||||
"num_of_records_per_req": 1000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db1",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 1000,
|
||||
"childtable_prefix": "stb00_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 1000,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 1000,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 10000,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 1000,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 200,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 10,
|
||||
"num_of_records_per_req": 1000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db1",
|
||||
"drop": "yes",
|
||||
"replica": 2,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 1000,
|
||||
"childtable_prefix": "stb00_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 100,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 100,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 1000,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 200,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db2",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 200000,
|
||||
"childtable_prefix": "stb0_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 1000,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 0,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 2,
|
||||
"childtable_prefix": "stb1_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 1000,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 5,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db2",
|
||||
"drop": "no",
|
||||
"replica": 1,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 1,
|
||||
"childtable_prefix": "stb0_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 100,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 0,
|
||||
"childtable_limit": -1,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"yes",
|
||||
"childtable_count": 1,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 10,
|
||||
"childtable_limit": -1,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db2",
|
||||
"drop": "no",
|
||||
"replica": 2,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 1,
|
||||
"childtable_prefix": "stb0_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 100,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 0,
|
||||
"childtable_limit": -1,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"yes",
|
||||
"childtable_count": 1,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 10,
|
||||
"childtable_limit": -1,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db2",
|
||||
"drop": "yes",
|
||||
"replica": 2,
|
||||
"days": 10,
|
||||
"cache": 50,
|
||||
"blocks": 8,
|
||||
"precision": "ms",
|
||||
"keep": 365,
|
||||
"minRows": 100,
|
||||
"maxRows": 4096,
|
||||
"comp":2,
|
||||
"walLevel":1,
|
||||
"cachelast":0,
|
||||
"quorum":1,
|
||||
"fsync":3000,
|
||||
"update": 0
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 2000,
|
||||
"childtable_prefix": "stb0_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 100,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 2000,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 2,
|
||||
"childtable_prefix": "stb1_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 5,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset":0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval":0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
@ -0,0 +1,161 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from distutils.log import debug
|
||||
import sys
|
||||
import os
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
import subprocess
|
||||
from random import choice
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getBuildPath(self):
|
||||
global selfPath
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def run(self):
|
||||
|
||||
# set path para
|
||||
buildPath = self.getBuildPath()
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
binPath = buildPath+ "/build/bin/"
|
||||
testPath = selfPath+ "/../../../"
|
||||
walFilePath = testPath + "/sim/dnode1/data/mnode_bak/wal/"
|
||||
|
||||
# new db and insert data
|
||||
tdSql.execute("drop database if exists db2")
|
||||
os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath)
|
||||
tdSql.execute("drop database if exists db1")
|
||||
os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath)
|
||||
tdSql.execute("drop table if exists db2.stb0")
|
||||
os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath)
|
||||
|
||||
tdSql.execute("use db2")
|
||||
tdSql.execute("drop table if exists stb1_0")
|
||||
tdSql.execute("drop table if exists stb1_1")
|
||||
tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')")
|
||||
tdSql.execute("alter table db2.stb0 add column c4 int")
|
||||
tdSql.execute("alter table db2.stb0 drop column c2")
|
||||
tdSql.execute("alter table db2.stb0 add tag t3 int;")
|
||||
tdSql.execute("alter table db2.stb0 drop tag t1")
|
||||
tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ")
|
||||
tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)")
|
||||
tdSql.execute("alter table stb2_0 add column c2 binary(4)")
|
||||
tdSql.execute("alter table stb2_0 drop column c1")
|
||||
tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")
|
||||
|
||||
# create db utest
|
||||
|
||||
|
||||
dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"]
|
||||
|
||||
tdSql.execute("drop database if exists utest")
|
||||
tdSql.execute("create database utest keep 3650")
|
||||
tdSql.execute("use utest")
|
||||
tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''')
|
||||
|
||||
# rowNum1 = 13
|
||||
# for i in range(rowNum1):
|
||||
# columnName= "col" + str(i+1)
|
||||
# tdSql.execute("alter table test drop column %s ;" % columnName )
|
||||
|
||||
rowNum2= 988
|
||||
for i in range(rowNum2):
|
||||
tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) )
|
||||
|
||||
rowNum3= 988
|
||||
for i in range(rowNum3):
|
||||
tdSql.execute("alter table test drop column col%d ;" %( i+14) )
|
||||
|
||||
|
||||
self.rowNum = 1
|
||||
self.rowNum2 = 100
|
||||
self.rowNum3 = 20
|
||||
self.ts = 1537146000000
|
||||
|
||||
for j in range(self.rowNum2):
|
||||
tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) )
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
|
||||
for j in range(self.rowNum2):
|
||||
tdSql.execute("drop table if exists test%d" % (j+1))
|
||||
|
||||
|
||||
        # stop taosd and restart taosd
        tdDnodes.stop(1)
        sleep(10)
        tdDnodes.start(1)
        sleep(5)
        tdSql.execute("reset query cache")
        query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
        print(query_pid2)

        # verify that the data is correct
        tdSql.execute("use db2")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkRows(0)
        tdSql.query("select count(*) from stb0_0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb2_0")
        tdSql.checkData(0, 0, 2)

        tdSql.execute("use utest")
        tdSql.query("select count (tbname) from test")
        tdSql.checkData(0, 0, 1)

        # delete useless files
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tsdb/%s.sql" % testcaseFilename)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,160 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
import subprocess
from random import choice

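# This standalone driver (not registered through tdCases) appears to target an
# already running taosd on the host configured below, exercising the same
# schema-churn, restart, and verification sequence from a fresh client connection.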
class TwoClients:
    def initConnection(self):
        self.host = "chenhaoran02"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos/"
        self.port = 6030
        self.rowNum = 10
        self.ts = 1537146000000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # ensure the caller's empty-string check works even if taosd is never found
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"
        walFilePath = "/var/lib/taos/mnode_bak/wal/"

        # new taos client
        conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        print(conn1)
        cur1 = conn1.cursor()
        tdSql.init(cur1, True)

        # new db, new super tables, child tables, and insert data
        tdSql.execute("drop database if exists db2")
        os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath)
        tdSql.execute("drop database if exists db1")
        os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath)
        tdSql.execute("drop table if exists db2.stb0")
        os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath)

        # new general tables and modify general tables
        tdSql.execute("use db2")
        tdSql.execute("drop table if exists stb1_0")
        tdSql.execute("drop table if exists stb1_1")
        tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')")
        tdSql.execute("alter table db2.stb0 add column c4 int")
        tdSql.execute("alter table db2.stb0 drop column c2")
        tdSql.execute("alter table db2.stb0 add tag t3 int")
        tdSql.execute("alter table db2.stb0 drop tag t1")
        tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ")
        tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)")
        tdSql.execute("alter table stb2_0 add column c2 binary(4)")
        tdSql.execute("alter table stb2_0 drop column c1")
        tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")

        # create db utest and modify super tables
        dataType = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(20)", "nchar(20)",
                    "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"]
        tdSql.execute("drop database if exists utest")
        tdSql.execute("create database utest keep 3650")
        tdSql.execute("use utest")
        tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''')
        rowNum2 = 988
        for i in range(rowNum2):
            tdSql.execute("alter table test add column col%d %s ;" % (i + 14, choice(dataType)))
        rowNum3 = 988
        for i in range(rowNum3):
            tdSql.execute("alter table test drop column col%d ;" % (i + 14))

        self.rowNum = 1
        self.rowNum2 = 100
        self.ts = 1537146000000
        for j in range(self.rowNum2):
            tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j, j))
            for i in range(self.rowNum):
                tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                              % (j, self.ts + i * 1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
        # delete child tables
        for j in range(self.rowNum2):
            tdSql.execute("drop table if exists test%d" % (j + 1))

        # kill and restart taosd
        os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2")
        sleep(20)
        print("taosd stopped, restarting")
        os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &")
        sleep(4)
        tdSql.execute("reset query cache")
        query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
        print(query_pid2)

        # new taos connection to the restarted server
        conn2 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        print(conn2)
        cur2 = conn2.cursor()
        tdSql.init(cur2, True)

        # check data correct
        tdSql.query("show databases")
        tdSql.execute("use db2")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkRows(0)
        tdSql.query("select count(*) from stb0_0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb2_0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select * from stb2_0")
        tdSql.checkData(1, 2, 'R')
        tdSql.execute("use utest")
        tdSql.query("select count (tbname) from test")
        tdSql.checkData(0, 0, 1)

        # delete useless files
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tsdb/%s.sql" % testcaseFilename)


clients = TwoClients()
clients.initConnection()
# clients.getBuildPath()
clients.run()
@ -0,0 +1,170 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
import subprocess
from random import choice

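# This variant appears to exercise the same scenario against a two-replica setup:
# the databases are created with replica 2 and one dnode is dropped, wiped, and
# added back before the data is re-verified.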
class TwoClients:
    def initConnection(self):
        self.host = "chenhaoran02"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos/"
        self.port = 6030
        self.rowNum = 10
        self.ts = 1537146000000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # ensure the caller's empty-string check works even if taosd is never found
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"
        walFilePath = "/var/lib/taos/mnode_bak/wal/"

        # new taos client
        conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        print(conn1)
        cur1 = conn1.cursor()
        tdSql.init(cur1, True)

        # new db, new super tables, child tables, and insert data
        tdSql.execute("drop database if exists db2")
        os.system("%staosdemo -f tsdb/insertDataDb1Replica2.json -y " % binPath)
        tdSql.execute("drop database if exists db1")
        os.system("%staosdemo -f tsdb/insertDataDb2Replica2.json -y " % binPath)
        tdSql.execute("drop table if exists db2.stb0")
        os.system("%staosdemo -f tsdb/insertDataDb2NewstabReplica2.json -y " % binPath)

        # new general tables and modify general tables
        tdSql.execute("use db2")
        tdSql.execute("drop table if exists stb1_0")
        tdSql.execute("drop table if exists stb1_1")
        tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')")
        tdSql.execute("alter table db2.stb0 add column c4 int")
        tdSql.execute("alter table db2.stb0 drop column c2")
        tdSql.execute("alter table db2.stb0 add tag t3 int")
        tdSql.execute("alter table db2.stb0 drop tag t1")
        tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ")
        tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)")
        tdSql.execute("alter table stb2_0 add column c2 binary(4)")
        tdSql.execute("alter table stb2_0 drop column c1")
        tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")

        # create db utest with replica 2 and modify super tables
        dataType = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(20)", "nchar(20)",
                    "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"]
        tdSql.execute("drop database if exists utest")
        tdSql.execute("create database utest keep 3650 replica 2 ")
        tdSql.execute("use utest")
        tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(200), tag1 int)''')
        rowNum2 = 988
        for i in range(rowNum2):
            tdSql.execute("alter table test add column col%d %s ;" % (i + 14, choice(dataType)))
        rowNum3 = 988
        for i in range(rowNum3):
            tdSql.execute("alter table test drop column col%d ;" % (i + 14))
        self.rowNum = 1
        self.rowNum2 = 100
        self.ts = 1537146000000
        for j in range(self.rowNum2):
            tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j, j))
            for i in range(self.rowNum):
                tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                              % (j, self.ts + i * 1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
        # delete child tables
        for j in range(self.rowNum2):
            tdSql.execute("drop table if exists test%d" % (j + 1))

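        # Presumably the replica-2 setup lets the remaining dnode keep a full copy of
        # the data, so the dropped dnode's directory can be wiped and the node added
        # back without losing what the checks below expect to find.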
        # drop a dnode, wipe its data directory, and restart taosd
        sleep(3)
        tdSql.execute(" drop dnode 'chenhaoran02:6030'; ")
        sleep(20)
        os.system("rm -rf /var/lib/taos/*")
        print("cleared dnode chenhaoran02's data files")
        os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &")
        print("start taosd")
        sleep(10)
        tdSql.execute("reset query cache ;")
        tdSql.execute("create dnode chenhaoran02 ;")

        # os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2")
        # sleep(20)
        # os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &")
        # sleep(4)
        # tdSql.execute("reset query cache")
        # query_pid2 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
        # print(query_pid2)

        # new taos connection to the server
        conn2 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        print(conn2)
        cur2 = conn2.cursor()
        tdSql.init(cur2, True)

        # check data correct
        tdSql.query("show databases")
        tdSql.execute("use db2")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkRows(0)
        tdSql.query("select count(*) from stb0_0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb2_0")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select * from stb2_0")
        tdSql.checkData(1, 2, 'R')

        tdSql.execute("use utest")
        tdSql.query("select count (tbname) from test")
        tdSql.checkData(0, 0, 1)

        # delete useless files
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tsdb/%s.sql" % testcaseFilename)


clients = TwoClients()
clients.initConnection()
# clients.getBuildPath()
clients.run()