[td-822] merge develop branch
commit 959389badf

.travis.yml

@@ -36,6 +36,8 @@ matrix:
       - psmisc

 before_script:
+  - export TZ=Asia/Harbin
+  - date
   - cd ${TRAVIS_BUILD_DIR}
   - mkdir debug
   - cd debug

@@ -150,6 +152,8 @@ matrix:
       - DESC="trusty/gcc-4.8 build"

 before_script:
+  - export TZ=Asia/Harbin
+  - date
   - cd ${TRAVIS_BUILD_DIR}
   - mkdir debug
   - cd debug

@@ -173,6 +177,8 @@ matrix:
       - cmake

 before_script:
+  - export TZ=Asia/Harbin
+  - date
   - cd ${TRAVIS_BUILD_DIR}
   - mkdir debug
   - cd debug

@@ -197,6 +203,8 @@ matrix:
      - cmake

 before_script:
+  - export TZ=Asia/Harbin
+  - date
   - cd ${TRAVIS_BUILD_DIR}
   - mkdir debug
   - cd debug

@@ -225,6 +233,8 @@ matrix:
       - DESC="trusty/gcc-4.8 build"

 before_script:
+  - export TZ=Asia/Harbin
+  - date
   - cd ${TRAVIS_BUILD_DIR}
   - mkdir debug
   - cd debug

@@ -891,11 +891,15 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
     }

+    SKVRowBuilder kvRowBuilder = {0};
+    if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
+      return TSDB_CODE_TSC_OUT_OF_MEMORY;
+    }
+
     uint32_t ignoreTokenTypes = TK_LP;
     uint32_t numOfIgnoreToken = 1;
     for (int i = 0; i < spd.numOfAssignedCols; ++i) {
-      char *  tagVal = pTag->data + spd.elems[i].offset;
-      int16_t colIndex = spd.elems[i].colIndex;
+      SSchema* pSchema = pTagSchema + spd.elems[i].colIndex;

       index = 0;
       sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);

@@ -911,12 +915,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
         sToken.n -= 2;
       }

-      code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
+      char tagVal[TSDB_MAX_TAGS_LEN];
+      code = tsParseOneColumnData(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
       if (code != TSDB_CODE_SUCCESS) {
+        tdDestroyKVRowBuilder(&kvRowBuilder);
         return code;
       }
+
+      tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
     }

+    SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
+    tdDestroyKVRowBuilder(&kvRowBuilder);
+    if (row == NULL) {
+      return TSDB_CODE_TSC_OUT_OF_MEMORY;
+    }
+    tdSortKVRowByColIdx(row);
+    pTag->dataLen = kvRowLen(row);
+    kvRowCpy(pTag->data, row);
+    free(row);
+
     index = 0;
     sToken = tStrGetToken(sql, &index, false, 0, NULL);
     sql += index;

@@ -924,29 +942,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
     }

-    // 2. set the null value for the columns that do not assign values
-    if (spd.numOfAssignedCols < spd.numOfCols) {
-      char *ptr = pTag->data;
-
-      for (int32_t i = 0; i < spd.numOfCols; ++i) {
-        if (!spd.hasVal[i]) {  // current tag column do not have any value to insert, set it to null
-          if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
-            setVardataNull(ptr, pTagSchema[i].type);
-          } else {
-            setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
-          }
-        }
-
-        ptr += pTagSchema[i].bytes;
-      }
-    }
-
-    // 3. calculate the actual data size of STagData
-    pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen);
-    for (int32_t t = 0; t < numOfTags; ++t) {
-      pTag->dataLen += pTagSchema[t].bytes;
-      pCmd->payloadLen += pTagSchema[t].bytes;
-    }
+    pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
     pTag->dataLen = htonl(pTag->dataLen);

     if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {

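Both the parser path above and the doCheckForCreateFromStable path below now funnel tag values through the SKVRowBuilder API instead of writing them at fixed schema offsets. A minimal sketch of that builder lifecycle, pieced together from the calls visible in these hunks (the loop body and error handling here are illustrative, not the committed code):

    SKVRowBuilder builder = {0};
    if (tdInitKVRowBuilder(&builder) < 0) return TSDB_CODE_TSC_OUT_OF_MEMORY;

    for (int i = 0; i < numOfTags; ++i) {
      char tagVal[TSDB_MAX_TAGS_LEN];                 // per-tag scratch buffer
      /* ... parse or dump the i-th tag value into tagVal ... */
      tdAddColToKVRow(&builder, pSchema[i].colId, pSchema[i].type, tagVal);
    }

    SKVRow row = tdGetKVRowFromBuilder(&builder);     // materialize the row
    tdDestroyKVRowBuilder(&builder);                  // builder no longer needed
    if (row == NULL) return TSDB_CODE_TSC_OUT_OF_MEMORY;

    tdSortKVRowByColIdx(row);                         // keep the colId index ordered
    pTag->dataLen = kvRowLen(row);                    // real length, not sum of schema bytes
    kvRowCpy(pTag->data, row);
    free(row);                                        // the built row is heap-allocated
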
@@ -5623,25 +5623,42 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
   SSchema* pTagSchema = tscGetTableTagSchema(pStableMeterMetaInfo->pTableMeta);

   STagData* pTag = &pCreateTable->usingInfo.tagdata;
-  char* tagVal = pTag->data;
+  SKVRowBuilder kvRowBuilder = {0};
+  if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
+    return TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+
   int32_t ret = TSDB_CODE_SUCCESS;

   for (int32_t i = 0; i < pList->nExpr; ++i) {
-    if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
+    SSchema* pSchema = pTagSchema + i;
+    if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
       // validate the length of binary
-      if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pTagSchema[i].bytes) {
+      if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pSchema->bytes) {
+        tdDestroyKVRowBuilder(&kvRowBuilder);
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
       }
     }

-    ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type, true);
+    char tagVal[TSDB_MAX_TAGS_LEN];
+    ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true);
     if (ret != TSDB_CODE_SUCCESS) {
+      tdDestroyKVRowBuilder(&kvRowBuilder);
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
     }

-    tagVal += pTagSchema[i].bytes;
+    tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
   }

+  SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
+  tdDestroyKVRowBuilder(&kvRowBuilder);
+  if (row == NULL) {
+    return TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+  tdSortKVRowByColIdx(row);
+  pTag->dataLen = kvRowLen(row);
+  kvRowCpy(pTag->data, row);
+  free(row);
+
   // table name
   if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) {
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);

@@ -5653,7 +5670,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
     return ret;
   }

-  pTag->dataLen = tagVal - pTag->data;
   return TSDB_CODE_SUCCESS;
 }

@@ -191,7 +191,8 @@ int tscSendMsgToServer(SSqlObj *pSql) {
     .msgType = pSql->cmd.msgType,
     .pCont   = pMsg,
     .contLen = pSql->cmd.payloadLen,
-    .handle  = pSql,
+    .ahandle = pSql,
+    .handle  = &pSql->pRpcCtx,
     .code    = 0
   };

@@ -199,12 +200,12 @@ int tscSendMsgToServer(SSqlObj *pSql) {
   // Otherwise, the pSql object may have been released already during the response function, which is
   // processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
   // cause crash.
-  /*pSql->pRpcCtx = */rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
+  rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
   return TSDB_CODE_SUCCESS;
 }

 void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
-  SSqlObj *pSql = (SSqlObj *)rpcMsg->handle;
+  SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
   if (pSql == NULL || pSql->signature != pSql) {
     tscError("%p sql is already released", pSql);
     return;

@@ -625,18 +625,31 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
   return len;
 }

+static int32_t getRowExpandSize(STableMeta* pTableMeta) {
+  int32_t  result = TD_DATA_ROW_HEAD_SIZE;
+  int32_t  columns = tscGetNumOfColumns(pTableMeta);
+  SSchema* pSchema = tscGetTableSchema(pTableMeta);
+  for(int32_t i = 0; i < columns; i++) {
+    if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
+      result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
+    }
+  }
+  return result;
+}
+
 int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
   SSqlCmd* pCmd = &pSql->cmd;

-  // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
-  const int32_t MAX_EXPAND_SIZE = TD_DATA_ROW_HEAD_SIZE + TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
+  STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, 0);
+  int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);

   void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
   SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);

   size_t total = taosArrayGetSize(pTableDataBlockList);
   for (int32_t i = 0; i < total; ++i) {
-    STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i);
+    pOneTableBlock = taosArrayGetP(pTableDataBlockList, i);
     STableDataBlocks* dataBuf = NULL;

     int32_t ret =

@@ -650,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
     }

     SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
-    int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * MAX_EXPAND_SIZE;
+    int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize;

     if (dataBuf->nAllocSize < destSize) {
       while (dataBuf->nAllocSize < destSize) {

@@ -678,8 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
     tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
              pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));

-
-    int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + MAX_EXPAND_SIZE);
+    int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize);

     pBlocks->tid = htonl(pBlocks->tid);
     pBlocks->uid = htobe64(pBlocks->uid);

@@ -272,7 +272,7 @@ typedef struct {
   int16_t offset;
 } SColIdx;

-#define TD_KV_ROW_HEAD_SIZE 2 * sizeof(int16_t)
+#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t))

 #define kvRowLen(r) (*(int16_t *)(r))
 #define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))

@@ -290,6 +290,7 @@ SKVRow tdKVRowDup(SKVRow row);
 int    tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
 int    tdEncodeKVRow(void **buf, SKVRow row);
 void * tdDecodeKVRow(void *buf, SKVRow *row);
+void   tdSortKVRowByColIdx(SKVRow row);

 static FORCE_INLINE int comparTagId(const void *key1, const void *key2) {
   if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) {

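The added parentheses in TD_KV_ROW_HEAD_SIZE matter whenever the macro sits inside a larger expression; a small illustration of the precedence trap the parenthesized form avoids (the names here are illustrative):

    #define BAD_HEAD_SIZE   2 * sizeof(int16_t)     /* unparenthesized expansion */
    #define GOOD_HEAD_SIZE  (2 * sizeof(int16_t))

    /* total / BAD_HEAD_SIZE expands to total / 2 * sizeof(int16_t),
     * which groups as (total / 2) * sizeof(int16_t) -- four times too large.
     * total / GOOD_HEAD_SIZE divides by the whole 4-byte header as intended. */
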
@@ -117,6 +117,7 @@ extern char tsDataDir[];
 extern char tsLogDir[];
 extern char tsScriptDir[];
 extern int64_t tsMsPerDay[3];
+extern char tsVnodeBakDir[];

 // system info
 extern char tsOsName[];

@@ -259,7 +259,7 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
     case TSDB_DATA_TYPE_BINARY:
     case TSDB_DATA_TYPE_NCHAR:
       for (int i = 0; i < nEle; i++) {
-        if (!isNull(varDataVal(tdGetColDataOfRow(pCol, i)), pCol->type)) return false;
+        if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
       }
       return true;
     default:

@@ -515,6 +515,22 @@ SKVRow tdKVRowDup(SKVRow row) {
   return trow;
 }

+static int compareColIdx(const void* a, const void* b) {
+  const SColIdx* x = (const SColIdx*)a;
+  const SColIdx* y = (const SColIdx*)b;
+  if (x->colId > y->colId) {
+    return 1;
+  }
+  if (x->colId < y->colId) {
+    return -1;
+  }
+  return 0;
+}
+
+void tdSortKVRowByColIdx(SKVRow row) {
+  qsort(kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), compareColIdx);
+}
+
 int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
   SColIdx *pColIdx = NULL;
   SKVRow   row = *orow;

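Sorting the SColIdx array by colId is what makes per-column lookup on a SKVRow cheap: the comparTagId helper declared in the header compares a bare colId against an SColIdx, so a lookup can binary-search the sorted index. A hedged sketch (kvRowFindVal is hypothetical, and kvRowValues is assumed to point at the packed value section that follows the header and the SColIdx array; tdGetKVRowValOfCol presumably does the equivalent internally):

    static void *kvRowFindVal(SKVRow row, int16_t colId) {
      // the bsearch key is a plain colId, matching comparTagId's signature
      SColIdx *idx = (SColIdx *)bsearch(&colId, kvRowColIdx(row), kvRowNCols(row),
                                        sizeof(SColIdx), comparTagId);
      return (idx == NULL) ? NULL : POINTER_SHIFT(kvRowValues(row), idx->offset);
    }
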
@@ -43,7 +43,7 @@ int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
 int32_t tsNumOfMnodes = 3;

 // common
-int32_t tsRpcTimer = 300;
+int32_t tsRpcTimer = 1000;
 int32_t tsRpcMaxTime = 600;  // seconds;
 int32_t tsMaxShellConns = 5000;
 int32_t tsMaxConnections = 5000;

@@ -153,6 +153,7 @@ char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
 char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
 char tsDataDir[TSDB_FILENAME_LEN] = "/var/lib/taos";
 char tsScriptDir[TSDB_FILENAME_LEN] = "/etc/taos";
+char tsVnodeBakDir[TSDB_FILENAME_LEN] = {0};

 /*
  * minimum scale for whole system, millisecond by default

@@ -464,6 +464,29 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
   }
 }

+static uint8_t  nullBool     = TSDB_DATA_BOOL_NULL;
+static uint8_t  nullTinyInt  = TSDB_DATA_TINYINT_NULL;
+static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
+static uint32_t nullInt      = TSDB_DATA_INT_NULL;
+static uint64_t nullBigInt   = TSDB_DATA_BIGINT_NULL;
+static uint32_t nullFloat    = TSDB_DATA_FLOAT_NULL;
+static uint64_t nullDouble   = TSDB_DATA_DOUBLE_NULL;
+
+static union {
+  tstr str;
+  char pad[sizeof(tstr) + 4];
+} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
+
+static void *nullValues[] = {
+    &nullBool,  &nullTinyInt, &nullSmallInt, &nullInt,    &nullBigInt,
+    &nullFloat, &nullDouble,  &nullBinary,   &nullBigInt, &nullNchar,
+};
+
+void *getNullValue(int32_t type) {
+  assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_NCHAR);
+  return nullValues[type - 1];
+}
+
 void assignVal(char *val, const char *src, int32_t len, int32_t type) {
   switch (type) {
     case TSDB_DATA_TYPE_INT: {

@@ -256,30 +256,13 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
   SDataRow trow = (SDataRow)pBlk->data;
   tdInitDataRow(trow, pSchema);

-  union {
-    char buf[sizeof(int64_t)];
-    tstr str;
-  } nullVal;
-
   for (int32_t i = 0; i < pSchema->numOfCols; i++) {
     STColumn *c = pSchema->columns + i;
-    char* val = (char*)row[i];
-    if (IS_VAR_DATA_TYPE(c->type)) {
-      if (val == NULL) {
-        val = nullVal.buf;
-        if (c->type == TSDB_DATA_TYPE_BINARY) {
-          setNull(nullVal.str.data, TSDB_DATA_TYPE_BINARY, 1);
-          nullVal.str.len = 1;
-        } else {
-          setNull(nullVal.str.data, TSDB_DATA_TYPE_NCHAR, 4);
-          nullVal.str.len = 4;
-        }
-      } else {
-        val -= sizeof(VarDataLenT);
-      }
-    } else if (val == NULL) {
-      val = nullVal.buf;
-      setNull(val, c->type, c->bytes);
+    void* val = row[i];
+    if (val == NULL) {
+      val = getNullValue(c->type);
+    } else if (IS_VAR_DATA_TYPE(c->type)) {
+      val = ((char*)val) - sizeof(VarDataLenT);
     }
     tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
   }

@@ -171,6 +171,7 @@ static int32_t dnodeInitStorage() {
   sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
   sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
   sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
+  sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir);

   //TODO(dengyihao): no need to init here
   if (dnodeCreateDir(tsMnodeDir) < 0) {

@@ -186,6 +187,10 @@ static int32_t dnodeInitStorage() {
     dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno));
     return -1;
   }
+  if (dnodeCreateDir(tsVnodeBakDir) < 0) {
+    dError("failed to create dir: %s, reason: %s", tsVnodeBakDir, strerror(errno));
+    return -1;
+  }

   dnodeCheckDataDirOpenned(tsDnodeDir);

@@ -328,6 +328,7 @@ static int32_t dnodeOpenVnodes() {
   }

   free(vnodeList);
+  free(threads);
   dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes);

   return TSDB_CODE_SUCCESS;

@@ -165,6 +165,7 @@ bool isNull(const char *val, int32_t type);
 void setVardataNull(char* val, int32_t type);
 void setNull(char *val, int32_t type, int32_t bytes);
 void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
+void* getNullValue(int32_t type);

 void assignVal(char *val, const char *src, int32_t len, int32_t type);
 void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);

@@ -153,6 +153,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, 0, 0x0369, "mnode tag
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, 0, 0x036A, "mnode tag not exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, 0, 0x036B, "mnode field already exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, 0, 0x036C, "mnode field not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, 0, 0x036D, "mnode invalid stable name")

 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, 0, 0x0380, "mnode db not selected")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, 0, 0x0381, "mnode database aleady exist")

@@ -26,6 +26,7 @@ extern "C" {
 #include "taosdef.h"
 #include "taoserror.h"
 #include "trpc.h"
+#include "tdataformat.h"

 // message type

@@ -619,7 +620,7 @@ typedef struct {
 } SMDVnodeDesc;

 typedef struct {
-  char         db[TSDB_DB_NAME_LEN];
+  char         db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   SMDVnodeCfg  cfg;
   SMDVnodeDesc nodes[TSDB_MAX_REPLICA];
 } SMDCreateVnodeMsg;

@@ -674,7 +675,7 @@ typedef struct SMultiTableMeta {
 typedef struct {
   int32_t dataLen;
   char    name[TSDB_TABLE_ID_LEN];
-  char    data[TSDB_MAX_TAGS_LEN];
+  char    data[TSDB_MAX_TAGS_LEN + TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * TSDB_MAX_TAGS];
 } STagData;

 /*

@@ -47,8 +47,8 @@ typedef struct SRpcMsg {
   void    *pCont;
   int      contLen;
   int32_t  code;
-  void    *handle;
-  void    *ahandle;   //app handle set by client, for debug purpose
+  void    *handle;    // rpc handle returned to app
+  void    *ahandle;   // app handle set by client
 } SRpcMsg;

 typedef struct SRpcInit {

@@ -78,11 +78,11 @@ void rpcClose(void *);
 void *rpcMallocCont(int contLen);
 void  rpcFreeCont(void *pCont);
 void *rpcReallocCont(void *ptr, int contLen);
-void *rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
+void  rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg);
 void  rpcSendResponse(const SRpcMsg *pMsg);
 void  rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
 int   rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
-void  rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pReq, SRpcMsg *pRsp);
+void  rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
 int   rpcReportProgress(void *pConn, char *pCont, int contLen);
 void  rpcCancelRequest(void *pContext);

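The header now spells the ownership split out: ahandle is opaque app context echoed back in every callback, while handle lets the rpc layer return its own context so the app can cancel. A minimal client sketch of the new contract, using only calls declared in this header (MyReq and the message type are placeholders, not part of the commit):

    typedef struct MyReq { void *rpcCtx; /* ... */ } MyReq;

    static void onResponse(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
      MyReq *req = (MyReq *)pMsg->ahandle;   // app context comes back via ahandle
      /* ... consume pMsg->pCont ... */
      rpcFreeCont(pMsg->pCont);
    }

    static void sendOne(void *rpc, SRpcIpSet *ipSet, MyReq *req) {
      SRpcMsg msg = {0};                     // zero-init, as the test client now does
      msg.msgType = 1;                       // placeholder message type
      msg.pCont   = rpcMallocCont(128);
      msg.contLen = 128;
      msg.ahandle = req;                     // echoed back to onResponse
      msg.handle  = &req->rpcCtx;            // rpc stores its context here, so the app
      rpcSendRequest(rpc, ipSet, &msg);      //   can later call rpcCancelRequest(req->rpcCtx)
    }
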
@@ -790,7 +790,7 @@ int isCommentLine(char *line) {
 void source_file(TAOS *con, char *fptr) {
   wordexp_t full_path;
   int       read_len = 0;
-  char *    cmd = calloc(1, MAX_COMMAND_SIZE);
+  char *    cmd = calloc(1, tsMaxSQLStringLen+1);
   size_t    cmd_len = 0;
   char *    line = NULL;
   size_t    line_len = 0;

@@ -822,7 +822,7 @@ void source_file(TAOS *con, char *fptr) {
   }

   while ((read_len = getline(&line, &line_len, f)) != -1) {
-    if (read_len >= MAX_COMMAND_SIZE) continue;
+    if (read_len >= tsMaxSQLStringLen) continue;
     line[--read_len] = '\0';

     if (read_len == 0 || isCommentLine(line)) {  // line starts with #

@@ -839,7 +839,7 @@ void source_file(TAOS *con, char *fptr) {
       memcpy(cmd + cmd_len, line, read_len);
       printf("%s%s\n", PROMPT_HEADER, cmd);
       shellRunCommand(con, cmd);
-      memset(cmd, 0, MAX_COMMAND_SIZE);
+      memset(cmd, 0, tsMaxSQLStringLen);
       cmd_len = 0;
     }

@@ -808,27 +808,31 @@ void *readMetric(void *sarg) {
 }

 void queryDB(TAOS *taos, char *command) {
-  int i = 5;
-  int32_t code = -1;
-  while (i > 0 && code != 0) {
+  int i;
+  TAOS_RES *pSql = NULL;
+  int32_t code = -1;
+
+  for (i = 0; i < 5; i++) {
+    if (NULL != pSql) {
+      taos_free_result(pSql);
+      pSql = NULL;
+    }
+
     pSql = taos_query(taos, command);
     code = taos_errno(pSql);
-    taos_free_result(pSql);
-    pSql = NULL;
-    if (code == 0) {
+    if (0 == code) {
       break;
     }
-    i--;
   }

   if (code != 0) {
     fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
+    taos_free_result(pSql);
     taos_close(taos);
     exit(EXIT_FAILURE);
   }

+  taos_free_result(pSql);
 }

 // sync insertion

@@ -132,7 +132,7 @@ typedef struct SVgObj {
   int64_t createdTime;
   int32_t lbDnodeId;
   int32_t lbTime;
-  char    dbName[TSDB_DB_NAME_LEN];
+  char    dbName[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   int8_t  inUse;
   int8_t  accessState;
   int8_t  reserved0[5];

@@ -264,7 +264,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
   strcpy(pMdCfgDnode->config, pCmCfgDnode->config);

   SRpcMsg rpcMdCfgDnodeMsg = {
-    .handle = 0,
+    .ahandle = 0,
     .code = 0,
     .msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE,
     .pCont = pMdCfgDnode,

@@ -367,6 +367,7 @@ void sdbCleanUp() {
   tsSdbObj.status = SDB_STATUS_CLOSING;

   sdbCleanupWriteWorker();
+  sdbDebug("sdb will be closed, version:%" PRId64, tsSdbObj.version);

   if (tsSdbObj.sync) {
     syncStop(tsSdbObj.sync);

@@ -281,6 +281,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {

 static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
   SCMConnectMsg *pConnectMsg = pMsg->rpcMsg.pCont;
+  SCMConnectRsp *pConnectRsp = NULL;
   int32_t        code = TSDB_CODE_SUCCESS;

   SRpcConnInfo connInfo;

@@ -309,7 +310,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
     mnodeDecDbRef(pDb);
   }

-  SCMConnectRsp *pConnectRsp = rpcMallocCont(sizeof(SCMConnectRsp));
+  pConnectRsp = rpcMallocCont(sizeof(SCMConnectRsp));
   if (pConnectRsp == NULL) {
     code = TSDB_CODE_MND_OUT_OF_MEMORY;
     goto connect_over;

@@ -332,7 +333,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {

 connect_over:
   if (code != TSDB_CODE_SUCCESS) {
-    rpcFreeCont(pConnectRsp);
+    if (pConnectRsp) rpcFreeCont(pConnectRsp);
     mLError("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));
   } else {
     mLInfo("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));

@@ -382,11 +382,13 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt
   pStable->numOfTables++;

   if (pStable->vgHash == NULL) {
-    pStable->vgHash = taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
+    pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
   }

   if (pStable->vgHash != NULL) {
-    taosHashPut(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
+    if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) {
+      taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
+    }
   }
 }

@@ -457,10 +459,9 @@ static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) {
     free(pNew);
     free(oldTableId);
     free(oldSchema);
+
+    mnodeDecTableRef(pTable);
   }

-  mnodeDecTableRef(pTable);
   return TSDB_CODE_SUCCESS;
 }

@@ -1573,7 +1574,7 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {

   SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
   SRpcMsg rpcMsg = {
-      .handle  = pMsg,
+      .ahandle = pMsg,
       .pCont   = pMDCreate,
       .contLen = htonl(pMDCreate->contLen),
       .code    = 0,

@@ -1750,7 +1751,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {

   mInfo("app:%p:%p, table:%s, send drop ctable msg", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId);
   SRpcMsg rpcMsg = {
-    .handle  = pMsg,
+    .ahandle = pMsg,
     .pCont   = pDrop,
     .contLen = sizeof(SMDDropTableMsg),
     .code    = 0,

@@ -1798,7 +1799,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) {

   SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
   SRpcMsg rpcMsg = {
-      .handle  = pMsg,
+      .ahandle = pMsg,
       .pCont   = pMDCreate,
       .contLen = htonl(pMDCreate->contLen),
       .code    = 0,

@@ -1965,9 +1966,15 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {

 static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
   SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
-  STagData *pTag = (STagData *)pInfo->tags;
+  STagData *pTags = (STagData *)pInfo->tags;
+  int32_t   tagLen = htonl(pTags->dataLen);
+  if (pTags->name[0] == 0) {
+    mError("app:%p:%p, table:%s, failed to create table on demand for stable is empty, tagLen:%d", pMsg->rpcMsg.ahandle,
+           pMsg, pInfo->tableId, tagLen);
+    return TSDB_CODE_MND_INVALID_STABLE_NAME;
+  }

-  int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + htonl(pTag->dataLen);
+  int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + tagLen;
   SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
   if (pCreateMsg == NULL) {
     mError("app:%p:%p, table:%s, failed to create table while get meta info, no enough memory", pMsg->rpcMsg.ahandle,

@@ -1982,9 +1989,9 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
   pCreateMsg->getMeta = 1;
   pCreateMsg->contLen = htonl(contLen);

-  memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));
-  mDebug("app:%p:%p, table:%s, start to create on demand, stable:%s", pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId,
-         ((STagData *)(pCreateMsg->schema))->name);
+  memcpy(pCreateMsg->schema, pTags, contLen - sizeof(SCMCreateTableMsg));
+  mDebug("app:%p:%p, table:%s, start to create on demand, tagLen:%d stable:%s",
+         pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId, tagLen, pTags->name);

   rpcFreeCont(pMsg->rpcMsg.pCont);
   pMsg->rpcMsg.msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;

@@ -2137,9 +2144,9 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {

 // handle drop child response
 static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
-  if (rpcMsg->handle == NULL) return;
+  if (rpcMsg->ahandle == NULL) return;

-  SMnodeMsg *mnodeMsg = rpcMsg->handle;
+  SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
   mnodeMsg->received++;

   SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;

@@ -2188,9 +2195,9 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
  * if failed, drop the table cached
  */
 static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
-  if (rpcMsg->handle == NULL) return;
+  if (rpcMsg->ahandle == NULL) return;

-  SMnodeMsg *mnodeMsg = rpcMsg->handle;
+  SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
   mnodeMsg->received++;

   SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;

@@ -2231,9 +2238,9 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
 }

 static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) {
-  if (rpcMsg->handle == NULL) return;
+  if (rpcMsg->ahandle == NULL) return;

-  SMnodeMsg *mnodeMsg = rpcMsg->handle;
+  SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
   mnodeMsg->received++;

   SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;

@@ -2374,6 +2381,17 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
   strcat(prefix, TS_PATH_DELIMITER);
   int32_t prefixLen = strlen(prefix);

+  char* pattern = NULL;
+  if (pShow->payloadLen > 0) {
+    pattern = (char*)malloc(pShow->payloadLen + 1);
+    if (pattern == NULL) {
+      terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+      return 0;
+    }
+    memcpy(pattern, pShow->payload, pShow->payloadLen);
+    pattern[pShow->payloadLen] = 0;
+  }
+
   while (numOfRows < rows) {
     pShow->pIter = mnodeGetNextChildTable(pShow->pIter, &pTable);
     if (pTable == NULL) break;

@@ -2389,7 +2407,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
     // pattern compare for table name
     mnodeExtractTableName(pTable->info.tableId, tableName);

-    if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
+    if (pattern != NULL && patternMatch(pattern, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
       mnodeDecTableRef(pTable);
       continue;
     }

@@ -2433,6 +2451,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows

   mnodeVacuumResult(data, NUM_OF_COLUMNS, numOfRows, rows, pShow);
   mnodeDecDbRef(pDb);
+  free(pattern);

   return numOfRows;
 }

@@ -358,7 +358,7 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
   if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;

   SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj));
-  tstrncpy(pVgroup->dbName, pDb->name, TSDB_DB_NAME_LEN);
+  tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN);
   pVgroup->numOfVnodes = pDb->cfg.replications;
   pVgroup->createdTime = taosGetTimestampMs();
   pVgroup->accessState = TSDB_VN_ALL_ACCCESS;

@@ -652,7 +652,7 @@ SRpcIpSet mnodeGetIpSetFromIp(char *ep) {
 void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
   SMDCreateVnodeMsg *pCreate = mnodeBuildCreateVnodeMsg(pVgroup);
   SRpcMsg rpcMsg = {
-    .handle  = ahandle,
+    .ahandle = ahandle,
     .pCont   = pCreate,
     .contLen = pCreate ? sizeof(SMDCreateVnodeMsg) : 0,
     .code    = 0,

@@ -673,9 +673,9 @@ void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
 }

 static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
-  if (rpcMsg->handle == NULL) return;
+  if (rpcMsg->ahandle == NULL) return;

-  SMnodeMsg *mnodeMsg = rpcMsg->handle;
+  SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
   mnodeMsg->received++;
   if (rpcMsg->code == TSDB_CODE_SUCCESS) {
     mnodeMsg->successed++;

@@ -686,7 +686,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
   SVgObj *pVgroup = mnodeMsg->pVgroup;
   mDebug("vgId:%d, create vnode rsp received, result:%s received:%d successed:%d expected:%d, thandle:%p ahandle:%p",
          pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected,
-         mnodeMsg->rpcMsg.handle, rpcMsg->handle);
+         mnodeMsg->rpcMsg.handle, rpcMsg->ahandle);

   if (mnodeMsg->received != mnodeMsg->expected) return;

@@ -718,7 +718,7 @@ static SMDDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) {
 void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
   SMDDropVnodeMsg *pDrop = mnodeBuildDropVnodeMsg(vgId);
   SRpcMsg rpcMsg = {
-    .handle  = ahandle,
+    .ahandle = ahandle,
     .pCont   = pDrop,
     .contLen = pDrop ? sizeof(SMDDropVnodeMsg) : 0,
     .code    = 0,

@@ -737,10 +737,10 @@ static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
 }

 static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg) {
-  mDebug("drop vnode rsp is received, handle:%p", rpcMsg->handle);
-  if (rpcMsg->handle == NULL) return;
+  mDebug("drop vnode rsp is received, handle:%p", rpcMsg->ahandle);
+  if (rpcMsg->ahandle == NULL) return;

-  SMnodeMsg *mnodeMsg = rpcMsg->handle;
+  SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
   mnodeMsg->received++;
   if (rpcMsg->code == TSDB_CODE_SUCCESS) {
     mnodeMsg->code = rpcMsg->code;

@@ -750,7 +750,7 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg) {
   SVgObj *pVgroup = mnodeMsg->pVgroup;
   mDebug("vgId:%d, drop vnode rsp received, result:%s received:%d successed:%d expected:%d, thandle:%p ahandle:%p",
          pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected,
-         mnodeMsg->rpcMsg.handle, rpcMsg->handle);
+         mnodeMsg->rpcMsg.handle, rpcMsg->ahandle);

   if (mnodeMsg->received != mnodeMsg->expected) return;

@@ -236,6 +236,9 @@ void taosSetCoreDump();

 void taosBlockSIGPIPE();

+int tSystem(const char * cmd) ;
+
+
 #ifdef _ALPINE
   typedef int(*__compar_fn_t)(const void *, const void *);
   void  error (int, int, const char *);

@@ -241,3 +241,32 @@ void taosBlockSIGPIPE() {
     uError("failed to block SIGPIPE");
   }
 }
+
+int tSystem(const char * cmd)
+{
+  FILE * fp;
+  int res;
+  char buf[1024];
+  if (cmd == NULL) {
+    uError("tSystem cmd is NULL!\n");
+    return -1;
+  }
+
+  if ((fp = popen(cmd, "r") ) == NULL) {
+    uError("popen cmd:%s error: %s/n", cmd, strerror(errno));
+    return -1;
+  } else {
+    while(fgets(buf, sizeof(buf), fp)) {
+      uDebug("popen result:%s", buf);
+    }
+
+    if ((res = pclose(fp)) == -1) {
+      uError("close popen file pointer fp error!\n");
+    } else {
+      uDebug("popen res is :%d\n", res);
+    }
+
+    return res;
+  }
+}

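Unlike a bare system(3) call, this variant drains the child's stdout through popen and logs it, so command output lands in the taos log; callers only check the pclose status. A hedged usage example (the command is illustrative):

    // returns pclose()'s status, or -1 on popen/pclose failure
    if (tSystem("ls -l /var/lib/taos") != 0) {
      uError("command failed");
    }
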
@@ -141,10 +141,15 @@ HttpContext *httpGetContext(void *ptr) {
 void httpReleaseContext(HttpContext *pContext) {
   int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
   assert(refCount >= 0);
-  httpDebug("context:%p, fd:%d, is releasd, refCount:%d", pContext, pContext->fd, refCount);
+  httpDebug("context:%p, is releasd, refCount:%d", pContext, refCount);

   HttpContext **ppContext = pContext->ppContext;
-  taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
+  if (tsHttpServer.contextCache != NULL) {
+    taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
+  } else {
+    httpDebug("context:%p, won't be destroyed for cache is already released", pContext);
+    // httpDestroyContext((void **)(&ppContext));
+  }
 }

 bool httpInitContext(HttpContext *pContext) {

@@ -157,7 +157,7 @@ bool httpGetHttpMethod(HttpContext* pContext) {
   pParser->method.pos[pParser->method.len] = 0;
   pParser->pLast = pSeek + 1;

-  httpDebug("context:%p, fd:%d, ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos);
+  httpTrace("context:%p, fd:%d, ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos);
   return true;
 }

@@ -186,23 +186,23 @@ bool httpParseHead(HttpContext* pContext) {
   HttpParser* pParser = &pContext->parser;
   if (strncasecmp(pParser->pLast, "Content-Length: ", 16) == 0) {
     pParser->data.len = (int32_t)atoi(pParser->pLast + 16);
-    httpDebug("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr,
+    httpTrace("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr,
               pParser->data.len);
   } else if (strncasecmp(pParser->pLast, "Accept-Encoding: ", 17) == 0) {
     if (tsHttpEnableCompress && strstr(pParser->pLast + 17, "gzip") != NULL) {
       pContext->acceptEncoding = HTTP_COMPRESS_GZIP;
-      httpDebug("context:%p, fd:%d, ip:%s, Accept-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
+      httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
     } else {
       pContext->acceptEncoding = HTTP_COMPRESS_IDENTITY;
-      httpDebug("context:%p, fd:%d, ip:%s, Accept-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
+      httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
     }
   } else if (strncasecmp(pParser->pLast, "Content-Encoding: ", 18) == 0) {
     if (strstr(pParser->pLast + 18, "gzip") != NULL) {
       pContext->contentEncoding = HTTP_COMPRESS_GZIP;
-      httpDebug("context:%p, fd:%d, ip:%s, Content-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
+      httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
     } else {
       pContext->contentEncoding = HTTP_COMPRESS_IDENTITY;
-      httpDebug("context:%p, fd:%d, ip:%s, Content-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
+      httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
     }
   } else if (strncasecmp(pParser->pLast, "Connection: ", 12) == 0) {
     if (strncasecmp(pParser->pLast + 12, "Keep-Alive", 10) == 0) {

@@ -210,7 +210,7 @@ bool httpParseHead(HttpContext* pContext) {
     } else {
       pContext->httpKeepAlive = HTTP_KEEPALIVE_DISABLE;
     }
-    httpDebug("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr,
+    httpTrace("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr,
               pContext->httpKeepAlive);
   } else if (strncasecmp(pParser->pLast, "Transfer-Encoding: ", 19) == 0) {
     if (strncasecmp(pParser->pLast + 19, "chunked", 7) == 0) {

@@ -281,7 +281,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) {
     httpParseChunkedBody(pContext, pParser, false);
     return HTTP_CHECK_BODY_SUCCESS;
   } else {
-    httpDebug("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
+    httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
    if (!httpReadDataImp(pContext)) {
      httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr);
      return HTTP_CHECK_BODY_ERROR;

@@ -299,7 +299,7 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) {
     httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR);
     return HTTP_CHECK_BODY_ERROR;
   } else if (dataReadLen < pParser->data.len) {
-    httpDebug("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
+    httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
               pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
     return HTTP_CHECK_BODY_CONTINUE;
   } else {

@@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
     return true;
   }

-  httpDebug("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s",
-            pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds,
-            pContext->parser.bufsize, pContext->parser.buffer);
+  httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
+                pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
+                pContext->parser.buffer);

   if (!httpGetHttpMethod(pContext)) {
     return false;

@@ -76,8 +76,8 @@ int httpWriteBuf(struct HttpContext *pContext, const char *buf, int sz) {
     httpError("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, failed to send response:\n%s",
               pContext, pContext->fd, pContext->ipstr, sz, writeSz, buf);
   } else {
-    httpDebug("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, response:\n%s",
-              pContext, pContext->fd, pContext->ipstr, sz, writeSz, buf);
+    httpTrace("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, response:\n%s", pContext, pContext->fd,
+              pContext->ipstr, sz, writeSz, buf);
   }

   return writeSz;

@@ -99,7 +99,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
   uint64_t srcLen = (uint64_t) (buf->lst - buf->buf);

   if (buf->pContext->fd <= 0) {
-    httpDebug("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+    httpTrace("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
     buf->pContext->fd = -1;
   }

@@ -113,11 +113,11 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {

   if (buf->pContext->acceptEncoding == HTTP_COMPRESS_IDENTITY) {
     if (buf->lst == buf->buf) {
-      httpDebug("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+      httpTrace("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
       return 0;  // there is no data to dump.
     } else {
       int len = sprintf(sLen, "%lx\r\n", srcLen);
-      httpDebug("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s",
+      httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s",
                 buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf);
       httpWriteBufNoTrace(buf->pContext, sLen, len);
       remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen);

@@ -129,12 +129,12 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
     if (ret == 0) {
       if (compressBufLen > 0) {
         int len = sprintf(sLen, "%x\r\n", compressBufLen);
-        httpDebug("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
+        httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
                   buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf);
         httpWriteBufNoTrace(buf->pContext, sLen, len);
         remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen);
       } else {
-        httpDebug("context:%p, fd:%d, ip:%s, last:%d, compress already dumped, response:\n%s",
+        httpTrace("context:%p, fd:%d, ip:%s, last:%d, compress already dumped, response:\n%s",
                   buf->pContext, buf->pContext->fd, buf->pContext->ipstr, isTheLast, buf->buf);
         return 0;  // there is no data to dump.
       }

@@ -173,7 +173,7 @@ void httpWriteJsonBufHead(JsonBuf* buf) {

 void httpWriteJsonBufEnd(JsonBuf* buf) {
   if (buf->pContext->fd <= 0) {
-    httpDebug("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+    httpTrace("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
     buf->pContext->fd = -1;
   }

@@ -66,8 +66,6 @@ void httpCleanUpConnect() {
     }
   }

-  tfree(pServer->pThreads);
-  pServer->pThreads = NULL;
   httpDebug("http server:%s is cleaned up", pServer->label);
 }

@@ -359,6 +359,8 @@ void httpExecCmd(HttpContext *pContext) {

 void httpProcessRequestCb(void *param, TAOS_RES *result, int code) {
   HttpContext *pContext = param;
+  taos_free_result(result);
+
   if (pContext == NULL) return;

   if (code < 0) {

@@ -95,11 +95,13 @@ void httpCleanUpSystem() {
   httpInfo("http server cleanup");
   httpStopSystem();

-  httpCleanUpConnect();
   httpCleanupContexts();
   httpCleanUpSessions();
+  httpCleanUpConnect();
   pthread_mutex_destroy(&tsHttpServer.serverMutex);

+  tfree(tsHttpServer.pThreads);
+  tsHttpServer.pThreads = NULL;
+
   tsHttpServer.status = HTTP_SERVER_CLOSED;
 }

@@ -2237,7 +2237,7 @@ static void doSetTagValueInParam(void *tsdb, void* pTable, int32_t tagColId, tVa
   }

   if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-    if (isNull(varDataVal(val), type)) {
+    if (isNull(val, type)) {
       tag->nType = TSDB_DATA_TYPE_NULL;
       return;
     }

@@ -6028,7 +6028,7 @@ void qDestroyQueryInfo(qinfo_t qHandle) {
     return;
   }

-  int16_t ref = T_REF_DEC(pQInfo);
+  int32_t ref = T_REF_DEC(pQInfo);
   qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref);

   if (ref == 0) {

@@ -354,13 +354,13 @@ void *rpcReallocCont(void *ptr, int contLen) {
   return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
 }

-void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
+void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
   SRpcInfo       *pRpc = (SRpcInfo *)shandle;
   SRpcReqContext *pContext;

   int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
   pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
-  pContext->ahandle = pMsg->handle;
+  pContext->ahandle = pMsg->ahandle;
   pContext->pRpc = (SRpcInfo *)shandle;
   pContext->ipSet = *pIpSet;
   pContext->contLen = contLen;

@@ -380,9 +380,12 @@ void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg
       || type == TSDB_MSG_TYPE_CM_SHOW )
     pContext->connType = RPC_CONN_TCPC;

+  // set the handle to pContext, so app can cancel the request
+  if (pMsg->handle) *((void **)pMsg->handle) = pContext;
+
   rpcSendReqToServer(pRpc, pContext);

-  return pContext;
+  return;
 }

 void rpcSendResponse(const SRpcMsg *pRsp) {

@@ -483,7 +486,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
   return 0;
 }

-void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pMsg, SRpcMsg *pRsp) {
+void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
   SRpcReqContext *pContext;
   pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));

@@ -665,6 +668,12 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
     return pConn;
   }

+  // if code is not 0, it means it is simple reqhead, just ignore
+  if (pHead->code != 0) {
+    terrno = TSDB_CODE_RPC_ALREADY_PROCESSED;
+    return NULL;
+  }
+
   int sid = taosAllocateId(pRpc->idPool);
   if (sid <= 0) {
     tError("%s maximum number of sessions:%d is reached", pRpc->label, pRpc->sessions);

@@ -1028,19 +1037,24 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
   rpcMsg.ahandle = pConn->ahandle;

   if ( rpcIsReq(pHead->msgType) ) {
-    rpcMsg.handle = pConn;
-    rpcAddRef(pRpc);  // add the refCount for requests
+    if (rpcMsg.contLen > 0) {
+      rpcMsg.handle = pConn;
+      rpcAddRef(pRpc);  // add the refCount for requests

-    // start the progress timer to monitor the response from server app
-    if (pConn->connType != RPC_CONN_TCPS)
-      pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
+      // start the progress timer to monitor the response from server app
+      if (pConn->connType != RPC_CONN_TCPS)
+        pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);

-    // notify the server app
-    (*(pRpc->cfp))(&rpcMsg, NULL);
+      // notify the server app
+      (*(pRpc->cfp))(&rpcMsg, NULL);
+    } else {
+      tDebug("%s, message body is empty, ignore", pConn->info);
+      rpcFreeCont(rpcMsg.pCont);
+    }
   } else {
     // it's a response
     SRpcReqContext *pContext = pConn->pContext;
-    rpcMsg.handle = pContext->ahandle;
+    rpcMsg.handle = pContext;
     pConn->pContext = NULL;

     // for UDP, port may be changed by server, the port in ipSet shall be used for cache

@@ -1244,7 +1258,7 @@ static void rpcProcessConnError(void *param, void *id) {

   if (pContext->numOfTry >= pContext->ipSet.numOfIps) {
     rpcMsg.msgType = pContext->msgType+1;
-    rpcMsg.handle = pContext->ahandle;
+    rpcMsg.ahandle = pContext->ahandle;
     rpcMsg.code = pContext->code;
     rpcMsg.pCont = NULL;
     rpcMsg.contLen = 0;

@@ -419,7 +419,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
     tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen);
     return -1;
   } else {
-    // tDebug("malloc mem: %p", buffer);
+    tDebug("TCP malloc mem: %p", buffer);
   }

   msg = buffer + tsRpcOverhead;

@@ -212,7 +212,7 @@ static void *taosRecvUdpData(void *param) {
       tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen);
       continue;
     } else {
-      // tTrace("malloc mem: %p", tmsg);
+      tDebug("UDP malloc mem: %p", tmsg);
     }

     tmsg += tsRpcOverhead;  // overhead for SRpcReqContext

@@ -33,7 +33,7 @@ typedef struct {
 } SInfo;

 static void processResponse(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
-  SInfo *pInfo = (SInfo *)pMsg->handle;
+  SInfo *pInfo = (SInfo *)pMsg->ahandle;
   tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code);

   if (pIpSet) pInfo->ipSet = *pIpSet;

@@ -46,7 +46,7 @@ static int tcount = 0;

 static void *sendRequest(void *param) {
   SInfo  *pInfo = (SInfo *)param;
-  SRpcMsg rpcMsg;
+  SRpcMsg rpcMsg = {0};

   tDebug("thread:%d, start to send request", pInfo->index);

@@ -54,7 +54,7 @@ static void *sendRequest(void *param) {
     pInfo->num++;
     rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
     rpcMsg.contLen = pInfo->msgSize;
-    rpcMsg.handle = pInfo;
+    rpcMsg.ahandle = pInfo;
     rpcMsg.msgType = 1;
     tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
     rpcSendRequest(pInfo->pRpc, &pInfo->ipSet, &rpcMsg);

@@ -29,7 +29,7 @@
 #include "tutil.h"
 #include "ttime.h"

-const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".h"};
+const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};

 static int  tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type);
 static void tsdbDestroyFile(SFile *pFile);

@@ -111,7 +111,7 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
     ASSERT(pTableData->numOfRows == tSkipListGetSize(pTableData->pData));
   }

-  tsdbDebug("vgId:%d a row is inserted to table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
+  tsdbTrace("vgId:%d a row is inserted to table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
             TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), key);

   return 0;

@@ -443,12 +443,14 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
       if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
         tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
                   tstrerror(terrno));
+        tdKVStoreEndCommit(pMeta->pStore);
         goto _err;
       }
     } else if (pAct->act == TSDB_DROP_META) {
       if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
         tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
                   tstrerror(terrno));
+        tdKVStoreEndCommit(pMeta->pStore);
         goto _err;
       }
     } else {

|
|||
if (tsdbTableSetSName(pCfg, pMsg->superTableId, true) < 0) goto _err;
|
||||
if (tsdbTableSetSuperUid(pCfg, htobe64(pMsg->superTableUid)) < 0) goto _err;
|
||||
|
||||
// Decode tag values
|
||||
if (pMsg->tagDataLen) {
|
||||
int accBytes = 0;
|
||||
int32_t tagDataLen = htonl(pMsg->tagDataLen);
|
||||
if (tagDataLen) {
|
||||
char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);
|
||||
|
||||
SKVRowBuilder kvRowBuilder = {0};
|
||||
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
|
||||
if (tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
accBytes += htons(pSchema[i].bytes);
|
||||
}
|
||||
|
||||
tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
tsdbTableSetTagValue(pCfg, pTagData, true);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -480,13 +464,11 @@ int tsdbUpdateTable(STsdbRepo *pRepo, STable *pTable, STableCfg *pCfg) {
bool changed = false;
STsdbMeta *pMeta = pRepo->tsdbMeta;

if (pTable->type == TSDB_SUPER_TABLE) {
if (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema)) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
if ((pTable->type == TSDB_SUPER_TABLE) && (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema))) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
changed = true;
}

@@ -552,13 +534,13 @@ int tsdbUnlockRepoMeta(STsdbRepo *pRepo) {
}

void tsdbRefTable(STable *pTable) {
int16_t ref = T_REF_INC(pTable);
int32_t ref = T_REF_INC(pTable);
UNUSED(ref);
// tsdbDebug("ref table %"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
}

void tsdbUnRefTable(STable *pTable) {
int16_t ref = T_REF_DEC(pTable);
int32_t ref = T_REF_DEC(pTable);
tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);

if (ref == 0) {

@@ -598,7 +580,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
return -1;
}

tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is restored from file", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is restored from file", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return 0;
}

@@ -622,6 +604,10 @@ static char *getTagIndexKey(const void *pData) {
STSchema *pSchema = tsdbGetTableTagSchema(pTable);
STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
void * res = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
if (res == NULL) {
// treat the column as NULL if we cannot find it
res = getNullValue(pCol->type);
}
return res;
}
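
Editor's note: the added NULL check in getTagIndexKey follows a common indexing rule — a tag missing from the row compares as a per-type null sentinel rather than a dangling pointer. A self-contained sketch of that fallback (sentinel values and types here are illustrative, not TDengine's getNullValue):

#include <stdint.h>
#include <stdio.h>

enum { TYPE_BIGINT, TYPE_BINARY };

/* Illustrative per-type null sentinels; real systems pick reserved bit patterns. */
static const int64_t nullBigint = INT64_MIN;
static const char    nullBinary[1] = {'\0'};

static const void *getNullSentinel(int type) {
  return (type == TYPE_BIGINT) ? (const void *)&nullBigint : (const void *)nullBinary;
}

/* Resolve an index key: an absent column value falls back to the null sentinel. */
static const void *tagIndexKey(const void *colVal, int type) {
  return (colVal != NULL) ? colVal : getNullSentinel(type);
}

int main(void) {
  const void *key = tagIndexKey(NULL, TYPE_BIGINT);
  printf("missing tag resolved to sentinel %lld\n", (long long)*(const int64_t *)key);
  return 0;
}
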
@@ -799,7 +785,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, tsdbGetTableSchema(pTable));
}

tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return 0;

@@ -1215,7 +1201,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
while (tSkipListIterNext(pIter)) {
STable *tTable = *(STable **)SL_GET_NODE_DATA(tSkipListIterGet(pIter));
ASSERT(TABLE_TYPE(tTable) == TSDB_CHILD_TABLE);
pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, pTable);
pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, tTable);
}

tSkipListDestroyIter(pIter);

@@ -1254,4 +1240,4 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
}

return 0;
}
}

@@ -128,11 +128,12 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
// Create and open .l file if should
if (tsdbShouldCreateNewLast(pHelper)) {
if (tsdbOpenFile(&(pHelper->files.nLastF), O_WRONLY | O_CREAT) < 0) goto _err;
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE)
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
TSDB_FILE_HEAD_SIZE, pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
}
} else {
if (tsdbOpenFile(&(pHelper->files.dataF), O_RDONLY) < 0) goto _err;

@@ -144,7 +145,7 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
return tsdbLoadCompIdx(pHelper, NULL);

_err:
return terrno;
return -1;
}

int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) {

@@ -22,7 +22,7 @@ typedef void (*_ref_fn_t)(const void* pObj);

#define T_REF_DECLARE() \
struct { \
int16_t val; \
int32_t val; \
} _ref;

#define T_REF_REGISTER_FUNC(s, e) \

@@ -31,7 +31,7 @@ typedef void (*_ref_fn_t)(const void* pObj);
_ref_fn_t end; \
} _ref_func = {.begin = (s), .end = (e)};

#define T_REF_INC(x) (atomic_add_fetch_16(&((x)->_ref.val), 1))
#define T_REF_INC(x) (atomic_add_fetch_32(&((x)->_ref.val), 1))

#define T_REF_INC_WITH_CB(x, p) \
do { \

@@ -41,11 +41,11 @@ typedef void (*_ref_fn_t)(const void* pObj);
} \
} while (0)

#define T_REF_DEC(x) (atomic_sub_fetch_16(&((x)->_ref.val), 1))
#define T_REF_DEC(x) (atomic_sub_fetch_32(&((x)->_ref.val), 1))

#define T_REF_DEC_WITH_CB(x, p) \
do { \
int32_t v = atomic_sub_fetch_16(&((x)->_ref.val), 1); \
int32_t v = atomic_sub_fetch_32(&((x)->_ref.val), 1); \
if (v == 0 && (p)->_ref_func.end != NULL) { \
(p)->_ref_func.end((x)); \
} \
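
Editor's note: the net effect of the tref.h hunks is that every T_REF counter widens from 16 to 32 bits, so an object referenced by more than 32767 concurrent holders no longer overflows. A compile-ready sketch of the same inc/dec-with-callback shape using C11 atomics (the struct and hook below are illustrative, not tref.h itself):

#include <stdatomic.h>
#include <stdio.h>

typedef struct RefCounted {
  atomic_int_least32_t ref;             /* 32-bit counter, as in the widened T_REF_DECLARE() */
  void (*onZero)(struct RefCounted *);  /* end-of-life hook, like _ref_func.end */
} RefCounted;

static int32_t refInc(RefCounted *r) { return atomic_fetch_add(&r->ref, 1) + 1; }

static int32_t refDec(RefCounted *r) {
  int32_t v = atomic_fetch_sub(&r->ref, 1) - 1;  /* fetch-then-sub, so adjust to the new value */
  if (v == 0 && r->onZero != NULL) r->onZero(r); /* mirrors T_REF_DEC_WITH_CB */
  return v;
}

static void onZero(RefCounted *r) { printf("object %p dropped to zero refs\n", (void *)r); }

int main(void) {
  RefCounted obj = { .onZero = onZero };
  atomic_init(&obj.ref, 0);
  refInc(&obj);
  refDec(&obj);   /* triggers the hook exactly once */
  return 0;
}
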
@@ -184,6 +184,7 @@ uint32_t ip2uint(const char *const ip_addr);

void taosRemoveDir(char *rootDir);
int tmkdir(const char *pathname, mode_t mode);
void taosMvDir(char* destDir, char *srcDir);

#define TAOS_ALLOC_MODE_DEFAULT 0
#define TAOS_ALLOC_MODE_RANDOM_FAIL 1

@@ -119,7 +119,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);

uDebug("key:%s is removed from cache,total:%" PRId64 ",size:%dbytes", pNode->key, pCacheObj->totalSize, size);
uDebug("key:%p, is removed from cache,total:%" PRId64 ",size:%dbytes", pNode->key, pCacheObj->totalSize, size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode);
}

@@ -275,7 +275,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) {
return NULL;
}

__cache_wr_lock(pCacheObj);
SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
SCacheDataNode * pOld = (pt != NULL) ? (*pt) : NULL;

@@ -285,14 +285,14 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
if (NULL != pNode) {
pCacheObj->totalSize += pNode->size;

uDebug("key:%p %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
uDebug("key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
key, pNode, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime), pCacheObj->totalSize, dataSize);
} else {
uError("key:%p failed to added into cache, out of memory", key);
uError("key:%p, failed to added into cache, out of memory", key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
uDebug("key:%p %p exist in cache, updated", key, pNode);
uDebug("key:%p, %p exist in cache, updated", key, pNode);
}

__cache_unlock(pCacheObj);

@@ -304,7 +304,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) {
return NULL;
}

__cache_rd_lock(pCacheObj);

SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);

@@ -327,10 +327,10 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen

if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%p is retrieved from cache, %p refcnt:%d", key, (*ptNode), ref);
uDebug("key:%p, is retrieved from cache, %p refcnt:%d", key, (*ptNode), ref);
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%p not in cache, retrieved failed", key);
uDebug("key:%p, not in cache, retrieved failed", key);
}

atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);

@@ -355,10 +355,10 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, siz

if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%s expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
uDebug("key:%p, expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%s not in cache, retrieved failed", key);
uDebug("key:%p, not in cache, retrieved failed", key);
}

atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);

@@ -424,7 +424,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {

SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset);
if (pNode->signature != (uint64_t)pNode) {
uError("key: %p release invalid cache data", pNode);
uError("key:%p, release invalid cache data", pNode);
return;
}

@@ -522,7 +522,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pNode->inTrashCan = true;
pCacheObj->numOfElemsInTrash++;

uDebug("key:%p %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
}

void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {

@@ -546,7 +546,7 @@ void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
if (pCacheObj->freeFp) {
pCacheObj->freeFp(pElem->pData->data);
}

uError("-------------------free obj:%p", pElem->pData);
free(pElem->pData);
free(pElem);

@@ -574,7 +574,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
}

if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
uDebug("key:%p %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
pCacheObj->numOfElemsInTrash - 1);
STrashElem *p = pElem;

@@ -594,7 +594,12 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
taosCacheReleaseNode(pCacheObj, pNode);
// if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
if (T_REF_VAL_GET(pNode) <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
uDebug("key:%p, will not remove from cache, refcnt:%d", pNode->key, T_REF_VAL_GET(pNode));
}
}
taosHashDestroyIter(pIter);

@@ -259,6 +259,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
}

taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
uDebug("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);

return 0;
}

@@ -292,6 +293,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2);

taosHashRemove(pStore->map, (void *)(&uid), sizeof(uid));
uDebug("drop uid %" PRIu64 " from KV store %s", uid, pStore->fname);

return 0;
}

@@ -68,10 +68,15 @@ void taosCloseQueue(taos_queue param) {
if (param == NULL) return;
STaosQueue *queue = (STaosQueue *)param;
STaosQnode *pTemp;
STaosQset *qset;

pthread_mutex_lock(&queue->mutex);
STaosQnode *pNode = queue->head;
queue->head = NULL;
qset = queue->qset;
pthread_mutex_unlock(&queue->mutex);

if (queue->qset) taosRemoveFromQset(queue->qset, queue);
if (queue->qset) taosRemoveFromQset(qset, queue);

pthread_mutex_lock(&queue->mutex);
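
Editor's note: the taosCloseQueue change captures queue->qset into a local while the mutex is held, then operates on the snapshot, so a concurrent writer clearing the field cannot race the later use. A minimal sketch of that snapshot-under-lock idiom (all types here are illustrative, not tqueue.c's):

#include <pthread.h>
#include <stdio.h>

typedef struct { int dummy; } QSet;
typedef struct { pthread_mutex_t mutex; QSet *qset; } Queue;

static void removeFromQset(QSet *qs, Queue *q) { (void)qs; (void)q; /* stub for the sketch */ }

static void closeQueue(Queue *q) {
  pthread_mutex_lock(&q->mutex);
  QSet *qset = q->qset;            /* snapshot the shared pointer under the lock ... */
  pthread_mutex_unlock(&q->mutex);

  if (qset != NULL) removeFromQset(qset, q);  /* ... and use only the snapshot afterwards */
}

int main(void) {
  QSet qs;
  Queue q = { PTHREAD_MUTEX_INITIALIZER, &qs };
  closeQueue(&q);
  printf("queue closed\n");
  return 0;
}
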
@@ -95,7 +100,7 @@ void *taosAllocateQitem(int size) {
void taosFreeQitem(void *param) {
if (param == NULL) return;

uDebug("item:%p is freed", param);
uTrace("item:%p is freed", param);
char *temp = (char *)param;
temp -= sizeof(STaosQnode);
free(temp);

@@ -119,7 +124,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) {

queue->numOfItems++;
if (queue->qset) atomic_add_fetch_32(&queue->qset->numOfItems, 1);
uDebug("item:%p is put into queue:%p, type:%d items:%d", item, queue, type, queue->numOfItems);
uTrace("item:%p is put into queue:%p, type:%d items:%d", item, queue, type, queue->numOfItems);

pthread_mutex_unlock(&queue->mutex);

@@ -201,7 +206,7 @@ int taosGetQitem(taos_qall param, int *type, void **pitem) {
*pitem = pNode->item;
*type = pNode->type;
num = 1;
uDebug("item:%p is fetched, type:%d", *pitem, *type);
uTrace("item:%p is fetched, type:%d", *pitem, *type);
}

return num;

@@ -339,7 +344,7 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
queue->numOfItems--;
atomic_sub_fetch_32(&qset->numOfItems, 1);
code = 1;
uDebug("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
}

pthread_mutex_unlock(&queue->mutex);

@@ -799,3 +799,13 @@ int tmkdir(const char *path, mode_t mode) {
if (code < 0 && errno == EEXIST) code = 0;
return code;
}

void taosMvDir(char* destDir, char *srcDir) {
char shellCmd[1024+1] = {0};

//(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
(void)snprintf(shellCmd, 1024, "mv %s %s", srcDir, destDir);
tSystem(shellCmd);
uInfo("shell cmd:%s is executed", shellCmd);
}
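
Editor's note: taosMvDir above shells out to mv via tSystem, which forks a shell per move and inherits any quoting problems in the paths. As a hedged alternative sketch, a rename(2)-first variant that falls back to the shell only for cross-filesystem moves (moveDir is a hypothetical helper, not part of tutil.c):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Try the atomic rename(2) first; shell out only when crossing filesystems. */
static int moveDir(const char *destDir, const char *srcDir) {
  if (rename(srcDir, destDir) == 0) return 0;
  if (errno != EXDEV) return -1;                /* a real failure, not cross-device */

  char cmd[1024 + 1] = {0};
  snprintf(cmd, sizeof(cmd), "mv %s %s", srcDir, destDir);
  return system(cmd) == 0 ? 0 : -1;             /* assumes shell-safe path names */
}

int main(void) {
  if (moveDir("/tmp/dst-dir", "/tmp/src-dir") != 0) perror("moveDir");
  return 0;
}
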
@@ -127,9 +127,8 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {

char tsdbDir[TSDB_FILENAME_LEN] = {0};
sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId);
code = tsdbCreateRepo(tsdbDir, &tsdbCfg);
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
if (tsdbCreateRepo(tsdbDir, &tsdbCfg) < 0) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
return TSDB_CODE_VND_INIT_FAILED;
}

@@ -355,6 +354,7 @@ void vnodeRelease(void *pVnodeRaw) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) {
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
taosMvDir(tsVnodeBakDir, rootDir);
taosRemoveDir(rootDir);
}

@@ -94,7 +94,7 @@ static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pR

// save insert result into item

vDebug("vgId:%d, submit msg is processed", pVnode->vgId);
vTrace("vgId:%d, submit msg is processed", pVnode->vgId);

pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);

@@ -11,7 +11,7 @@
4. pip install ../src/connector/python/linux/python2 ; pip3 install
../src/connector/python/linux/python3

5. pip install numpy; pip3 install numpy
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)

> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 is no longer officially supported by Python Software

@@ -1,5 +1,7 @@
#!/bin/bash

WORK_DIR=/mnt/root

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -16,6 +18,17 @@ function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") &&
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #

function setMaxTablesPerVnode {
echo "/etc/taos/taos.cfg maxTablesPerVnode will be set to $1"

hasText=`grep "maxTablesPerVnode" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "maxTablesPerVnode $1" >> /etc/taos/taos.cfg
else
sed -i 's/^maxTablesPerVnode.*$/maxTablesPerVnode '"$1"'/g' /etc/taos/taos.cfg
fi
}

function setMaxConnections {
echo "/etc/taos/taos.cfg maxConnection will be set to $1"

@@ -27,6 +40,28 @@ function setMaxConnections {
fi
}

function setQDebugFlag {
echo "/etc/taos/taos.cfg qDebugFlag will be set to $1"

hasText=`grep -w "qDebugFlag" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "qDebugFlag $1" >> /etc/taos/taos.cfg
else
sed -i 's/^qDebugFlag.*$/qDebugFlag '"$1"'/g' /etc/taos/taos.cfg
fi
}

function setDebugFlag {
echo "/etc/taos/taos.cfg DebugFlag will be set to $1"

hasText=`grep -w "DebugFlag" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "DebugFlag $1" >> /etc/taos/taos.cfg
else
sed -i 's/^DebugFlag.*$/DebugFlag '"$1"'/g' /etc/taos/taos.cfg
fi
}

function setWal {
echo "/etc/taos/taos.cfg walLevel will be set to $1"

@@ -47,9 +82,10 @@ function collectSysInfo {
}

function buildTDengine {
cd /root/TDengine
echoInfo "Build TDengine"
cd $WORK_DIR/TDengine

git remote update
git remote update > /dev/null
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`

@@ -59,29 +95,17 @@ function buildTDengine {
echo "repo up-to-date"
else
echo "repo need to pull"
git pull
git pull > /dev/null

LOCAL_COMMIT=`git rev-parse --short @`
cd debug
rm -rf *
cmake ..
cmake .. > /dev/null
make > /dev/null
make install
fi
}

function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10

rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*

taosd 2>&1 > /dev/null &
sleep 10
}

function sendReport {
receiver="sdsang@taosdata.com, sangshuduo@gmail.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
@@ -93,57 +117,90 @@ function sendReport {
(cat - && uuencode perftest-13d-wal1-$today.log perftest-13d-wal1-$today.log)| \
(cat - && uuencode perftest-13d-wal1-report.csv perftest-13d-wal1-report-$today.csv) | \
(cat - && uuencode perftest-13d-wal1-report.png perftest-13d-wal1-report-$today.png) | \
(cat - && uuencode perftest-var10k-int10s-wal1-$today.log perftest-var10k-int10s-wal1-$today.log)| \
(cat - && uuencode perftest-var10k-int10s-wal1-report.csv perftest-var10k-int10s-wal1-report-$today.csv) | \
(cat - && uuencode perftest-var10k-int10s-wal1-report.png perftest-var10k-int10s-wal1-report-$today.png) | \
(cat - && uuencode taosdemo-wal1-$today.log taosdemo-wal1-$today.log) | \
(cat - && uuencode taosdemo-wal1-report.csv taosdemo-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal1-report.csv taosdemo-rps-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-wal1-report.png taosdemo-wal1-report-$today.png) | \
(cat - && uuencode taosdemo-rps-wal1-report.csv taosdemo-rps-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal1-report.png taosdemo-rps-wal1-report-$today.png) | \
(cat - && uuencode perftest-1d-wal2-$today.log perftest-1d-wal2-$today.log)| \
(cat - && uuencode perftest-1d-wal2-report.csv perftest-1d-wal2-report-$today.csv) | \
(cat - && uuencode perftest-1d-wal2-report.png perftest-1d-wal2-report-$today.png) | \
(cat - && uuencode perftest-13d-wal2-$today.log perftest-13d-wal2-$today.log)| \
(cat - && uuencode perftest-13d-wal2-report.csv perftest-13d-wal2-report-$today.csv) | \
(cat - && uuencode perftest-13d-wal2-report.png perftest-13d-wal2-report-$today.png) | \
(cat - && uuencode perftest-var10k-int10s-wal2-$today.log perftest-var10k-int10s-wal2-$today.log)| \
(cat - && uuencode perftest-var10k-int10s-wal2-report.csv perftest-var10k-int10s-wal2-report-$today.csv) | \
(cat - && uuencode perftest-var10k-int10s-wal2-report.png perftest-var10k-int10s-wal2-report-$today.png) | \
(cat - && uuencode taosdemo-wal2-$today.log taosdemo-wal2-$today.log) | \
(cat - && uuencode taosdemo-wal2-report.csv taosdemo-wal2-report-$today.csv) | \
(cat - && uuencode taosdemo-wal2-report.png taosdemo-wal2-report-$today.png) | \
(cat - && uuencode taosdemo-rps-wal2-report.csv taosdemo-rps-wal2-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal2-report.png taosdemo-rps-wal2-report-$today.png) | \
(cat - && uuencode sysinfo.log sysinfo.txt) | \
(cat - && uuencode taos.cfg taos-cfg-$today.txt) | \
ssmtp "${receiver}"
}

today=`date +"%Y%m%d"`
cd /root
echo -e "cron-ran-at-${today}" >> cron.log
cd $WORK_DIR
echo -e "cron-ran-at-${today}" >> $WORK_DIR/cron.log

echoInfo "Build TDengine"
buildTDengine

############################
setMaxConnections 100
setMaxConnections 1000
setMaxTablesPerVnode 6000
setDebugFlag 131
setQDebugFlag 131

############################
setWal "2"

cd /root
./perftest-tsdb-compare-1d.sh "wal2"

cd /root
./perftest-tsdb-compare-13d.sh "wal2"

cd /root
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-taosdemo.sh "wal2"
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-1d.sh
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-13d.sh
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-var10k-int10s.sh
date >> $WORK_DIR/cron.log

#############################
setWal "1"

cd /root
./perftest-tsdb-compare-1d.sh "wal1"

cd /root
./perftest-tsdb-compare-13d.sh "wal1"

cd /root
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-taosdemo.sh "wal1"
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-1d.sh
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-13d.sh
date >> $WORK_DIR/cron.log

cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-var10k-int10s.sh
date >> $WORK_DIR/cron.log

#############################
collectSysInfo

@@ -1,5 +1,20 @@
#!/bin/bash

WORK_DIR=/mnt/root

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

# Coloured Echoes
function red_echo { echo -e "\033[31m$@\033[0m"; }
function green_echo { echo -e "\033[32m$@\033[0m"; }

@@ -17,13 +32,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; }

function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10

rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

rm -rf $logDir/*
rm -rf $dataDir/*

echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}

@@ -32,7 +54,7 @@ function runCreateTableOnly {
echoInfo "Restart Taosd"
restartTaosd

/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$walPostfix-$today.log"
demoCreateTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}

@@ -40,7 +62,7 @@ function runDeleteTableOnly {
echoInfo "Restart Taosd"
restartTaosd

/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -t 0 -D 1 2>&1 | tee taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -t 0 -D 1 2>&1 | tee taosdemo-$walPostfix-$today.log"
demoDeleteTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}

@@ -48,41 +70,44 @@ function runCreateTableThenInsert {
echoInfo "Restart Taosd"
restartTaosd

/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$walPostfix-$today.log"
demoTableAndInsert=`grep "Total:" totaltime.out|awk '{print $2}'`
demoRPS=`grep "records\/second" taosdemo-$1-$today.log | tail -n1 | awk '{print $13}'`
demoRPS=`grep "records\/second" taosdemo-$walPostfix-$today.log | tail -n1 | awk '{print $13}'`
}

function generateTaosdemoPlot {
echo "${today} $1, demoCreateTableOnly: ${demoCreateTableOnly}, demoDeleteTableOnly: ${demoDeleteTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoCreateTableOnly}, ${demoDeleteTableOnly}, ${demoTableAndInsert}">> taosdemo-$1-report.csv
echo "${today}, ${demoRPS}" >> taosdemo-rps-$1-report.csv
echo "${today} $walPostfix, demoCreateTableOnly: ${demoCreateTableOnly}, demoDeleteTableOnly: ${demoDeleteTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoCreateTableOnly}, ${demoDeleteTableOnly}, ${demoTableAndInsert}">> taosdemo-$walPostfix-report.csv
echo "${today}, ${demoRPS}" >> taosdemo-rps-$walPostfix-report.csv

csvLines=`cat taosdemo-$1-report.csv | wc -l`
csvLines=`cat taosdemo-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-$1-report.csv
sed -i '1d' taosdemo-$walPostfix-report.csv
fi

csvLines=`cat taosdemo-rps-$1-report.csv | wc -l`
csvLines=`cat taosdemo-rps-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-rps-$1-report.csv
sed -i '1d' taosdemo-rps-$walPostfix-report.csv
fi

gnuplot -e "filename='taosdemo-$1-report'" -p taosdemo-csv2png.gnuplot
gnuplot -e "filename='taosdemo-rps-$1-report'" -p taosdemo-rps-csv2png.gnuplot
gnuplot -e "filename='taosdemo-$walPostfix-report'" -p taosdemo-csv2png.gnuplot
gnuplot -e "filename='taosdemo-rps-$walPostfix-report'" -p taosdemo-rps-csv2png.gnuplot
}

today=`date +"%Y%m%d"`

cd /root
cd $WORK_DIR
echoInfo "Test Create Table Only "
runCreateTableOnly $1
runCreateTableOnly
echoInfo "Test Create Table then Insert data"
runDeleteTableOnly $1
runDeleteTableOnly
echoInfo "Test Create Table then Insert data"
runCreateTableThenInsert $1
runCreateTableThenInsert
echoInfo "Generate plot for taosdemo"
generateTaosdemoPlot $1
echoInfo "End of TaosDemo Test"
generateTaosdemoPlot

tar czf $WORK_DIR/taos-log-taosdemo-$today.tar.gz $logDir/*

echoInfo "End of TaosDemo Test" | tee -a $WORK_DIR/cron.log

@@ -1,5 +1,21 @@
#!/bin/bash

WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -17,13 +33,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #

function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10

rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

rm -rf $logDir/*
rm -rf $dataDir/*

echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}

@@ -32,27 +55,30 @@ function runPerfTest13d {
echoInfo "Restart Taosd"
restartTaosd

cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-13d-csv.sh $1 2>&1 | tee /root/perftest-13d-$1-$today.log
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -d 13 -w -q 2>&1 | tee $WORK_DIR/perftest-13d-$walPostfix-$today.log
}

function generatePerfPlot13d {
cd /root
cd $WORK_DIR

csvLines=`cat perftest-13d-$1-report.csv | wc -l`
csvLines=`cat perftest-13d-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-13d-$1-report.csv
sed -i '1d' perftest-13d-$walPostfix-report.csv
fi

gnuplot -e "filename='perftest-13d-$1-report'" -p perftest-csv2png.gnuplot
gnuplot -e "filename='perftest-13d-$walPostfix-report'" -p perftest-csv2png.gnuplot
}

today=`date +"%Y%m%d"`
cd /root
cd $WORK_DIR

echoInfo "run Performance Test with 13 days data"
runPerfTest13d $1
echoInfo "Generate plot of 13 days data"
generatePerfPlot13d $1
echoInfo "End of TSDB-Compare 13-days-data Test"

tar czf $WORK_DIR/taos-log-13d-$today.tar.gz $logDir/*

echoInfo "End of TSDB-Compare 13-days-data Test" | tee -a $WORK_DIR/cron.log

@@ -1,5 +1,21 @@
#!/bin/bash

WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #

@@ -17,13 +33,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #

function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10

rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

rm -rf $logDir/*
rm -rf $dataDir/*

echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}

@@ -32,27 +55,30 @@ function runPerfTest1d {
echoInfo "Restart Taosd"
restartTaosd

cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-1d-csv.sh $1 2>&1 | tee /root/perftest-1d-$1-$today.log
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -d 1 -w -q 2>&1 | tee $WORK_DIR/perftest-1d-$walPostfix-$today.log
}

function generatePerfPlot1d {
cd /root
cd $WORK_DIR

csvLines=`cat perftest-1d-$1-report.csv | wc -l`
csvLines=`cat perftest-1d-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '2d' perftest-1d-$1-report.csv
sed -i '1d' perftest-1d-$walPostfix-report.csv
fi

gnuplot -e "filename='perftest-1d-$1-report'" -p perftest-csv2png.gnuplot
gnuplot -e "filename='perftest-1d-$walPostfix-report'" -p perftest-csv2png.gnuplot
}

today=`date +"%Y%m%d"`
cd /root
cd $WORK_DIR

echoInfo "run Performance Test with 1 day data"
runPerfTest1d $1
runPerfTest1d
echoInfo "Generate plot of 1 day data"
generatePerfPlot1d $1
echoInfo "End of TSDB-Compare 1-day-data Test"
generatePerfPlot1d

tar czf $WORK_DIR/taos-log-1d-$today.tar.gz $logDir/*

echoInfo "End of TSDB-Compare 1-day-data Test" | tee -a $WORK_DIR/cron.log

@@ -0,0 +1,84 @@
#!/bin/bash

WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #

function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

rm -rf $logDir/*
rm -rf $dataDir/*

echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}

function runPerfTestVar10K {
echoInfo "Restart Taosd"
restartTaosd

cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -v 10000 -i 100 -w -q 2>&1 | tee $WORK_DIR/perftest-var10k-int100s-$walPostfix-$today.log
}

function generatePerfPlotVar10K {
cd $WORK_DIR

csvLines=`cat perftest-var10k-int100s-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-var10k-int100s-$walPostfix-report.csv
fi

gnuplot -e "filename='perftest-var10k-int100s-$walPostfix-report'" -p perftest-csv2png.gnuplot
}

today=`date +"%Y%m%d"`
cd $WORK_DIR

echoInfo "run Performance Test with 10K tables data"
runPerfTestVar10K
echoInfo "Generate plot of 10K tables data"
generatePerfPlotVar10K

tar czf $WORK_DIR/taos-log-var10k-int100s-$today.tar.gz $logDir/*

echoInfo "End of TSDB-Compare var10k-int100s-tables-data Test" | tee -a $WORK_DIR/cron.log

@@ -0,0 +1,84 @@
#!/bin/bash

WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`

# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #

function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

rm -rf $logDir/*
rm -rf $dataDir/*

echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}

function runPerfTestVar10K {
echoInfo "Restart Taosd"
restartTaosd

cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -v 10000 -w -q 2>&1 | tee $WORK_DIR/perftest-var10k-int10s-$walPostfix-$today.log
}

function generatePerfPlotVar10K {
cd $WORK_DIR

csvLines=`cat perftest-var10k-int10s-$walPostfix-report.csv | wc -l`

if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-var10k-int10s-$walPostfix-report.csv
fi

gnuplot -e "filename='perftest-var10k-int10s-$walPostfix-report'" -p perftest-csv2png.gnuplot
}

today=`date +"%Y%m%d"`
cd $WORK_DIR

echoInfo "run Performance Test with 10K tables data"
runPerfTestVar10K
echoInfo "Generate plot of 10K tables data"
generatePerfPlotVar10K

tar czf $WORK_DIR/taos-log-var10k-int10s-$today.tar.gz $logDir/*

echoInfo "End of TSDB-Compare var10k-int10s-tables-data Test" | tee -a $WORK_DIR/cron.log

@@ -0,0 +1,251 @@
#!/bin/bash
#set -x

WORK_DIR=/mnt/root
DATA_DIR=/mnt/data

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

# default value
DEFAULT_BATCH=5000
DEFAULT_DAYS=1
DEFAULT_INTERVAL=1
DEFAULT_SCALEVAR=10
DEFAULT_DOPREPARE=false
DEFAULT_DOWRITE=false
DEFAULT_DOQUERY=false

# function
function do_prepare {
echo
echo "---------------Generating Data-----------------"
echo

echo
echo "Prepare data for InfluxDB...."

echo "bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval $interval_s \
-scale-var $scalevar -use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END > $DATA_FILE"

bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval $interval_s \
-scale-var $scalevar -use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END > $DATA_FILE
}

function do_write {
echo "cat $DATA_FILE | bin/bulk_load_influx \
--batch-size=$batch --workers=20 --urls=http://172.15.1.5:8086 | grep loaded"
INFLUXRES=`cat $DATA_FILE | bin/bulk_load_influx \
--batch-size=$batch --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`

echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
DATA=`echo $INFLUXRES|awk '{print($2)}'`
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
}

function do_query {

echo
echo "------------------Querying Data-----------------"
echo

echo
echo "start query test, query max from 8 hosts group by 1 hour, InfluxDB"
echo

#Test case 1
# Over all data, match 8 hostname tags and return the maximum of the usage_user metric in the simulated server CPU data for those 8 hosts.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
echo "IFQS1=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall"

IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`

#Test case 2
# Over all data, match 8 hostname tags and return the maximum of usage_user per 1-hour interval.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
echo "IFQS2=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"

IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`

#Test case 3
# Over a random 12-hour window, match 8 hostname tags and return the maximum of usage_user per 10-minute interval.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
echo "IFQS3=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"

IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`

#Test case 4
# Over a random 1-hour window, match 8 hostname tags and return the maximum of usage_user per 1-minute interval.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
echo "IFQS4=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"

IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`

}

batch=$DEFAULT_BATCH
days=$DEFAULT_DAYS
interval=$DEFAULT_INTERVAL
scalevar=$DEFAULT_SCALEVAR
doprepare=$DEFAULT_DOPREPARE
dowrite=$DEFAULT_DOWRITE
doquery=$DEFAULT_DOQUERY

help="$(basename "$0") [-h] [-b batchsize] [-d data-of-days] [-i interval] [-v scalevar] [-p false for don't prepare] [-w false for don't do write] [-q false for don't do query]"

while getopts ':b:d:i:v:pwqh' flag; do
case "${flag}" in
b) batch=${OPTARG};;
d) days=${OPTARG};;
i) interval=${OPTARG};;
v) scalevar=${OPTARG};;
p) doprepare=${OPTARG};;
w) dowrite=${OPTARG};;
q) doquery=${OPTARG};;
:) echo -e "${RED}Missing argument!${NC}"
echo "$help"
exit 1
;;
h) echo "$help"
exit 1
;;
esac
done

if [[ $scalevar -eq 10000 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"

if [[ $interval -eq 100 ]]
then
interval_s=100s
DATA_FILE=$DATA_DIR/influxdb-var10k-int100s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-var10k-int100s.csv
else
interval_s=10s
DATA_FILE=$DATA_DIR/influxdb-var10k-int10s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-var10k-int10s.csv
fi
else
if [[ $days -eq 1 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
DATA_FILE=$DATA_DIR/influxdb-1d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-1d.csv
elif [[ $days -eq 13 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-14T00:00:00Z"
DATA_FILE=$DATA_DIR/influxdb-13d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-13d.csv
else
echo -e "${RED}don't support input $days${NC}"
exit 1
fi
interval_s=${interval}s
fi

echo "TIME_START: $TIME_START, TIME_END: $TIME_END, DATA_FILE: $DATA_FILE"
echo "doprepare: $doprepare, dowrite: $dowrite, doquery: $doquery"

if $doprepare;
then
do_prepare
fi

docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10

if $dowrite;
then
echo -e "Start test InfluxDB writing, result in ${GREEN}Green line${NC}"
do_write
fi

if $doquery;
then
echo -e "Start test InfluxDB query, result in ${GREEN}Green line${NC}"
do_query
fi

echo
echo
echo "======================================================"
echo "             tsdb performance comparison              "
echo "======================================================"
if $dowrite;
then
echo -e "       Writing $DATA records test takes:          "
printf  "       InfluxDB           | %-4.5f Seconds    \n" $IFWTM
echo "------------------------------------------------------"
fi

if $doquery;
then
echo "                   Query test cases:                "
echo " case 1: select the max(value) from all data    "
echo " filtered out 8 hosts                                 "
echo "       Query test case 1 takes:                  "
printf  "       InfluxDB           | %-4.5f Seconds    \n" $IFQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data          "
echo " filtered out 8 hosts with an interval of 1 hour     "
echo " case 2 takes:                                   "
printf  "       InfluxDB           | %-4.5f Seconds    \n" $IFQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min         "
echo " filtered out 8 hosts interval(1h)                   "
echo " case 3 takes:                                   "
printf  "       InfluxDB           | %-4.5f Seconds    \n" $IFQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data  "
echo " data filtered out 8 hosts with an interval of 1 min         "
echo " case 4 takes:                                   "
printf  "       InfluxDB           | %-4.5f Seconds    \n" $IFQ4
echo "------------------------------------------------------"
fi

docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1

today=`date +"%Y%m%d"`
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> $RECORD_CSV_FILE

@ -0,0 +1,283 @@
|
|||
#!/bin/bash
|
||||
#set -x
|
||||
|
||||
WORK_DIR=/mnt/root
|
||||
DATA_DIR=/mnt/data
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
# default value
|
||||
DEFAULT_BATCH=5000
|
||||
DEFAULT_DAYS=1
|
||||
DEFAULT_INTERVAL=1
|
||||
DEFAULT_SCALEVAR=10
|
||||
DEFAULT_DOPREPARE=false
|
||||
DEFAULT_DOWRITE=false
|
||||
DEFAULT_DOQUERY=false
|
||||
|
||||
# function
|
||||
function do_prepare {
|
||||
echo
|
||||
echo "---------------Generating Data-----------------"
|
||||
echo
|
||||
|
||||
echo
|
||||
echo "Prepare data for TDengine...."
|
||||
|
||||
# bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > $DATA_DIR/tdengine.dat
|
||||
echo "bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval $interval_s \
|
||||
-tdschema-file config/TDengineSchema.toml -scale-var $scalevar \
|
||||
-use-case devops -timestamp-start $TIME_START \
|
||||
-timestamp-end $TIME_END \
|
||||
> $DATA_FILE"
|
||||
|
||||
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval $interval_s \
|
||||
-tdschema-file config/TDengineSchema.toml -scale-var $scalevar \
|
||||
-use-case devops -timestamp-start $TIME_START \
|
||||
-timestamp-end $TIME_END \
|
||||
> $DATA_FILE
|
||||
}
|
||||
|
||||
function do_write {
|
||||
echo "TDENGINERES=cat $DATA_FILE |bin/bulk_load_tdengine --url 127.0.0.1:0 \
|
||||
--batch-size $batch -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded"
|
||||
|
||||
TDENGINERES=`cat $DATA_FILE |bin/bulk_load_tdengine --url 127.0.0.1:0 \
|
||||
--batch-size $batch -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}TDengine writing result:${NC}"
|
||||
echo -e "${GREEN}$TDENGINERES${NC}"
|
||||
DATA=`echo $TDENGINERES|awk '{print($2)}'`
|
||||
TMP=`echo $TDENGINERES|awk '{print($5)}'`
|
||||
TDWTM=`echo ${TMP%s*}`
|
||||
|
||||
}

function do_query {

echo
echo "------------------Querying Data-----------------"
echo

echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo

#Test case 1
#Query the whole data set, matching 8 hostname tags, for the maximum of the usage_user metric in the simulated server CPU data of those 8 hosts.
#select max(usage_user) from cpu where(hostname='host_a' or hostname='host_b' or hostname='host_c' or hostname='host_d' or hostname='host_e' or hostname='host_f' or hostname='host_g' or hostname='host_h');
# a,b,c,d,e,f,g,h are 8 random numbers.
echo "TDQS1=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"

TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`

#Test case 2
#Query the whole data set, matching 8 hostname tags, for the maximum of usage_user of those 8 hosts at a 1-hour granularity.
#select max(usage_user) from cpu where(hostname='host_a' or hostname='host_b' or hostname='host_c' or hostname='host_d' or hostname='host_e' or hostname='host_f' or hostname='host_g' or hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are 8 random numbers
echo "TDQS2=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"

TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`

echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`

#Test case 3
#Query a random 12-hour window, matching 8 hostname tags, for the maximum of usage_user of those 8 hosts at a 10-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' or hostname='host_b' or hostname='host_c' or hostname='host_d' or hostname='host_e' or hostname='host_f' or hostname='host_g' or hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 12 hours
echo "TDQS3=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"

TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`

#Test case 4
#Query a random 1-hour window, matching 8 hostname tags, for the maximum of usage_user of those 8 hosts at a 1-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' or hostname='host_b' or hostname='host_c' or hostname='host_d' or hostname='host_e' or hostname='host_f' or hostname='host_g' or hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
echo "TDQS4=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"

TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`

}
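The four cases above differ only in the -query-type handed to bulk_query_gen; a hedged sketch of the same flow as a loop (RES is a hypothetical variable name, every flag is taken from the commands above):

for qt in 8-host-all 8-host-allbyhr 8-host-12-hr 8-host-1-hr; do
    RES=`bin/bulk_query_gen -seed 123 -format tdengine -query-type $qt \
        -scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
        -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0 | grep wall`
    echo -e "${GREEN}$RES${NC}"
done

The unrolled form is kept in the script because each case stores its result in a distinct variable (TDQ1..TDQ4) for the summary section.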

batch=$DEFAULT_BATCH
days=$DEFAULT_DAYS
interval=$DEFAULT_INTERVAL
scalevar=$DEFAULT_SCALEVAR
doprepare=$DEFAULT_DOPREPARE
dowrite=$DEFAULT_DOWRITE
doquery=$DEFAULT_DOQUERY

help="$(basename "$0") [-h] [-b batchsize] [-d days-of-data] [-i interval] [-v scalevar] [-p false to skip prepare] [-w false to skip write] [-q false to skip query]"

while getopts ':b:d:i:v:p:w:q:h' flag; do
case "${flag}" in
b) batch=${OPTARG};;
d) days=${OPTARG};;
i) interval=${OPTARG};;
v) scalevar=${OPTARG};;
p) doprepare=${OPTARG};;
w) dowrite=${OPTARG};;
q) doquery=${OPTARG};;
:) echo -e "${RED}Missing argument!${NC}"
echo "$help"
exit 1
;;
h) echo "$help"
exit 1
;;
esac
done
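For reference, a hedged example invocation (the script filename is not shown in this diff, so the name below is hypothetical, and the option values are illustrative):

./perftest-tdengine.sh -b 5000 -d 13 -i 10 -v 100 -w true -q true

Note that -p, -w and -q take an explicit true/false argument, which is why they carry a colon in the getopts spec above.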

walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi

echo -e "${GREEN} $walPostfix found! ${NC}"

if [[ $scalevar -eq 10000 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"

if [[ $interval -eq 100 ]]
then
interval_s=100s
DATA_FILE=$DATA_DIR/tdengine-var10k-int100s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-var10k-int100s-$walPostfix-report.csv
else
interval_s=10s
DATA_FILE=$DATA_DIR/tdengine-var10k-int10s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-var10k-int10s-$walPostfix-report.csv
fi
else
if [[ $days -eq 1 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
DATA_FILE=$DATA_DIR/tdengine-1d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-1d-$walPostfix-report.csv
elif [[ $days -eq 13 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-14T00:00:00Z"
DATA_FILE=$DATA_DIR/tdengine-13d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-13d-$walPostfix-report.csv
else
echo -e "${RED}unsupported value for -d: $days${NC}"
exit 1
fi

interval_s=${interval}s
fi

echo "TIME_START: $TIME_START, TIME_END: $TIME_END, DATA_FILE: $DATA_FILE"
echo "doprepare: $doprepare, dowrite: $dowrite, doquery: $doquery"

if $doprepare;
then
do_prepare
fi

echo

if $dowrite;
then
echo -e "Start TDengine write test, result in ${GREEN}green${NC}"
do_write
fi

if $doquery;
then
echo -e "Start TDengine query test, result in ${GREEN}green${NC}"
do_query
fi

echo
echo
echo "======================================================"
echo "             tsdb performance comparison              "
echo "======================================================"
if $dowrite;
then
echo -e " Writing $DATA records takes: "
printf " TDengine | %-4.5f Seconds \n" $TDWTM
echo "------------------------------------------------------"
fi

if $doquery;
then
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo "         filtered by 8 hostnames "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo "         filtered by 8 hostnames with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from a random 12-hour window, "
echo "         filtered by 8 hostnames with an interval of 10 min "
echo " case 3 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from a random 1-hour window, "
echo "         filtered by 8 hostnames with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ4
echo "------------------------------------------------------"
fi

#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> $RECORD_CSV_FILE

@ -0,0 +1,24 @@
#!/usr/bin/gnuplot
reset

set title filename font ",20"

set ylabel "Time in Seconds"

set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"

set style data linespoints

set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output filename . '.png'
set datafile separator ','

set key reverse Left outside
set grid


plot filename . '.csv' using 1:2 title "Write Time in Seconds"
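The plot script expects its `filename` variable to be injected by the caller and derives both the input CSV and the output PNG from it. A hedged example invocation (the report basename and the script name are hypothetical, following the RECORD_CSV_FILE pattern above):

gnuplot -e "filename='perftest-1d-wal2-report'" perftest.gnuplot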

@ -77,11 +77,7 @@ class TDTestCase:
        # join queries
        tdSql.query(
            "select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
        tdSql.checkRows(6)

        tdSql.query(
            "select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id order by ts desc")
        tdSql.checkColumnSorted(0, "desc")
        tdSql.checkRows(6)

        tdSql.error(
            "select ts, pressure, temperature, id, dscrption from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")

@ -116,8 +112,7 @@ class TDTestCase:
        tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
        tdSql.checkRows(6)

        tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
        tdSql.checkRows(2)
        tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")

    def stop(self):
        tdSql.close()
@ -0,0 +1,132 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
import threading
import time
from datetime import datetime


class MetadataQuery:
    def initConnection(self):
        self.tables = 100000
        self.records = 10
        self.numOfThreads = 10
        self.ts = 1537146000000
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos"

    def connectDB(self):
        self.conn = taos.connect(
            self.host,
            self.user,
            self.password,
            self.config)
        return self.conn.cursor()

    def createStable(self):
        print("================= Create stable meters =================")
        cursor = self.connectDB()
        cursor.execute("drop database if exists test")
        cursor.execute("create database test")
        cursor.execute("use test")
        cursor.execute('''create table if not exists meters (ts timestamp, speed int) tags(
            tgcol1 tinyint, tgcol2 smallint, tgcol3 int, tgcol4 bigint, tgcol5 float, tgcol6 double, tgcol7 bool, tgcol8 binary(20), tgcol9 nchar(20),
            tgcol10 tinyint, tgcol11 smallint, tgcol12 int, tgcol13 bigint, tgcol14 float, tgcol15 double, tgcol16 bool, tgcol17 binary(20), tgcol18 nchar(20),
            tgcol19 tinyint, tgcol20 smallint, tgcol21 int, tgcol22 bigint, tgcol23 float, tgcol24 double, tgcol25 bool, tgcol26 binary(20), tgcol27 nchar(20),
            tgcol28 tinyint, tgcol29 smallint, tgcol30 int, tgcol31 bigint, tgcol32 float, tgcol33 double, tgcol34 bool, tgcol35 binary(20), tgcol36 nchar(20),
            tgcol37 tinyint, tgcol38 smallint, tgcol39 int, tgcol40 bigint, tgcol41 float, tgcol42 double, tgcol43 bool, tgcol44 binary(20), tgcol45 nchar(20),
            tgcol46 tinyint, tgcol47 smallint, tgcol48 int, tgcol49 bigint, tgcol50 float, tgcol51 double, tgcol52 bool, tgcol53 binary(20), tgcol54 nchar(20))''')
        cursor.close()
        self.conn.close()

    def createTablesAndInsertData(self, threadID):
        cursor = self.connectDB()
        cursor.execute("use test")

        # each thread creates its own contiguous block of tables
        tablesPerThread = self.tables // self.numOfThreads
        base = threadID * tablesPerThread
        for i in range(tablesPerThread):
            cursor.execute(
                '''create table t%d using meters tags(
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
                (base + i + 1,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
            for j in range(self.records):
                cursor.execute(
                    "insert into t%d values(%d, %d)" %
                    (base + i + 1, self.ts + j, j))
        cursor.close()
        self.conn.close()

    def queryData(self, query):
        cursor = self.connectDB()
        cursor.execute("use test")

        print("================= query tag data =================")
        startTime = datetime.now()
        cursor.execute(query)
        cursor.fetchall()
        endTime = datetime.now()
        print(
            "Query time for the above query is %d seconds" %
            (endTime - startTime).seconds)

        cursor.close()
        self.conn.close()


if __name__ == '__main__':

    t = MetadataQuery()
    t.initConnection()
    t.createStable()

    print(
        "================= Create %d tables and insert %d records into each table =================" %
        (t.tables, t.records))
    startTime = datetime.now()
    # start all workers first, then join, so the inserts actually run in parallel
    threads = []
    for i in range(t.numOfThreads):
        thread = threading.Thread(
            target=t.createTablesAndInsertData, args=(i,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    endTime = datetime.now()
    diff = (endTime - startTime).seconds
    print(
        "spend %d seconds to create %d tables and insert %d records into each table" %
        (diff, t.tables, t.records))

    query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9,
        tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18,
        tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27,
        tgcol28, tgcol29, tgcol30, tgcol31, tgcol32, tgcol33, tgcol34, tgcol35, tgcol36,
        tgcol37, tgcol38, tgcol39, tgcol40, tgcol41, tgcol42, tgcol43, tgcol44, tgcol45,
        tgcol46, tgcol47, tgcol48, tgcol49, tgcol50, tgcol51, tgcol52, tgcol53, tgcol54
        from meters where tgcol1 > 10 and tgcol1 < 100 and tgcol2 > 100 and tgcol2 < 1000 or tgcol3 > 10000 or tgcol7 = true
        or tgcol8 like '%2' and tgcol10 < 10'''

    t.queryData(query)
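The file is meant to be run standalone against a local taosd; its filename is not visible in this hunk, so the name below is hypothetical:

python3 metadataQuery.py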

@ -16,6 +16,7 @@ import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:

@ -26,6 +27,46 @@ class TDTestCase:
        self.rowNum = 10
        self.ts = 1537146000000

    def checkColumnSorted(self, col, order):
        frame = inspect.stack()[1]
        callerModule = inspect.getmodule(frame[0])
        callerFilename = callerModule.__file__

        if col < 0:
            tdLog.exit(
                "%s failed: sql:%s, col:%d is smaller than zero" %
                (callerFilename, tdSql.sql, col))
        if col > tdSql.queryCols:
            tdLog.exit(
                "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
                (callerFilename, tdSql.sql, col, tdSql.queryCols))

        matrix = np.array(tdSql.queryResult)
        column = matrix[:, col]

        if order == "" or order.upper() == "ASC":
            if all(sorted(column) == column):
                tdLog.info(
                    "sql:%s, column :%d is sorted in ascending order as expected" %
                    (tdSql.sql, col))
            else:
                tdLog.exit(
                    "%s failed: sql:%s, col:%d is not sorted in ascending order" %
                    (callerFilename, tdSql.sql, col))
        elif order.upper() == "DESC":
            if all(sorted(column, reverse=True) == column):
                tdLog.info(
                    "sql:%s, column :%d is sorted in descending order as expected" %
                    (tdSql.sql, col))
            else:
                tdLog.exit(
                    "%s failed: sql:%s, col:%d is not sorted in descending order" %
                    (callerFilename, tdSql.sql, col))
        else:
            tdLog.exit(
                "%s failed: sql:%s, the order provided for col:%d is not correct" %
                (callerFilename, tdSql.sql, col))

    def run(self):
        tdSql.prepare()

@ -49,11 +90,11 @@ class TDTestCase:
        print("======= step 2: verify order for each column =========")
        # sort for timestamp in asc order
        tdSql.query("select * from st order by ts asc")
        tdSql.checkColumnSorted(0, "asc")
        self.checkColumnSorted(0, "asc")

        # sort for timestamp in desc order
        tdSql.query("select * from st order by ts desc")
        tdSql.checkColumnSorted(0, "desc")
        self.checkColumnSorted(0, "desc")

        for i in range(1, 10):
            tdSql.error("select * from st order by tbcol%d" % i)

@ -63,17 +104,17 @@ class TDTestCase:
        tdSql.query(
            "select avg(tbcol1) from st group by tagcol%d order by tagcol%d" %
            (i, i))
        tdSql.checkColumnSorted(1, "")
        self.checkColumnSorted(1, "")

        tdSql.query(
            "select avg(tbcol1) from st group by tagcol%d order by tagcol%d asc" %
            (i, i))
        tdSql.checkColumnSorted(1, "asc")
        self.checkColumnSorted(1, "asc")

        tdSql.query(
            "select avg(tbcol1) from st group by tagcol%d order by tagcol%d desc" %
            (i, i))
        tdSql.checkColumnSorted(1, "desc")
        self.checkColumnSorted(1, "desc")

    def stop(self):
        tdSql.close()
@ -43,7 +43,7 @@ python3 ./test.py -f tag_lite/commit.py
python3 ./test.py -f tag_lite/create.py
python3 ./test.py -f tag_lite/datatype.py
python3 ./test.py -f tag_lite/datatype-without-alter.py
# python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/double.py
python3 ./test.py -f tag_lite/float.py
python3 ./test.py -f tag_lite/int_binary.py

@ -134,9 +134,10 @@ python3 ./test.py -f table/del_stable.py
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterAllIntTypes.py
python3 ./test.py -f query/filterFloatAndDouble.py
python3 ./test.py -f query/filterOtherTypes.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/querySort.py


#stream
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py

@ -8,24 +8,8 @@ python3 ./test.py $1 -s && sleep 1
# insert
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/int.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/float.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bool.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/double.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/multi.py

@ -42,18 +26,6 @@ python3 ./test.py $1 -s && sleep 1
# import
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHead.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importLastT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importSpan.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importInsertThenImport.py
python3 ./test.py $1 -s && sleep 1

#tag
python3 ./test.py $1 -f tag_lite/filter.py
@ -17,7 +17,6 @@ import time
import datetime
import inspect
from util.log import *
import numpy as np


class TDSql:

@ -199,47 +198,7 @@ class TDSql:
            "%s failed: sql:%s, affectedRows:%d != expect:%d" %
            (callerFilename, self.sql, self.affectedRows, expectAffectedRows))
        tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
                   (self.sql, self.affectedRows, expectAffectedRows))

    def checkColumnSorted(self, col, order):
        frame = inspect.stack()[1]
        callerModule = inspect.getmodule(frame[0])
        callerFilename = callerModule.__file__

        if col < 0:
            tdLog.exit(
                "%s failed: sql:%s, col:%d is smaller than zero" %
                (callerFilename, self.sql, col))
        if col > self.queryCols:
            tdLog.exit(
                "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
                (callerFilename, self.sql, col, self.queryCols))

        matrix = np.array(self.queryResult)
        column = matrix[:, col]

        if order == "" or order.upper() == "ASC":
            if all(sorted(column) == column):
                tdLog.info(
                    "sql:%s, column :%d is sorted in ascending order as expected" %
                    (self.sql, col))
            else:
                tdLog.exit(
                    "%s failed: sql:%s, col:%d is not sorted in ascending order" %
                    (callerFilename, self.sql, col))
        elif order.upper() == "DESC":
            if all(sorted(column, reverse=True) == column):
                tdLog.info(
                    "sql:%s, column :%d is sorted in descending order as expected" %
                    (self.sql, col))
            else:
                tdLog.exit(
                    "%s failed: sql:%s, col:%d is not sorted in descending order" %
                    (callerFilename, self.sql, col))
        else:
            tdLog.exit(
                "%s failed: sql:%s, the order provided for col:%d is not correct" %
                (callerFilename, self.sql, col))
        (self.sql, self.affectedRows, expectAffectedRows))


tdSql = TDSql()
@ -1,21 +1,20 @@
run general/cache/new_metrics.sim
run general/column/commit.sim
run general/compress/compress.sim
run general/compute/interval.sim
run general/db/basic4.sim
run general/field/binary.sim
run general/http/restful_insert.sim
run general/http/restful_full.sim
run general/import/commit.sim
run general/import/replica1.sim
run general/parser/auto_create_tb_drop_tb.sim
run general/parser/binary_escapeCharacter.sim
run general/parser/select_from_cache_disk.sim
run general/parser/null_char.sim
run general/parser/alter.sim
run general/stable/vnode3.sim
run general/table/autocreate.sim
run general/table/fill.sim
run general/table/vgroup.sim
run general/tag/filter.sim
run general/table/vgroup.sim
run general/user/authority.sim
run general/user/pass_alter.sim
run general/vector/metrics_mix.sim
run general/vector/table_field.sim
run general/user/authority.sim
run general/tag/set.sim
run general/table/delete_writing.sim
run general/stable/disk.sim

@ -232,24 +232,3 @@ cd ../../../debug; make
./test.sh -f general/vector/table_mix.sim
./test.sh -f general/vector/table_query.sim
./test.sh -f general/vector/table_time.sim

./test.sh -f unique/account/account_create.sim
./test.sh -f unique/account/account_delete.sim
./test.sh -f unique/account/account_len.sim
./test.sh -f unique/account/authority.sim
./test.sh -f unique/account/basic.sim
./test.sh -f unique/account/paras.sim
./test.sh -f unique/account/pass_alter.sim
./test.sh -f unique/account/pass_len.sim
./test.sh -f unique/account/usage.sim
./test.sh -f unique/account/user_create.sim
./test.sh -f unique/account/user_len.sim

./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/dnode/balance1.sim
./test.sh -f unique/dnode/balance2.sim
./test.sh -f unique/stable/dnode3.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/vnode/many.sim

@ -102,9 +102,10 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_delDir.sim
./test.sh -f unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim
@ -1,10 +1,10 @@
##unsupport run general/alter/cached_schema_after_alter.sim
unsupport run general/alter/count.sim
unsupport run general/alter/import.sim
run general/alter/count.sim
run general/alter/import.sim
##unsupport run general/alter/insert1.sim
unsupport run general/alter/insert2.sim
unsupport run general/alter/metrics.sim
unsupport run general/alter/table.sim
run general/alter/insert2.sim
run general/alter/metrics.sim
run general/alter/table.sim
run general/cache/new_metrics.sim
run general/cache/restart_metrics.sim
run general/cache/restart_table.sim

@ -86,14 +86,14 @@ run general/insert/query_block2_file.sim
run general/insert/query_file_memory.sim
run general/insert/query_multi_file.sim
run general/insert/tcp.sim
##unsupport run general/parser/alter.sim
run general/parser/alter.sim
run general/parser/alter1.sim
run general/parser/alter_stable.sim
run general/parser/auto_create_tb.sim
run general/parser/auto_create_tb_drop_tb.sim
run general/parser/col_arithmetic_operation.sim
run general/parser/columnValue.sim
#run general/parser/commit.sim
run general/parser/commit.sim
run general/parser/create_db.sim
run general/parser/create_mt.sim
run general/parser/create_tb.sim

@ -106,7 +106,7 @@ run general/parser/first_last.sim
##unsupport run general/parser/import_file.sim
run general/parser/lastrow.sim
run general/parser/nchar.sim
##unsupport run general/parser/null_char.sim
run general/parser/null_char.sim
run general/parser/single_row_in_tb.sim
run general/parser/select_from_cache_disk.sim
run general/parser/limit.sim

@ -132,7 +132,7 @@ run general/parser/groupby.sim
run general/parser/bug.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
##unsupport run general/parser/repeatAlter.sim
run general/parser/repeatAlter.sim
##unsupport run general/parser/slimit_alter_tags.sim
##unsupport run general/parser/stream_on_sys.sim
run general/parser/stream.sim

@ -142,6 +142,8 @@ run general/stable/dnode3.sim
run general/stable/metrics.sim
run general/stable/values.sim
run general/stable/vnode3.sim
run general/stable/refcount.sim
run general/stable/show.sim
run general/table/autocreate.sim
run general/table/basic1.sim
run general/table/basic2.sim

@ -64,7 +64,7 @@ print ======== step7
$lastRows = $data00
print ======== loop Times $x

if $x < 2 then
if $x < 5 then
$x = $x + 1
goto loop
endi

@ -75,7 +75,7 @@ print ======== step8
$lastRows = $data00
print ======== loop Times $x

if $x < 2 then
if $x < 5 then
$x = $x + 1
goto loop
endi