Merge branch 'develop' into feature/mqtt

Shengliang Guan 2020-06-01 13:31:46 +08:00 committed by GitHub
commit d5db330398
204 changed files with 5141 additions and 3222 deletions


@ -51,7 +51,7 @@ matrix:
./test-all.sh $TRAVIS_EVENT_TYPE || travis_terminate $?
cd ${TRAVIS_BUILD_DIR}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.txt
./valgrind-test.sh 2>&1 > mem-error-out.log
sleep 1
# Color setting
@ -61,9 +61,9 @@ matrix:
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
grep 'start to execute\|ERROR SUMMARY' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-mem-error-out.txt
grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.txt | awk '{print $4}'`
for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
do
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
@ -74,8 +74,8 @@ matrix:
fi
done
grep 'start to execute\|definitely lost:' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.txt
for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.txt | awk '{print $7}'`
grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 13 ]; then

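Each valgrind log is reduced with grep/awk and the extracted counts are compared against fixed thresholds. For reference, the same check as a small standalone C filter (a hypothetical helper, assuming valgrind's standard "==PID== ERROR SUMMARY: N errors" line format; the threshold mirrors the -gt 12 test above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    const int maxAllowedErrors = 12;           /* mirrors the "-gt 12" check above */
    char line[1024];
    FILE *fp = fopen("uniq-mem-error-out.log", "r");
    if (fp == NULL) return 1;
    while (fgets(line, sizeof(line), fp) != NULL) {
        const char *p = strstr(line, "ERROR SUMMARY:");
        if (p == NULL) continue;               /* not a summary line */
        int errors = atoi(p + strlen("ERROR SUMMARY:"));
        if (errors > maxAllowedErrors) {       /* fail the build, like the script does */
            fprintf(stderr, "too many valgrind errors: %d\n", errors);
            fclose(fp);
            return 1;
        }
    }
    fclose(fp);
    return 0;
}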

@ -18,7 +18,7 @@ import (
"sync"
"time"
_ "github.com/taosdata/TDengine/src/connector/go/src/taosSql"
_ "github.com/taosdata/TDengine/src/connector/go/taosSql"
)
const (
@ -634,6 +634,7 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
if appendRows == batch {
// executebatch
insertSql := buffers.String()
connection.Exec("use " + db)
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows
@ -658,6 +659,7 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
if appendRows > 0 {
// executebatch
insertSql := buffers.String()
connection.Exec("use " + db)
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows

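The inserted connection.Exec("use " + db) calls make sure each worker's connection has the target database selected before every batch runs. The same precaution with the C client looks roughly like this (a sketch; taos_query is declared in this commit as int taos_query(TAOS *taos, const char *sqlstr), while the helper name and error convention here are assumptions):

#include <stdio.h>
#include "taos.h"  /* TDengine C client header */

/* Select the database on this connection before running a batched INSERT,
 * mirroring the Go fix above. */
static int runBatchWithDb(TAOS *taos, const char *db, const char *insertSql) {
    char useSql[256];
    snprintf(useSql, sizeof(useSql), "use %s", db);
    if (taos_query(taos, useSql) != 0) {
        return -1;  /* failed to switch database */
    }
    return taos_query(taos, insertSql);
}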
Binary file not shown.


@ -175,7 +175,7 @@ SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIn
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
int32_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);


@ -84,7 +84,7 @@ typedef struct SSqlExpr {
int16_t functionId; // function id in aAgg array
int16_t resType; // return value type
int16_t resBytes; // length of return value
int16_t interBytes; // inter result buffer size
int32_t interBytes; // inter result buffer size
int16_t numOfParams; // argument value of each function
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
@ -320,7 +320,7 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
uint8_t numOfSubs;
uint16_t numOfSubs;
struct SSqlObj **pSubs;
struct SSqlObj * prev, *next;
} SSqlObj;


@ -57,6 +57,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
tscQueueAsyncError(fp, param, TSDB_CODE_CLI_OUT_OF_MEMORY);
@ -165,7 +166,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlRes *pRes = &pSql->res;
if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) {
if (pRes->qhandle == 0) {
if (pRes->qhandle == 0 && numOfRows != 0) {
tscError("qhandle is NULL");
} else {
pRes->code = numOfRows;


@ -153,7 +153,7 @@ typedef struct SRateInfo {
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *bytes, int16_t *interBytes, int16_t extLength, bool isSuperTable) {
int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable) {
if (!isValidDataType(dataType, dataBytes)) {
tscError("Illegal data type %d or data type length %d", dataType, dataBytes);
return TSDB_CODE_INVALID_SQL;
@ -478,7 +478,7 @@ int32_t count_load_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32
if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return BLK_DATA_NO_NEEDED;
} else {
return BLK_DATA_FILEDS_NEEDED;
return BLK_DATA_STATIS_NEEDED;
}
}
@ -690,7 +690,7 @@ static void sum_func_second_merge(SQLFunctionCtx *pCtx) {
}
static int32_t precal_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
return BLK_DATA_FILEDS_NEEDED;
return BLK_DATA_STATIS_NEEDED;
}
static int32_t data_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
@ -1848,13 +1848,14 @@ static void last_row_function(SQLFunctionCtx *pCtx) {
pResInfo->hasResult = DATA_SET_FLAG;
SLastrowInfo *pInfo = (SLastrowInfo *)pResInfo->interResultBuf;
pInfo->ts = pCtx->param[0].i64Key;
pInfo->ts = pCtx->ptsList[0];
pInfo->hasResult = DATA_SET_FLAG;
// set the result to final result buffer
if (pResInfo->superTableQ) {
SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->aOutputBuf + pCtx->inputBytes);
pInfo1->ts = pCtx->param[0].i64Key;
pInfo1->ts = pCtx->ptsList[0];
pInfo1->hasResult = DATA_SET_FLAG;
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts);
@ -1904,12 +1905,12 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6
memcpy(dst->pTags, pTags, (size_t)pTagInfo->tagsLen);
} else { // the tags are dumped from the ctx tag fields
for (int32_t i = 0; i < pTagInfo->numOfTagCols; ++i) {
SQLFunctionCtx* __ctx = pTagInfo->pTagCtxList[i];
if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) {
__ctx->tag = (tVariant) {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = tsKey};
SQLFunctionCtx* ctx = pTagInfo->pTagCtxList[i];
if (ctx->functionId == TSDB_FUNC_TS_DUMMY) {
ctx->tag = (tVariant) {.nType = TSDB_DATA_TYPE_BIGINT, .i64Key = tsKey};
}
tVariantDump(&pTagInfo->pTagCtxList[i]->tag, dst->pTags + size, pTagInfo->pTagCtxList[i]->tag.nType);
tVariantDump(&ctx->tag, dst->pTags + size, ctx->tag.nType, true);
size += pTagInfo->pTagCtxList[i]->outputBytes;
}
}
@ -2226,7 +2227,6 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
pTopBotInfo->res = (tValuePair**) tmp;
tmp += POINTER_BYTES * pCtx->param[0].i64Key;
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
@ -2981,14 +2981,7 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
assert(pCtx->inputBytes == pCtx->outputBytes);
for (int32_t i = 0; i < pCtx->size; ++i) {
char* output = pCtx->aOutputBuf;
if (pCtx->tag.nType == TSDB_DATA_TYPE_BINARY || pCtx->tag.nType == TSDB_DATA_TYPE_NCHAR) {
varDataSetLen(output, pCtx->tag.nLen);
tVariantDump(&pCtx->tag, varDataVal(output), pCtx->outputType);
} else {
tVariantDump(&pCtx->tag, output, pCtx->outputType);
}
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true);
pCtx->aOutputBuf += pCtx->outputBytes;
}
@ -2997,13 +2990,7 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
INC_INIT_VAL(pCtx, 1);
char* output = pCtx->aOutputBuf;
if (pCtx->tag.nType == TSDB_DATA_TYPE_BINARY || pCtx->tag.nType == TSDB_DATA_TYPE_NCHAR) {
*(int16_t*) output = pCtx->tag.nLen;
output += VARSTR_HEADER_SIZE;
}
tVariantDump(&pCtx->tag, output, pCtx->tag.nType);
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType, true);
pCtx->aOutputBuf += pCtx->outputBytes;
}
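The two hunks above (and the tag_function/tag_function_f hunks that follow) delete the per-caller var-string header handling (writing pCtx->tag.nLen and skipping VARSTR_HEADER_SIZE by hand) in favor of a new trailing bool on tVariantDump. A minimal sketch of the pattern being centralized (the helper below is illustrative, not TDengine's actual tVariantDump):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VARSTR_HEADER_SIZE sizeof(int16_t)

/* When the flag is set, the dump routine itself writes the var-data
 * length header; callers no longer offset the output buffer manually. */
static void dumpVarString(char *output, const char *val, int16_t len, int withHeader) {
    if (withHeader) {
        *(int16_t *)output = len;                    /* var-data length header */
        memcpy(output + VARSTR_HEADER_SIZE, val, len);
    } else {
        memcpy(output, val, len);                    /* caller handled the header */
    }
}

int main(void) {
    char buf[32] = {0};
    dumpVarString(buf, "tag", 3, 1);
    printf("len=%d val=%.3s\n", (int)*(int16_t *)buf, buf + VARSTR_HEADER_SIZE);
    return 0;
}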
@ -3016,30 +3003,12 @@ static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
*/
static void tag_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, 1, 1);
char* output = pCtx->aOutputBuf;
// todo refactor to dump length presented string(var string)
if (pCtx->tag.nType == TSDB_DATA_TYPE_BINARY || pCtx->tag.nType == TSDB_DATA_TYPE_NCHAR) {
*(int16_t*) output = pCtx->tag.nLen;
output += VARSTR_HEADER_SIZE;
}
tVariantDump(&pCtx->tag, output, pCtx->tag.nType);
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType, true);
}
static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) {
SET_VAL(pCtx, 1, 1);
char* output = pCtx->aOutputBuf;
// todo refactor to dump length presented string(var string)
if (pCtx->tag.nType == TSDB_DATA_TYPE_BINARY || pCtx->tag.nType == TSDB_DATA_TYPE_NCHAR) {
*(int16_t*) output = pCtx->tag.nLen;
output += VARSTR_HEADER_SIZE;
}
tVariantDump(&pCtx->tag, output, pCtx->tag.nType);
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType, true);
}
static void copy_function(SQLFunctionCtx *pCtx) {
@ -3853,15 +3822,15 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) {
}
/**
* param[1]: default value/previous value of specified timestamp
* param[2]: next value of specified timestamp
* param[3]: denotes if the result is a precious result or interpolation results
*
* @param pCtx
*/
static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly
if (pCtx->param[3].i64Key == 1) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SInterpInfoDetail* pInfo = pResInfo->interResultBuf;
if (pCtx->size == 1) {
char *pData = GET_INPUT_CHAR(pCtx);
assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType);
} else {
@ -3869,76 +3838,65 @@ static void interp_function(SQLFunctionCtx *pCtx) {
* use interpolation to generate the result.
* Note: the result of primary timestamp column uses the timestamp specified by user in the query sql
*/
assert(pCtx->param[3].i64Key == 2);
assert(pCtx->size == 2);
if (pInfo->type == TSDB_FILL_NONE) { // set no output result
return;
}
SInterpInfo interpInfo = *(SInterpInfo *)pCtx->aOutputBuf;
SInterpInfoDetail *pInfoDetail = interpInfo.pInterpDetail;
/* set no output result */
if (pInfoDetail->type == TSDB_FILL_NONE) {
pCtx->param[3].i64Key = 0;
} else if (pInfoDetail->primaryCol == 1) {
*(TSKEY *)pCtx->aOutputBuf = pInfoDetail->ts;
if (pInfo->primaryCol == 1) {
*(TSKEY *) pCtx->aOutputBuf = pInfo->ts;
} else {
if (pInfoDetail->type == TSDB_FILL_NULL) {
if (pInfo->type == TSDB_FILL_NULL) {
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
} else {
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
}
} else if (pInfoDetail->type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->aOutputBuf, pCtx->inputType);
} else if (pInfoDetail->type == TSDB_FILL_PREV) {
char *data = pCtx->param[1].pz;
char *pVal = data + TSDB_KEYSIZE;
if (pCtx->outputType == TSDB_DATA_TYPE_FLOAT) {
float v = GET_DOUBLE_VAL(pVal);
assignVal(pCtx->aOutputBuf, (const char*) &v, pCtx->outputBytes, pCtx->outputType);
} else {
assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType);
}
} else if (pInfoDetail->type == TSDB_FILL_LINEAR) {
char *data1 = pCtx->param[1].pz;
char *data2 = pCtx->param[2].pz;
char *pVal1 = data1 + TSDB_KEYSIZE;
char *pVal2 = data2 + TSDB_KEYSIZE;
SPoint point1 = {.key = *(TSKEY *)data1, .val = &pCtx->param[1].i64Key};
SPoint point2 = {.key = *(TSKEY *)data2, .val = &pCtx->param[2].i64Key};
SPoint point = {.key = pInfoDetail->ts, .val = pCtx->aOutputBuf};
SET_VAL(pCtx, pCtx->size, 1);
} else if (pInfo->type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->aOutputBuf, pCtx->inputType, true);
} else if (pInfo->type == TSDB_FILL_PREV) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, 0);
assignVal(pCtx->aOutputBuf, data, pCtx->outputBytes, pCtx->outputType);
SET_VAL(pCtx, pCtx->size, 1);
} else if (pInfo->type == TSDB_FILL_LINEAR) {
char *data1 = GET_INPUT_CHAR_INDEX(pCtx, 0);
char *data2 = GET_INPUT_CHAR_INDEX(pCtx, 1);
TSKEY key1 = pCtx->ptsList[0];
TSKEY key2 = pCtx->ptsList[1];
SPoint point1 = {.key = key1, .val = data1};
SPoint point2 = {.key = key2, .val = data2};
SPoint point = {.key = pInfo->ts, .val = pCtx->aOutputBuf};
int32_t srcType = pCtx->inputType;
if ((srcType >= TSDB_DATA_TYPE_TINYINT && srcType <= TSDB_DATA_TYPE_BIGINT) ||
srcType == TSDB_DATA_TYPE_TIMESTAMP || srcType == TSDB_DATA_TYPE_DOUBLE) {
point1.val = pVal1;
point2.val = pVal2;
if (isNull(pVal1, srcType) || isNull(pVal2, srcType)) {
point1.val = data1;
point2.val = data2;
if (isNull(data1, srcType) || isNull(data2, srcType)) {
setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes);
} else {
taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point);
}
} else if (srcType == TSDB_DATA_TYPE_FLOAT) {
float v1 = GET_DOUBLE_VAL(pVal1);
float v2 = GET_DOUBLE_VAL(pVal2);
point1.val = &v1;
point2.val = &v2;
if (isNull(pVal1, srcType) || isNull(pVal2, srcType)) {
point1.val = data1;
point2.val = data2;
if (isNull(data1, srcType) || isNull(data2, srcType)) {
setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes);
} else {
taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point);
}
} else {
if (srcType == TSDB_DATA_TYPE_BINARY || srcType == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pCtx->aOutputBuf, pCtx->inputBytes);
setVardataNull(pCtx->aOutputBuf, pCtx->inputType);
} else {
setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes);
}
@ -3946,15 +3904,8 @@ static void interp_function(SQLFunctionCtx *pCtx) {
}
}
free(interpInfo.pInterpDetail);
}
pCtx->size = pCtx->param[3].i64Key;
tVariantDestroy(&pCtx->param[1]);
tVariantDestroy(&pCtx->param[2]);
// data in the check operation are all null, not output
SET_VAL(pCtx, pCtx->size, 1);
}
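In the rewritten FILL(LINEAR) branch, the two neighboring rows and their timestamps from pCtx->ptsList are wrapped in SPoint structures and handed to taosDoLinearInterpolation. For reference, a self-contained sketch of that step for a double column (the SPoint shape is taken from the diff; the arithmetic is the standard two-point formula, not necessarily TDengine's exact implementation):

#include <stdio.h>

typedef long long TSKEY;

typedef struct SPoint {
    TSKEY key;   /* timestamp of the point */
    void *val;   /* value at that timestamp */
} SPoint;

/* Fill point->val for point->key lying between p1 and p2. */
static void linearInterpDouble(const SPoint *p1, const SPoint *p2, SPoint *point) {
    double v1 = *(double *)p1->val;
    double v2 = *(double *)p2->val;
    double ratio = (double)(point->key - p1->key) / (double)(p2->key - p1->key);
    *(double *)point->val = v1 + (v2 - v1) * ratio;
}

int main(void) {
    double v1 = 10.0, v2 = 20.0, out = 0.0;
    SPoint p1 = {.key = 1000, .val = &v1};
    SPoint p2 = {.key = 2000, .val = &v2};
    SPoint p  = {.key = 1500, .val = &out};
    linearInterpDouble(&p1, &p2, &p);
    printf("interpolated: %.1f\n", out);  /* 15.0 */
    return 0;
}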
@ -4910,7 +4861,7 @@ SQLAggFuncElem aAggs[] = {{
"interp",
TSDB_FUNC_INTERP,
TSDB_FUNC_INTERP,
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS ,
function_setup,
interp_function,
do_sum_f, // todo filter handle
@ -4918,7 +4869,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer,
noop1,
copy_function,
no_data_info,
data_req_load_info,
},
{
// 28


@ -142,7 +142,7 @@ static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVa
return invalidSqlErrMsg(pQueryInfo->msg, msg);
}
} else {
if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT)) {
if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg);
}
}
@ -1403,7 +1403,6 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE};
strcpy(colSchema.name, TSQL_TBNAME_L);
pQueryInfo->type = TSDB_QUERY_TYPE_STABLE_QUERY;
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true);
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@ -1595,7 +1594,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t resultType = 0;
int16_t resultSize = 0;
int16_t intermediateResSize = 0;
int32_t intermediateResSize = 0;
int16_t functionID = 0;
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
@ -1628,14 +1627,14 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (optr == TK_LEASTSQUARES) {
/* set the leastsquares parameters */
char val[8] = {0};
if (tVariantDump(&pParamElem[1].pNode->val, val, TSDB_DATA_TYPE_DOUBLE) < 0) {
if (tVariantDump(&pParamElem[1].pNode->val, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) {
return TSDB_CODE_INVALID_SQL;
}
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES, 0);
memset(val, 0, tListLen(val));
if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE) < 0) {
if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) {
return TSDB_CODE_INVALID_SQL;
}
@ -1795,7 +1794,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSqlExpr* pExpr = NULL;
if (optr == TK_PERCENTILE || optr == TK_APERCENTILE) {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE);
tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE, true);
double dp = GET_DOUBLE_VAL(val);
if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
@ -1818,7 +1817,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false);
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0);
} else {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT);
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t nTop = *((int32_t*)val);
if (nTop <= 0 || nTop > 100) { // todo use macro
@ -1902,7 +1901,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t bytes = 0;
int16_t type = 0;
int16_t inter = 0;
int32_t inter = 0;
int32_t ret = getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
assert(ret == TSDB_CODE_SUCCESS);
@ -2288,7 +2287,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
int16_t bytes = 0;
int16_t type = 0;
int16_t intermediateBytes = 0;
int32_t interBytes = 0;
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
@ -2302,13 +2301,13 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) ||
(functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, pExpr->param[0].i64Key, &type, &bytes,
&intermediateBytes, 0, true) != TSDB_CODE_SUCCESS) {
&interBytes, 0, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
}
tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes);
// todo refactor
pExpr->interBytes = intermediateBytes;
pExpr->interBytes = interBytes;
}
}
@ -2328,27 +2327,23 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
// if (/*(pExpr->functionId >= TSDB_FUNC_FIRST_DST && pExpr->functionId <= TSDB_FUNC_LAST_DST) ||
// (pExpr->functionId >= TSDB_FUNC_SUM && pExpr->functionId <= TSDB_FUNC_MAX) ||
// pExpr->functionId == TSDB_FUNC_LAST_ROW*/) {
// the final result size and type in the same as query on single table.
// so here, set the flag to be false;
int16_t inter = 0;
int32_t functionId = pExpr->functionId;
if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) {
continue;
}
if (functionId == TSDB_FUNC_FIRST_DST) {
functionId = TSDB_FUNC_FIRST;
} else if (functionId == TSDB_FUNC_LAST_DST) {
functionId = TSDB_FUNC_LAST;
}
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes,
&inter, 0, false);
// }
// the final result size and type in the same as query on single table.
// so here, set the flag to be false;
int32_t inter = 0;
int32_t functionId = pExpr->functionId;
if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) {
continue;
}
if (functionId == TSDB_FUNC_FIRST_DST) {
functionId = TSDB_FUNC_FIRST;
} else if (functionId == TSDB_FUNC_LAST_DST) {
functionId = TSDB_FUNC_LAST;
}
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes,
&inter, 0, false);
}
}
@ -2631,23 +2626,23 @@ static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterIn
}
if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) {
tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType);
tVariantDump(&pRight->val, (char*)&pColumnFilter->upperBndd, colType, false);
} else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
if (colType == TSDB_DATA_TYPE_BINARY) {
pColumnFilter->pz = (int64_t)calloc(1, pRight->val.nLen + 1);
pColumnFilter->len = pRight->val.nLen;
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType);
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
} else if (colType == TSDB_DATA_TYPE_NCHAR) {
// pRight->val.nLen + 1 is larger than the actual nchar string length
pColumnFilter->pz = (int64_t)calloc(1, (pRight->val.nLen + 1) * TSDB_NCHAR_SIZE);
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType);
tVariantDump(&pRight->val, (char*)pColumnFilter->pz, colType, false);
size_t len = wcslen((wchar_t*)pColumnFilter->pz);
pColumnFilter->len = len * TSDB_NCHAR_SIZE;
} else {
tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType);
tVariantDump(&pRight->val, (char*)&pColumnFilter->lowerBndd, colType, false);
}
}
@ -3336,9 +3331,8 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
*pExpr = NULL; // remove this expression
*type = TSQL_EXPR_TS;
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) ||
index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // query on tags
// check for tag query condition
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
// query on tags, check for tag query condition
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
@ -3933,7 +3927,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t
* failed to parse timestamp in regular formation, try next
* it may be a epoch time in string format
*/
tVariantDump(&pRight->val, (char*)&val, TSDB_DATA_TYPE_BIGINT);
tVariantDump(&pRight->val, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);
/*
* transfer it into MICROSECOND format if it is a string, since for
@ -4070,14 +4064,13 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
continue;
}
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type);
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg);
}
}
if ((pFillToken->nExpr < size) ||
((pFillToken->nExpr - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) {
if ((pFillToken->nExpr < size) || ((pFillToken->nExpr - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) {
tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1];
for (int32_t i = numOfFillVal; i < size; ++i) {
@ -4086,7 +4079,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
} else {
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type);
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
}
}
}
@ -4168,6 +4161,10 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
orderByTags = true;
@ -4420,10 +4417,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data /*pCmd->payload*/, pTagsSchema->type) !=
TSDB_CODE_SUCCESS) {
if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
// validate the length of binary
@ -4680,7 +4677,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
const char* msg0 = "soffset/offset can not be less than 0";
const char* msg1 = "slimit/soffset only available for STable query";
const char* msg2 = "function not supported on table";
const char* msg2 = "functions mixed up in table query";
const char* msg3 = "slimit/soffset can not apply to projection query";
// handle the limit offset value, validate the limit
@ -4763,14 +4760,22 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
bool hasTags = false;
bool hasOtherFunc = false;
// filter the query functions operating on "tbname" column that are not supported by normal columns.
for (int32_t i = 0; i < size; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
hasTags = true;
} else {
hasOtherFunc = true;
}
}
if (hasTags && hasOtherFunc) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
}
}
return TSDB_CODE_SUCCESS;
@ -5571,21 +5576,9 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pTagSchema[i].bytes) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
ret = tVariantDump(&(pList->a[i].pVar), varDataVal(tagVal), pTagSchema[i].type);
if (pList->a[i].pVar.nType == TSDB_DATA_TYPE_NULL) {
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(tagVal, sizeof(uint8_t));
} else {
varDataSetLen(tagVal, sizeof(uint32_t));
}
} else { // todo refactor
varDataSetLen(tagVal, pList->a[i].pVar.nLen);
}
} else {
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type);
}
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type, true);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
@ -5845,7 +5838,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
} else { // set the time rang
pQueryInfo->window.skey = 0;
pQueryInfo->window.skey = TSKEY_INITIAL_VAL;
pQueryInfo->window.ekey = INT64_MAX;
}


@ -689,7 +689,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
SSchema *p1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
int16_t inter = 0;
int32_t inter = 0;
int16_t type = -1;
int16_t bytes = 0;
@ -1049,7 +1049,14 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
tVariantDestroy(&pCtx->tag);
tVariantCreateFromBinary(&pCtx->tag, pCtx->aInputElemBuf, pCtx->inputBytes, pCtx->inputType);
char* input = pCtx->aInputElemBuf;
if (pCtx->inputType == TSDB_DATA_TYPE_BINARY || pCtx->inputType == TSDB_DATA_TYPE_NCHAR) {
assert(varDataLen(input) <= pCtx->inputBytes);
tVariantCreateFromBinary(&pCtx->tag, varDataVal(input), varDataLen(input), pCtx->inputType);
} else {
tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
}
}
pCtx->currentStage = SECONDARY_STAGE_MERGE;
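This hunk, like the getStatics_bin/getStatics_nchr changes later in the commit, leans on the var-data accessors varDataLen, varDataVal, and varDataTLen. A minimal sketch of the layout they imply (the definitions below are assumptions consistent with their use here, not the real headers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout: a var-type value is [int16_t len][len bytes of payload]. */
#define varDataLen(v)   (*(int16_t *)(v))
#define varDataVal(v)   ((char *)(v) + sizeof(int16_t))
#define varDataTLen(v)  ((int32_t)(sizeof(int16_t) + varDataLen(v)))

int main(void) {
    char buf[16];
    *(int16_t *)buf = 5;                         /* header: payload length */
    memcpy(buf + sizeof(int16_t), "hello", 5);   /* payload */
    printf("len=%d total=%d val=%.5s\n",
           (int)varDataLen(buf), (int)varDataTLen(buf), varDataVal(buf));
    return 0;
}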
@ -1309,7 +1316,7 @@ static bool isAllSourcesCompleted(SLocalReducer *pLocalReducer) {
return (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted);
}
static bool doInterpolationForCurrentGroup(SSqlObj *pSql) {
static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -1347,8 +1354,8 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer * pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
@ -1445,7 +1452,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
if (doInterpolationForCurrentGroup(pSql)) {
if (doBuildFilledResultForGroup(pSql)) {
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
return TSDB_CODE_SUCCESS;
}
@ -1464,8 +1471,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
#ifdef _DEBUG_VIEW
printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
#endif
assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) &&
tmpBuffer->num == 0);
assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
// chosen from loser tree
SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index];


@ -237,10 +237,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
rpcMsg->code = TSDB_CODE_NETWORK_UNAVAIL;
} else {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (rpcMsg->code == TSDB_CODE_NOT_ACTIVE_TABLE || rpcMsg->code == TSDB_CODE_INVALID_TABLE_ID ||
rpcMsg->code == TSDB_CODE_INVALID_VNODE_ID || rpcMsg->code == TSDB_CODE_NOT_ACTIVE_VNODE ||
rpcMsg->code == TSDB_CODE_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_NOT_ACTIVE_TABLE ||
rpcMsg->code == TSDB_CODE_TABLE_ID_MISMATCH) {
if (rpcMsg->code == TSDB_CODE_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_NETWORK_UNAVAIL) {
/*
* not_active_table: 1. the virtual node may fail to create table, since the procedure of create table is asynchronized,
* the virtual node may have not create table till now, so try again by using the new metermeta.
@ -653,7 +651,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->order = htons(pQueryInfo->order.order);
pQueryMsg->orderColId = htons(pQueryInfo->order.orderColId);
pQueryMsg->fillType = htons(pQueryInfo->fillType);
pQueryMsg->fillType = htons(pQueryInfo->fillType);
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons(taosArrayGetSize(pQueryInfo->colList));
@ -1289,7 +1287,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
SSchema *pSchema = pAlterTableMsg->schema;
for (int i = 0; i < pAlterTableMsg->numOfCols; ++i) {
for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pSchema->type = pField->type;
@ -1845,17 +1843,6 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
#if 0
// if current table is created according to super table, get the table meta of super table
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
char id[TSDB_TABLE_ID_LEN + 1] = {0};
strncpy(id, pMetaMsg->stableId, TSDB_TABLE_ID_LEN);
// NOTE: if the table meta of super table is not cached at client side yet, the pSTable is NULL
pTableMeta->pSTable = taosCacheAcquireByName(tscCacheHandle, id);
}
#endif
// todo add one more function: taosAddDataIfNotExists();
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
@ -1978,7 +1965,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
pSql->res.code = TSDB_CODE_SUCCESS;
pSql->res.numOfTotal = i;
tscTrace("%p load multi-metermeta resp complete num:%d", pSql, pSql->res.numOfTotal);
tscTrace("%p load multi-metermeta resp from complete num:%d", pSql, pSql->res.numOfTotal);
#endif
return TSDB_CODE_SUCCESS;


@ -284,12 +284,11 @@ int taos_query(TAOS *taos, const char *sqlstr) {
}
SSqlObj* pSql = pObj->pSql;
size_t sqlLen = strlen(sqlstr);
size_t sqlLen = strlen(sqlstr);
doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
// wait for the callback function to post the semaphore
sem_wait(&pSql->rspSem);
tsem_wait(&pSql->rspSem);
return pSql->res.code;
}
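taos_query above drives the asynchronous path via doAsyncQuery and then blocks on the request's semaphore (tsem_wait), so callers see a plain synchronous API. A typical caller (hypothetical application snippet; the taos_connect parameters are placeholders, and taos_query returning an int error code follows its definition above):

#include <stdio.h>
#include "taos.h"

int main(void) {
    TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (taos == NULL) return 1;                 /* connection failed */
    int code = taos_query(taos, "select count(*) from demo.t1");
    printf("taos_query returned code %d\n", code);
    taos_close(taos);
    return 0;
}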
@ -525,7 +524,7 @@ int taos_select_db(TAOS *taos, const char *db) {
return taos_query(taos, sql);
}
void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
void taos_free_result(TAOS_RES *res) {
if (res == NULL) return;
SSqlObj *pSql = (SSqlObj *)res;
@ -536,26 +535,23 @@ void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
if (pSql->signature != pSql) return;
STscObj* pObj = pSql->pTscObj;
if (pRes == NULL || pRes->qhandle == 0) {
/* Query rsp is not received from vnode, so the qhandle is NULL */
tscTrace("%p qhandle is null, abort free, fp:%p", pSql, pSql->fp);
STscObj* pTscObj = pSql->pTscObj;
if (pTscObj->pSql != pSql) {
// The semaphore can not be changed while freeing async sub query objects.
if (pObj->pSql != pSql) {
tscTrace("%p SqlObj is freed by app", pSql);
tscFreeSqlObj(pSql);
} else {
if (keepCmd) {
tscFreeSqlResult(pSql);
} else {
tscPartiallyFreeSqlObj(pSql);
}
tscPartiallyFreeSqlObj(pSql);
}
return;
}
// set freeFlag to 1 in retrieve message if there are un-retrieved results
// set freeFlag to 1 in retrieve message if there are un-retrieved results data in node
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscPartiallyFreeSqlObj(pSql);
@ -563,6 +559,7 @@ void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
}
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
STscObj* pTscObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -579,9 +576,8 @@ void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
if ((pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH) &&
(pRes->code != TSDB_CODE_QUERY_CANCELLED && ((pCmd->command < TSDB_SQL_LOCAL && pRes->completed == false) ||
(pRes->code == TSDB_CODE_SUCCESS && pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)))) {
pCmd->command == TSDB_SQL_FETCH) && pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false &&
(pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscTrace("%p send msg to free qhandle in vnode, code:%d, numOfRows:%d, command:%s", pSql, pRes->code, pRes->numOfRows,
@ -591,27 +587,20 @@ void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
tscProcessSql(pSql);
// waits for response and then goes on
sem_wait(&pSql->rspSem);
if (pTscObj->pSql == pSql) {
sem_wait(&pSql->rspSem);
}
} else { // if no free resource msg is sent to vnode, we free this object immediately.
STscObj* pTscObj = pSql->pTscObj;
if (pTscObj->pSql != pSql) {
tscFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
} else {
if (keepCmd) {
tscFreeSqlResult(pSql);
tscTrace("%p sql result is freed while sql command is kept", pSql);
} else {
tscPartiallyFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
}
tscPartiallyFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
}
}
}
void taos_free_result(TAOS_RES *res) { taos_free_result_imp(res, 0); }
// todo should not be used in async query
int taos_errno(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;


@ -1084,7 +1084,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
int16_t bytes = 0;
int16_t type = 0;
int16_t inter = 0;
int32_t inter = 0;
getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
@ -1770,6 +1770,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
*/
pNew->fetchFp = pNew->fp;
pSql->pSubs[i] = pNew;
pNew->fetchFp = pNew->fp;
tscTrace("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, i);
}


@ -57,9 +57,9 @@ int32_t tscInitRpc(const char *user, const char *secret, void** pDnodeConn) {
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = 0;
rpcInit.label = "TSC";
rpcInit.numOfThreads = tscNumOfThreads;
rpcInit.numOfThreads = 1; // every DB connection has only one thread
rpcInit.cfp = tscProcessMsgFromServer;
rpcInit.sessions = tsMaxVnodeConnections;
rpcInit.sessions = tsMaxConnections;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char*)user;
rpcInit.idleTime = 2000;
@ -121,7 +121,7 @@ void taos_init_imp() {
}
tscInitMsgsFp();
int queueSize = tsMaxVnodeConnections + tsMaxMeterConnections + tsMaxMgmtConnections + tsMaxMgmtConnections;
int queueSize = tsMaxConnections*2;
if (tscEmbedded == 0) {
tscNumOfThreads = tsNumOfCores * tsNumOfThreadsPerCore / 2.0;
@ -137,7 +137,7 @@ void taos_init_imp() {
return;
}
tscTmr = taosTmrInit(tsMaxMgmtConnections * 2, 200, 60000, "TSC");
tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC");
if(0 == tscEmbedded){
taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr);
}


@ -421,7 +421,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
tfree(pSql->sqlstr);
@ -1033,7 +1032,7 @@ SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functi
return pExpr;
}
int32_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}
@ -1352,7 +1351,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId) {
return false;
}
if (colId == -1 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
if (colId == TSDB_TBNAME_COLUMN_INDEX) {
return true;
}
@ -1768,11 +1767,12 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNewQueryInfo->limit = pQueryInfo->limit;
pNewQueryInfo->slimit = pQueryInfo->slimit;
pNewQueryInfo->order = pQueryInfo->order;
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->tsBuf = NULL;
pNewQueryInfo->fillType = pQueryInfo->fillType;
pNewQueryInfo->fillVal = NULL;
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->tsBuf = NULL;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
@ -1850,8 +1850,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = taosCacheAcquireByName(tscCacheHandle, name);
// todo handle error
STableMeta* pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
@ -1865,7 +1864,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
}
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed for get pMeterMeta is NULL from cache", pSql);
tscError("%p new subquery failed for get tableMeta is NULL from cache", pSql);
tscFreeSqlObj(pNew);
return NULL;
}
@ -2012,7 +2011,7 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pRes->completed);
// for normal table, do not try any more if result are exhausted
// for normal table, no need to try any more if results are all retrieved from one vnode
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || (pTableMetaInfo->vgroupList == NULL)) {
return false;
}
@ -2038,7 +2037,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
while (++pTableMetaInfo->vgroupIndex < totalVgroups) {
tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
tscTrace("%p results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%d", pSql,
pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal);
/*
@ -2122,7 +2121,7 @@ void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t column
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = ((char*) pRes->data) + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
int32_t realLen = varDataLen(pData);
@ -2135,7 +2134,7 @@ void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t column
}
if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
*(char*) (pData + realLen + VARSTR_HEADER_SIZE) = 0;
*(pData + realLen + VARSTR_HEADER_SIZE) = 0;
}
pRes->length[columnIndex] = realLen;


@ -87,7 +87,7 @@ extern int16_t tsWAL;
extern int32_t tsReplications;
extern int16_t tsAffectedRowsMod;
extern int32_t tsNumOfMPeers;
extern int32_t tsNumOfMnodes;
extern int32_t tsMaxShellConns;
extern int32_t tsMaxTables;
@ -98,9 +98,7 @@ extern char tsDefaultPass[];
extern char tsMqttBrokerAddress[];
extern char tsMqttBrokerClientId[];
extern int32_t tsMaxMeterConnections;
extern int32_t tsMaxVnodeConnections;
extern int32_t tsMaxMgmtConnections;
extern int32_t tsMaxConnections;
extern int32_t tsBalanceInterval;
extern int32_t tsOfflineThreshold;


@ -196,6 +196,7 @@ void * tdQueryTagByID(SDataRow row, int16_t colId, int16_t *type) {
STagCol key = {colId,0,0};
STagCol * stCol = taosbsearch(&key, pBase, nCols, sizeof(STagCol), compTagId, TD_EQ);
if (NULL == stCol) {
type = TSDB_DATA_TYPE_NULL;
return NULL;
}


@ -105,15 +105,13 @@ int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
* 1: affected rows include those duplicate records
*/
int16_t tsAffectedRowsMod = 0;
int32_t tsNumOfMPeers = 3;
int32_t tsMaxShellConns = 2000;
int32_t tsNumOfMnodes = 3;
int32_t tsMaxShellConns = 5000;
char tsDefaultDB[TSDB_DB_NAME_LEN] = {0};
char tsDefaultUser[64] = "root";
char tsDefaultPass[64] = "taosdata";
int32_t tsMaxMeterConnections = 10000;
int32_t tsMaxMgmtConnections = 2000;
int32_t tsMaxVnodeConnections = 10000;
int32_t tsMaxConnections = 50;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400*100; // seconds 10days
@ -409,8 +407,8 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfMPeers";
cfg.ptr = &tsNumOfMPeers;
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 1;
@ -429,7 +427,7 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// 0-any; 1-mnode; 2-dnode
// 0-any; 1-mnode; 2-vnode
cfg.option = "alternativeRole";
cfg.ptr = &tsAlternativeRole;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -682,7 +680,7 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "wallevel";
cfg.option = "walLevel";
cfg.ptr = &tsWAL;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
@ -836,32 +834,12 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxMeterConnections";
cfg.ptr = &tsMaxMeterConnections;
cfg.option = "maxConnections";
cfg.ptr = &tsMaxConnections;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 10;
cfg.maxValue = 50000000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxMgmtConnections";
cfg.ptr = &tsMaxMgmtConnections;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 10;
cfg.maxValue = 50000000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "maxVnodeConnections";
cfg.ptr = &tsMaxVnodeConnections;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 10;
cfg.maxValue = 50000000;
cfg.minValue = 1;
cfg.maxValue = 100;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);


@ -32,6 +32,35 @@ const int32_t TYPE_BYTES[11] = {
sizeof(VarDataOffsetT) // TSDB_DATA_TYPE_NCHAR
};
static void getStatics_bool(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int8_t *data = (int8_t *)pData;
*min = INT64_MAX;
*max = INT64_MIN;
*minIndex = 0;
*maxIndex = 0;
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((char *)&data[i], TSDB_DATA_TYPE_BOOL)) {
(*numOfNull) += 1;
continue;
}
*sum += data[i];
if (*min > data[i]) {
*min = data[i];
*minIndex = i;
}
if (*max < data[i]) {
*max = data[i];
*maxIndex = i;
}
}
}
static void getStatics_i8(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
int8_t *data = (int8_t *)pData;
@ -131,15 +160,6 @@ static void getStatics_i32(const TSKEY *primaryKey, const void *pData, int32_t n
*max = data[i];
*maxIndex = i;
}
// if (isNull(&lastVal, TSDB_DATA_TYPE_INT)) {
// lastKey = primaryKey[i];
// lastVal = data[i];
// } else {
// *wsum = lastVal * (primaryKey[i] - lastKey);
// lastKey = primaryKey[i];
// lastVal = data[i];
// }
}
}
@ -279,11 +299,11 @@ static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t n
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) varDataVal(data), TSDB_DATA_TYPE_BINARY)) {
if (isNull(data, TSDB_DATA_TYPE_BINARY)) {
(*numOfNull) += 1;
}
data += varDataLen(data);
data += varDataTLen(data);
}
*sum = 0;
@ -299,11 +319,11 @@ static void getStatics_nchr(const TSKEY *primaryKey, const void *pData, int32_t
ASSERT(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull((const char*) varDataVal(data), TSDB_DATA_TYPE_NCHAR)) {
if (isNull(data, TSDB_DATA_TYPE_NCHAR)) {
(*numOfNull) += 1;
}
data += varDataLen(data);
data += varDataTLen(data);
}
*sum = 0;
@ -315,7 +335,7 @@ static void getStatics_nchr(const TSKEY *primaryKey, const void *pData, int32_t
tDataTypeDescriptor tDataTypeDesc[11] = {
{TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_i8},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
{TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
{TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32},


@ -15,17 +15,19 @@
#define _DEFAULT_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <errno.h>
#include "taos.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tcq.h"
#include "tdataformat.h"
#include "tglobal.h"
#include "tlog.h"
#include "twal.h"
#include "tcq.h"
#include "taos.h"
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
@ -46,15 +48,14 @@ typedef struct {
} SCqContext;
typedef struct SCqObj {
int tid; // table ID
int rowSize; // bytes of a row
char *sqlStr; // SQL string
int columns; // number of columns
SSchema *pSchema; // pointer to schema array
void *pStream;
struct SCqObj *prev;
struct SCqObj *next;
SCqContext *pContext;
int tid; // table ID
int rowSize; // bytes of a row
char * sqlStr; // SQL string
STSchema * pSchema; // pointer to schema array
void * pStream;
struct SCqObj *prev;
struct SCqObj *next;
SCqContext * pContext;
} SCqObj;
int cqDebugFlag = 135;
@ -152,7 +153,7 @@ void cqStop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
void *cqCreate(void *handle, int tid, char *sqlStr, SSchema *pSchema, int columns) {
void *cqCreate(void *handle, int tid, char *sqlStr, STSchema *pSchema) {
SCqContext *pContext = handle;
SCqObj *pObj = calloc(sizeof(SCqObj), 1);
@ -162,11 +163,7 @@ void *cqCreate(void *handle, int tid, char *sqlStr, SSchema *pSchema, int column
pObj->sqlStr = malloc(strlen(sqlStr)+1);
strcpy(pObj->sqlStr, sqlStr);
pObj->columns = columns;
int size = sizeof(SSchema) * columns;
pObj->pSchema = malloc(size);
memcpy(pObj->pSchema, pSchema, size);
pObj->pSchema = tdDupSchema(pSchema);
cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);


@ -59,21 +59,16 @@ int main(int argc, char *argv[]) {
exit(-1);
}
SSchema schema[2];
schema[0].type = TSDB_DATA_TYPE_TIMESTAMP;
strcpy(schema[0].name, "ts");
schema[0].colId = 0;
schema[0].bytes = 8;
schema[1].type = TSDB_DATA_TYPE_INT;
strcpy(schema[1].name, "avgspeed");
schema[1].colId = 1;
schema[1].bytes = 4;
STSchema *pSchema = tdNewSchema(2);
tdSchemaAddCol(pSchema, TSDB_DATA_TYPE_TIMESTAMP, 0, 8);
tdSchemaAddCol(pSchema, TSDB_DATA_TYPE_INT, 1, 4);
for (int sid =1; sid<10; ++sid) {
cqCreate(pCq, sid, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", schema, 2);
cqCreate(pCq, sid, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema);
}
tdFreeSchema(pSchema);
while (1) {
char c = getchar();


@ -22,7 +22,7 @@ extern "C" {
int32_t dnodeInitMgmt();
void dnodeCleanupMgmt();
void dnodeDispatchToDnodeMgmt(SRpcMsg *rpcMsg);
void dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg);
void* dnodeGetVnode(int32_t vgId);
int32_t dnodeGetVnodeStatus(void *pVnode);


@ -22,6 +22,7 @@
#include "ttimer.h"
#include "tsdb.h"
#include "twal.h"
#include "tqueue.h"
#include "tsync.h"
#include "ttime.h"
#include "ttimer.h"
@ -42,10 +43,12 @@ void * tsDnodeTmr = NULL;
static void * tsStatusTimer = NULL;
static uint32_t tsRebootTime;
static SRpcIpSet tsDMnodeIpSetForPeer = {0};
static SRpcIpSet tsDMnodeIpSetForShell = {0};
static SRpcIpSet tsDMnodeIpSet = {0};
static SDMMnodeInfos tsDMnodeInfos = {0};
static SDMDnodeCfg tsDnodeCfg = {0};
static taos_qset tsMgmtQset = NULL;
static taos_queue tsMgmtQueue = NULL;
static pthread_t tsQthread;
static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes);
static bool dnodeReadMnodeInfos();
@ -55,6 +58,7 @@ static bool dnodeReadDnodeCfg();
static void dnodeSaveDnodeCfg();
static void dnodeProcessStatusRsp(SRpcMsg *pMsg);
static void dnodeSendStatusMsg(void *handle, void *tmrId);
static void *dnodeProcessMgmtQueue(void *param);
static int32_t dnodeOpenVnodes();
static void dnodeCloseVnodes();
@ -74,52 +78,64 @@ int32_t dnodeInitMgmt() {
dnodeReadDnodeCfg();
tsRebootTime = taosGetTimestampSec();
if (!dnodeReadMnodeInfos()) {
memset(&tsDMnodeIpSet, 0, sizeof(SRpcIpSet));
memset(&tsDMnodeInfos, 0, sizeof(SDMMnodeInfos));
tsDMnodeIpSet.numOfIps = 1;
taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSet.fqdn[0], &tsDMnodeIpSet.port[0]);
if (strcmp(tsSecond, tsFirst) != 0) {
tsDMnodeIpSet.numOfIps = 2;
taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSet.fqdn[1], &tsDMnodeIpSet.port[1]);
}
} else {
tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
}
}
// create the queue and thread to handle the message
tsMgmtQset = taosOpenQset();
if (tsMgmtQset == NULL) {
dError("failed to create the mgmt queue set");
dnodeCleanupMgmt();
return -1;
}
tsMgmtQueue = taosOpenQueue();
if (tsMgmtQueue == NULL) {
dError("failed to create the mgmt queue");
dnodeCleanupMgmt();
return -1;
}
taosAddIntoQset(tsMgmtQset, tsMgmtQueue, NULL);
pthread_attr_t thAttr;
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
pthread_attr_destroy(&thAttr);
if (code != 0) {
dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno));
dnodeCleanupMgmt();
return -1;
}
code = dnodeOpenVnodes();
if (code != TSDB_CODE_SUCCESS) {
dnodeCleanupMgmt();
return -1;
}
tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM");
if (tsDnodeTmr == NULL) {
dError("failed to init dnode timer");
return -1;
}
if (!dnodeReadMnodeInfos()) {
memset(&tsDMnodeIpSetForPeer, 0, sizeof(SRpcIpSet));
memset(&tsDMnodeIpSetForShell, 0, sizeof(SRpcIpSet));
memset(&tsDMnodeInfos, 0, sizeof(SDMMnodeInfos));
tsDMnodeIpSetForPeer.numOfIps = 1;
taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSetForPeer.fqdn[0], &tsDMnodeIpSetForPeer.port[0]);
tsDMnodeIpSetForPeer.port[0] += TSDB_PORT_DNODEDNODE;
tsDMnodeIpSetForShell.numOfIps = 1;
taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSetForShell.fqdn[0], &tsDMnodeIpSetForShell.port[0]);
tsDMnodeIpSetForShell.port[0] += TSDB_PORT_DNODESHELL;
if (strcmp(tsSecond, tsFirst) != 0) {
tsDMnodeIpSetForPeer.numOfIps = 2;
taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSetForPeer.fqdn[1], &tsDMnodeIpSetForPeer.port[1]);
tsDMnodeIpSetForPeer.port[1] += TSDB_PORT_DNODEDNODE;
tsDMnodeIpSetForShell.numOfIps = 2;
taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSetForShell.fqdn[1], &tsDMnodeIpSetForShell.port[1]);
tsDMnodeIpSetForShell.port[1] += TSDB_PORT_DNODESHELL;
}
} else {
tsDMnodeIpSetForPeer.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSetForPeer.numOfIps = tsDMnodeInfos.nodeNum;
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForPeer.fqdn[i], &tsDMnodeIpSetForPeer.port[i]);
tsDMnodeIpSetForPeer.port[i] += TSDB_PORT_DNODEDNODE;
}
tsDMnodeIpSetForShell.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSetForShell.numOfIps = tsDMnodeInfos.nodeNum;
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForShell.fqdn[i], &tsDMnodeIpSetForShell.port[i]);
tsDMnodeIpSetForShell.port[i] += TSDB_PORT_DNODESHELL;
}
}
int32_t code = dnodeOpenVnodes();
if (code != TSDB_CODE_SUCCESS) {
dnodeCleanupMgmt();
return -1;
}
@ -142,22 +158,62 @@ void dnodeCleanupMgmt() {
}
dnodeCloseVnodes();
if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset);
if (tsQthread) pthread_join(tsQthread, NULL);
if (tsMgmtQueue) taosCloseQueue(tsMgmtQueue);
if (tsMgmtQset) taosCloseQset(tsMgmtQset);
tsMgmtQset = NULL;
tsMgmtQueue = NULL;
}
void dnodeDispatchToDnodeMgmt(SRpcMsg *pMsg) {
SRpcMsg rsp;
void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
void *item;
if (dnodeProcessMgmtMsgFp[pMsg->msgType]) {
rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg);
item = taosAllocateQitem(sizeof(SRpcMsg));
if (item) {
memcpy(item, pMsg, sizeof(SRpcMsg));
taosWriteQitem(tsMgmtQueue, 1, item);
} else {
rsp.code = TSDB_CODE_MSG_NOT_PROCESSED;
SRpcMsg rsp;
rsp.handle = pMsg->handle;
rsp.pCont = NULL;
rsp.code = TSDB_CODE_SERV_OUT_OF_MEMORY;
rpcSendResponse(&rsp);
rpcFreeCont(pMsg->pCont);
}
}
static void *dnodeProcessMgmtQueue(void *param) {
SRpcMsg *pMsg;
SRpcMsg rsp;
int type;
void *handle;
while (1) {
if (taosReadQitemFromQset(tsMgmtQset, &type, (void **) &pMsg, &handle) == 0) {
dTrace("dnode mgmt got no message from qset, exit ...");
break;
}
dTrace("%p, msg:%s will be processed", pMsg->ahandle, taosMsg[pMsg->msgType]);
if (dnodeProcessMgmtMsgFp[pMsg->msgType]) {
rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg);
} else {
rsp.code = TSDB_CODE_MSG_NOT_PROCESSED;
}
rsp.handle = pMsg->handle;
rsp.pCont = NULL;
rpcSendResponse(&rsp);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
rsp.handle = pMsg->handle;
rsp.pCont = NULL;
rpcSendResponse(&rsp);
rpcFreeCont(pMsg->pCont);
return NULL;
}
static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
@ -284,22 +340,26 @@ static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) {
}
void dnodeUpdateMnodeIpSetForPeer(SRpcIpSet *pIpSet) {
dPrint("mnode IP list for peer is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
dPrint("mnode IP list for is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
for (int i = 0; i < pIpSet->numOfIps; ++i) {
pIpSet->port[i] -= TSDB_PORT_DNODEDNODE;
dPrint("mnode index:%d %s:%u", i, pIpSet->fqdn[i], pIpSet->port[i])
}
tsDMnodeIpSetForPeer = *pIpSet;
tsDMnodeIpSet = *pIpSet;
}
void dnodeGetMnodeIpSetForPeer(void *ipSetRaw) {
SRpcIpSet *ipSet = ipSetRaw;
*ipSet = tsDMnodeIpSetForPeer;
*ipSet = tsDMnodeIpSet;
for (int i=0; i<ipSet->numOfIps; ++i)
ipSet->port[i] += TSDB_PORT_DNODEDNODE;
}
void dnodeGetMnodeIpSetForShell(void *ipSetRaw) {
SRpcIpSet *ipSet = ipSetRaw;
*ipSet = tsDMnodeIpSetForShell;
*ipSet = tsDMnodeIpSet;
}
static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
@ -349,19 +409,10 @@ static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
dPrint("mnode index:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp);
}
tsDMnodeIpSetForPeer.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSetForPeer.numOfIps = tsDMnodeInfos.nodeNum;
tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForPeer.fqdn[i], &tsDMnodeIpSetForPeer.port[i]);
tsDMnodeIpSetForPeer.port[i] += TSDB_PORT_DNODEDNODE;
dPrint("mnode index:%d, for peer %s %d", i, tsDMnodeIpSetForPeer.fqdn[i], tsDMnodeIpSetForPeer.port[i]);
}
tsDMnodeIpSetForShell.inUse = tsDMnodeInfos.inUse;
tsDMnodeIpSetForShell.numOfIps = tsDMnodeInfos.nodeNum;
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForShell.fqdn[i], &tsDMnodeIpSetForShell.port[i]);
dPrint("mnode index:%d, for shell %s %d", i, tsDMnodeIpSetForShell.fqdn[i], tsDMnodeIpSetForShell.port[i]);
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
}
dnodeSaveMnodeInfos();
@ -369,7 +420,8 @@ static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
}
static bool dnodeReadMnodeInfos() {
char ipFile[TSDB_FILENAME_LEN] = {0};
char ipFile[TSDB_FILENAME_LEN*2] = {0};
sprintf(ipFile, "%s/mnodeIpList.json", tsDnodeDir);
FILE *fp = fopen(ipFile, "r");
if (!fp) {
@ -486,7 +538,7 @@ static void dnodeSaveMnodeInfos() {
}
char *dnodeGetMnodeMasterEp() {
return tsDMnodeInfos.nodeInfos[tsDMnodeIpSetForPeer.inUse].nodeEp;
return tsDMnodeInfos.nodeInfos[tsDMnodeIpSet.inUse].nodeEp;
}
void* dnodeGetMnodeInfos() {
@ -533,11 +585,14 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
.msgType = TSDB_MSG_TYPE_DM_STATUS
};
dnodeSendMsgToDnode(&tsDMnodeIpSetForPeer, &rpcMsg);
SRpcIpSet ipSet;
dnodeGetMnodeIpSetForPeer(&ipSet);
dnodeSendMsgToDnode(&ipSet, &rpcMsg);
}
static bool dnodeReadDnodeCfg() {
char dnodeCfgFile[TSDB_FILENAME_LEN] = {0};
char dnodeCfgFile[TSDB_FILENAME_LEN*2] = {0};
sprintf(dnodeCfgFile, "%s/dnodeCfg.json", tsDnodeDir);
FILE *fp = fopen(dnodeCfgFile, "r");

View File

@ -43,10 +43,10 @@ int32_t dnodeInitServer() {
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = dnodeDispatchToVnodeWriteQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeDispatchToVnodeWriteQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToDnodeMgmt;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToDnodeMgmt;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToDnodeMgmt;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToDnodeMgmt;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMnodePeerQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMnodePeerQueue;

View File

@ -84,7 +84,7 @@ int32_t dnodeInitShell() {
rpcInit.label = "SHELL";
rpcInit.numOfThreads = numOfThreads;
rpcInit.cfp = dnodeProcessMsgFromShell;
rpcInit.sessions = TSDB_SESSIONS_PER_DNODE;
rpcInit.sessions = tsMaxShellConns;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.afp = dnodeRetrieveUserAuthInfo;
@ -168,6 +168,44 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
return rpcRsp.code;
}
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid) {
dTrace("vgId:%d, sid:%d send config table msg to mnode", vgId, sid);
int32_t contLen = sizeof(SDMConfigTableMsg);
SDMConfigTableMsg *pMsg = rpcMallocCont(contLen);
pMsg->dnodeId = htonl(dnodeGetDnodeId());
pMsg->vgId = htonl(vgId);
pMsg->sid = htonl(sid);
SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pMsg;
rpcMsg.contLen = contLen;
rpcMsg.msgType = TSDB_MSG_TYPE_DM_CONFIG_TABLE;
SRpcMsg rpcRsp = {0};
dnodeSendMsgToDnodeRecv(&rpcMsg, &rpcRsp);
terrno = rpcRsp.code;
if (rpcRsp.code != 0) {
rpcFreeCont(rpcRsp.pCont);
dError("vgId:%d, sid:%d failed to config table from mnode", vgId, sid);
return NULL;
} else {
dPrint("vgId:%d, sid:%d config table msg is received", vgId, sid);
// TODO: remove this debug block once verification is finished
SMDCreateTableMsg *pTable = rpcRsp.pCont;
int16_t numOfColumns = htons(pTable->numOfColumns);
int16_t numOfTags = htons(pTable->numOfTags);
int32_t sid = htonl(pTable->sid);
uint64_t uid = htobe64(pTable->uid);
dPrint("table:%s, numOfColumns:%d numOfTags:%d sid:%d uid:%" PRIu64, pTable->tableId, numOfColumns, numOfTags, sid, uid);
return rpcRsp.pCont;
}
}
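Every multi-byte field of this message crosses the wire in network byte order: the sender applies htonl before sending, and the receiver mirrors the conversion, as the hunk above does with htons/htonl/htobe64. A self-contained round-trip check; the 64-bit helper is a hand-rolled stand-in for the platform htobe64 and assumes a little-endian host:

#include <arpa/inet.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* assumption: little-endian host; real code would use the platform htobe64 */
static uint64_t swap64(uint64_t v) {
  return ((uint64_t)htonl((uint32_t)(v & 0xFFFFFFFFULL)) << 32) |
         htonl((uint32_t)(v >> 32));
}

int main(void) {
  uint64_t uid  = 0x0102030405060708ULL;
  uint64_t wire = swap64(uid);   /* what the sender puts on the wire */
  uint64_t back = swap64(wire);  /* the receiver applies the same swap */
  printf("uid:%" PRIu64 " restored:%" PRIu64 " ok:%d\n", uid, back, uid == back);
  return 0;
}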
SDnodeStatisInfo dnodeGetStatisInfo() {
SDnodeStatisInfo info = {0};
if (dnodeGetRunStatus() == TSDB_DNODE_RUN_STATUS_RUNING) {

View File

@ -28,8 +28,12 @@ int32_t main(int32_t argc, char *argv[]) {
// Set global configuration file
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
strcpy(configDir, argv[++i]);
if (i < argc - 1) {
if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
printf("config file path overflow");
exit(EXIT_FAILURE);
}
strcpy(configDir, argv[i]);
} else {
printf("'-c' requires a parameter, default:%s\n", configDir);
exit(EXIT_FAILURE);
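The same guard, check the length and only then strcpy into configDir, recurs in the shell and taosdemo parsers further below. A bounded-copy helper is one way to state the rule once; this is a sketch with an assumed TSDB_FILENAME_LEN, not code from the repository:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TSDB_FILENAME_LEN 128   /* assumption: stands in for the real constant */

static void copyConfigDir(char *dst, const char *src) {
  if (strlen(src) > TSDB_FILENAME_LEN - 1) {
    fprintf(stderr, "config file path: %s overflow max len %d\n", src, TSDB_FILENAME_LEN - 1);
    exit(EXIT_FAILURE);
  }
  strcpy(dst, src);   /* safe: the length was verified above */
}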

View File

@ -129,7 +129,10 @@ void *dnodeAllocateVnodeWqueue(void *pVnode) {
if (pWorker->qset == NULL) {
pWorker->qset = taosOpenQset();
if (pWorker->qset == NULL) return NULL;
if (pWorker->qset == NULL) {
taosCloseQueue(queue);
return NULL;
}
taosAddIntoQset(pWorker->qset, queue, pVnode);
pWorker->qall = taosAllocateQall();
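The added taosCloseQueue call fixes a leak on the error path: every resource acquired before a failing step has to be released before returning NULL. The rule, sketched with toy stand-ins for the queue/qset constructors:

#include <stdlib.h>

typedef struct { int unused; } Queue, Qset;
static Queue *openQueue(void) { return calloc(1, sizeof(Queue)); }
static Qset  *openQset(void)  { return calloc(1, sizeof(Qset)); }

static int allocWorker(Queue **q, Qset **s) {
  *q = openQueue();
  if (*q == NULL) return -1;
  *s = openQset();
  if (*s == NULL) {
    free(*q);        /* mirrors taosCloseQueue(queue) in the hunk above */
    *q = NULL;
    return -1;
  }
  return 0;          /* both resources live, or neither */
}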

View File

@ -44,9 +44,10 @@ void dnodeGetMnodeIpSetForShell(void *ipSet);
void * dnodeGetMnodeInfos();
int32_t dnodeGetDnodeId();
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
void dnodeSendMsgToDnode(SRpcIpSet *ipSet, SRpcMsg *rpcMsg);
void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp);
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid);
void *dnodeAllocateVnodeWqueue(void *pVnode);
void dnodeFreeVnodeWqueue(void *queue);

View File

@ -293,9 +293,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MAX_COMP_LEVEL 2
#define TSDB_DEFAULT_COMP_LEVEL 2
#define TSDB_MIN_WAL_LEVEL 0
#define TSDB_MAX_WAL_LEVEL 2
#define TSDB_DEFAULT_WAL_LEVEL 2
#define TSDB_MIN_WAL_LEVEL 1
#define TSDB_MAX_WAL_LEVEL 2
#define TSDB_DEFAULT_WAL_LEVEL 1
#define TSDB_MIN_REPLICA_NUM 1
#define TSDB_MAX_REPLICA_NUM 3
@ -338,9 +338,6 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_ORDER_ASC 1
#define TSDB_ORDER_DESC 2
#define TSDB_SESSIONS_PER_VNODE (300)
#define TSDB_SESSIONS_PER_DNODE (TSDB_SESSIONS_PER_VNODE * TSDB_MAX_VNODES)
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10

View File

@ -72,9 +72,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_RESOURCE, 0, 0x0018, "no resource"
TAOS_DEFINE_ERROR(TSDB_CODE_OPS_NOT_SUPPORT, 0, 0x0019, "operations not support")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_OPTION, 0, 0x001A, "invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_CONFIGURED, 0, 0x001B, "not configured")
TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 0x001C, "node offline")
TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 0x001D, "network unavailable")
TAOS_DEFINE_ERROR(TSDB_CODE_AUTH_REQUIRED, 0, 0x001E, "auth required")
TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 0x001C, "network unavailable")
TAOS_DEFINE_ERROR(TSDB_CODE_AUTH_REQUIRED, 0, 0x001D, "auth required")
// db
TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 0x0100, "db not selected")
@ -94,16 +93,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 0x0185, "can not get
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 0x0200, "table already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_ID, 0, 0x0201, "invalid table id")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_TYPE, 0, 0x0202, "invalid table type")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE, 0, 0x0203, "invalid table name")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUPER_TABLE, 0, 0x0204, "no super table") // operation only available for super table
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_ACTIVE_TABLE, 0, 0x0205, "not active table")
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ID_MISMATCH, 0, 0x0206, "table id mismatch")
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_ALREAY_EXIST, 0, 0x0207, "tag already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_NOT_EXIST, 0, 0x0208, "tag not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_ALREAY_EXIST, 0, 0x0209, "field already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_NOT_EXIST, 0, 0x020A, "field not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x020B, "column name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_TAGS, 0, 0x020C, "too many tags")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUPER_TABLE, 0, 0x0203, "no super table") // operation only available for super table
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_ALREAY_EXIST, 0, 0x0204, "tag already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_NOT_EXIST, 0, 0x0205, "tag not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_ALREAY_EXIST, 0, 0x0206, "field already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_NOT_EXIST, 0, 0x0207, "field not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x0208, "column name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_TAGS, 0, 0x0209, "too many tags")
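Renumbering a block of error codes invites collisions between neighbouring entries. A hypothetical compile-time guard, not part of the repository: list every code once as a case label; duplicate labels are a hard error, so the function compiling at all proves the codes are distinct.

static inline void assertTableErrorCodesUnique(int code) {
  switch (code) {
    case 0x0200: case 0x0201: case 0x0202: case 0x0203:
    case 0x0204: case 0x0205: case 0x0206: case 0x0207:
    case 0x0208: case 0x0209:
      break;   /* a duplicated case label would fail to compile */
    default:
      break;
  }
}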
// dnode & mnode
@ -148,15 +144,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, 0, 0x038F, "grant cpu li
// server
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_VGROUP_ID, 0, 0x0400, "invalid vgroup id")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_VNODE_ID, 0, 0x0401, "invalid vnode id")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_ACTIVE_VNODE, 0, 0x0402, "not active vnode")
TAOS_DEFINE_ERROR(TSDB_CODE_VG_INIT_FAILED, 0, 0x0403, "vg init failed")
TAOS_DEFINE_ERROR(TSDB_CODE_SERV_NO_DISKSPACE, 0, 0x0404, "server no diskspace")
TAOS_DEFINE_ERROR(TSDB_CODE_SERV_OUT_OF_MEMORY, 0, 0x0405, "server out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISK_PERMISSIONS, 0, 0x0406, "no disk permissions")
TAOS_DEFINE_ERROR(TSDB_CODE_FILE_CORRUPTED, 0, 0x0407, "file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_MEMORY_CORRUPTED, 0, 0x0408, "memory corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUCH_FILE_OR_DIR, 0, 0x0409, "no such file or directory")
TAOS_DEFINE_ERROR(TSDB_CODE_VG_INIT_FAILED, 0, 0x0402, "vgroup init failed")
TAOS_DEFINE_ERROR(TSDB_CODE_SERV_NO_DISKSPACE, 0, 0x0403, "server no diskspace")
TAOS_DEFINE_ERROR(TSDB_CODE_SERV_OUT_OF_MEMORY, 0, 0x0404, "server out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISK_PERMISSIONS, 0, 0x0405, "no disk permissions")
TAOS_DEFINE_ERROR(TSDB_CODE_FILE_CORRUPTED, 0, 0x0406, "file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_MEMORY_CORRUPTED, 0, 0x0407, "memory corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUCH_FILE_OR_DIR, 0, 0x0408, "no such file or directory")
// client
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CLIENT_VERSION, 0, 0x0481, "invalid client version")

View File

@ -370,7 +370,7 @@ typedef struct SExprInfo {
struct tExprNode* pExpr;
int16_t bytes;
int16_t type;
int16_t interBytes;
int32_t interBytes;
} SExprInfo;
typedef struct SColumnFilterInfo {
@ -620,13 +620,6 @@ typedef struct {
SCMVgroupInfo vgroups[];
} SVgroupsInfo;
//typedef struct {
// int32_t numOfTables;
// int32_t join;
// int32_t joinCondLen; // for join condition
// int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM];
//} SSuperTableMetaMsg;
typedef struct STableMetaMsg {
int32_t contLen;
char tableId[TSDB_TABLE_ID_LEN + 1]; // table id
@ -676,9 +669,9 @@ typedef struct {
} SCMCreateDnodeMsg, SCMDropDnodeMsg;
typedef struct {
uint32_t dnode;
int32_t vnode;
int32_t sid;
int32_t dnodeId;
int32_t vgId;
int32_t sid;
} SDMConfigTableMsg;
typedef struct {

View File

@ -19,6 +19,7 @@
extern "C" {
#endif
#include "tdataformat.h"
typedef int (*FCqWrite)(void *ahandle, void *pHead, int type);
@ -40,7 +41,7 @@ void cqStart(void *handle);
void cqStop(void *handle);
// cqCreate is called by TSDB to start an instance of CQ
void *cqCreate(void *handle, int sid, char *sqlStr, SSchema *pSchema, int columns);
void *cqCreate(void *handle, int sid, char *sqlStr, STSchema *pSchema);
// cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate
void cqDrop(void *handle);

View File

@ -43,6 +43,8 @@ typedef struct {
void *cqH;
int (*notifyStatus)(void *, int status);
int (*eventCallBack)(void *);
void *(*cqCreateFunc)(void *handle, int sid, char *sqlStr, STSchema *pSchema);
void (*cqDropFunc)(void *handle);
} STsdbAppH;
// --------- TSDB REPOSITORY CONFIGURATION DEFINITION
@ -71,7 +73,7 @@ typedef void TsdbRepoT; // use void to hide implementation details from outside
int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg, void *limiter);
int32_t tsdbDropRepo(TsdbRepoT *repo);
TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH);
TsdbRepoT *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
int32_t tsdbCloseRepo(TsdbRepoT *repo, int toCommit);
int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg);
@ -198,6 +200,10 @@ TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STable
*/
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList);
/**
* move to next block if exists
*

View File

@ -96,8 +96,12 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
strcpy(configDir, argv[++i]);
if (i < argc - 1) {
if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
strcpy(configDir, argv[i]);
} else {
fprintf(stderr, "Option -c requires an argument\n");
exit(EXIT_FAILURE);

View File

@ -80,6 +80,11 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
if (wordexp(arg, &full_path, 0) != 0) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
}
if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
fprintf(stderr, "config file path: %s overflow max len %d\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return -1;
}
strcpy(configDir, full_path.we_wordv[0]);
wordfree(&full_path);

View File

@ -76,8 +76,12 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
strcpy(configDir, argv[++i]);
if (i < argc - 1) {
if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
strcpy(configDir, argv[i]);
} else {
fprintf(stderr, "Option -c requires an argument\n");
exit(EXIT_FAILURE);

View File

@ -49,7 +49,7 @@ static struct argp_option options[] = {
{0, 'h', "host", 0, "The host to connect to TDEngine. Default is localhost.", 0},
{0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1},
{0, 'u', "user", 0, "The TDEngine user name to use when connecting to the server. Default is 'root'.", 2},
{0, 'a', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
@ -58,12 +58,15 @@ static struct argp_option options[] = {
{0, 'b', "type_of_cols", 0, "The data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.", 7},
{0, 'w', "length_of_binary", 0, "The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8", 8},
{0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 3.", 8},
{0, 'c', "num_of_conns", 0, "The number of connections. Default is 10.", 9},
{0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 9},
{0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10},
{0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11},
{0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12},
{0, 'f', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
{0, 'x', 0, 0, "Insert only flag.", 13},
{0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14},
{0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14},
{0, 'D', "delete table", 0, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database", 14},
{0}};
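Note the letter reshuffle in this table: the password flag moves from -a to -P, the thread count takes -T (replacing -c for connections), and -c now selects the configuration directory, matching the other tools. A minimal self-contained argp parser in the same style, a hypothetical two-option demo rather than taosdemo itself:

#include <argp.h>
#include <stdlib.h>

static struct argp_option demo_options[] = {
  {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 0},
  {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 0},
  {0}};

struct demo_args { int threads; const char *cfgdir; };

static error_t demo_parse(int key, char *arg, struct argp_state *state) {
  struct demo_args *a = state->input;
  switch (key) {
    case 'T': a->threads = atoi(arg); break;    /* mirrors the new 'T' case */
    case 'c': a->cfgdir  = arg;       break;    /* mirrors the new 'c' case */
    default:  return ARGP_ERR_UNKNOWN;
  }
  return 0;
}

static struct argp demo_argp = {demo_options, demo_parse, 0, 0};

int main(int argc, char **argv) {
  struct demo_args a = {10, "/etc/taos/"};
  argp_parse(&demo_argp, argc, argv, 0, 0, &a);
  return 0;
}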
/* Used by main to communicate with parse_opt. */
@ -81,11 +84,14 @@ typedef struct DemoArguments {
char *datatype[MAX_NUM_DATATYPE];
int len_of_binary;
int num_of_CPR;
int num_of_connections;
int num_of_threads;
int num_of_RPR;
int num_of_tables;
int num_of_DPT;
int abort;
int order;
int rate;
int method_of_delete;
char **arg_list;
} SDemoArguments;
@ -106,7 +112,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'u':
arguments->user = arg;
break;
case 'a':
case 'P':
arguments->password = arg;
break;
case 'o':
@ -115,8 +121,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'q':
arguments->mode = atoi(arg);
break;
case 'c':
arguments->num_of_connections = atoi(arg);
case 'T':
arguments->num_of_threads = atoi(arg);
break;
case 'r':
arguments->num_of_RPR = atoi(arg);
@ -176,7 +182,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'x':
arguments->insert_only = true;
break;
case 'f':
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
@ -184,6 +190,30 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
wordfree(&full_path);
break;
case 'O':
arguments->order = atoi(arg);
if (arguments->order > 1 || arguments->order < 0) {
arguments->order = 0;
} else if (arguments->order == 1) {
arguments->rate = 10;
}
break;
case 'R':
arguments->rate = atoi(arg);
if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) {
arguments->rate = 10;
}
break;
case 'D':
arguments->method_of_delete = atoi(arg);
if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) {
arguments->method_of_delete = 0;
}
break;
case OPT_ABORT:
arguments->abort = 1;
break;
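The new 'R' and 'D' cases share one policy: an out-of-range value silently falls back to the default instead of aborting the run. A helper that states the policy once, offered as a sketch (the names are hypothetical):

static int clampOrDefault(int v, int lo, int hi, int dflt) {
  return (v < lo || v > hi) ? dflt : v;   /* out of range -> default */
}

/* hypothetical usage mirroring the cases above:
 *   arguments->rate             = clampOrDefault(atoi(arg), 1, 50, 10);
 *   arguments->method_of_delete = clampOrDefault(atoi(arg), 0, 3, 0);
 */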
@ -217,6 +247,8 @@ typedef struct {
int ncols_per_record;
int nrecords_per_table;
int nrecords_per_request;
int data_of_order;
int data_of_rate;
int64_t start_time;
bool do_aggreFunc;
@ -236,6 +268,8 @@ typedef struct {
int ncols_per_record;
char **data_type;
int len_of_binary;
int data_of_order;
int data_of_rate;
sem_t *mutex_sem;
int *notFinished;
@ -258,6 +292,8 @@ void *readMetric(void *sarg);
void *syncWrite(void *sarg);
void *deleteTable();
void *asyncWrite(void *sarg);
void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);
@ -291,11 +327,14 @@ int main(int argc, char *argv[]) {
},
8, // len_of_binary
1, // num_of_CPR
1, // num_of_connections
1, // num_of_threads
1, // num_of_RPR
1, // num_of_tables
50000, // num_of_DPT
0, // abort
0, // order
0, // rate
0, // method_of_delete
NULL // arg_list
};
@ -304,7 +343,7 @@ int main(int argc, char *argv[]) {
// For demo use, change default values for some parameters;
arguments.num_of_tables = 10000;
arguments.num_of_CPR = 3;
arguments.num_of_connections = 10;
arguments.num_of_threads = 10;
arguments.num_of_DPT = 100000;
arguments.num_of_RPR = 1000;
arguments.use_metric = true;
@ -330,8 +369,11 @@ int main(int argc, char *argv[]) {
char *tb_prefix = arguments.tb_prefix;
int len_of_binary = arguments.len_of_binary;
int ncols_per_record = arguments.num_of_CPR;
int order = arguments.order;
int rate = arguments.rate;
int method_of_delete = arguments.method_of_delete;
int ntables = arguments.num_of_tables;
int nconnections = arguments.num_of_connections;
int threads = arguments.num_of_threads;
int nrecords_per_table = arguments.num_of_DPT;
int nrecords_per_request = arguments.num_of_RPR;
bool use_metric = arguments.use_metric;
@ -371,12 +413,19 @@ int main(int argc, char *argv[]) {
printf("# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1);
printf("# Number of Columns per record: %d\n", ncols_per_record);
printf("# Number of Connections: %d\n", nconnections);
printf("# Number of Threads: %d\n", threads);
printf("# Number of Tables: %d\n", ntables);
printf("# Number of Data per Table: %d\n", nrecords_per_table);
printf("# Records/Request: %d\n", nrecords_per_request);
printf("# Database name: %s\n", db_name);
printf("# Table prefix: %s\n", tb_prefix);
if (order == 1) {
printf("# Data order: %d\n", order);
printf("# Data out of order rate: %d\n", rate);
}
printf("# Delete method: %d\n", method_of_delete);
printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
printf("###################################################################\n\n");
@ -392,12 +441,18 @@ int main(int argc, char *argv[]) {
fprintf(fp, "# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1);
fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record);
fprintf(fp, "# Number of Connections: %d\n", nconnections);
fprintf(fp, "# Number of Threads: %d\n", threads);
fprintf(fp, "# Number of Tables: %d\n", ntables);
fprintf(fp, "# Number of Data per Table: %d\n", nrecords_per_table);
fprintf(fp, "# Records/Request: %d\n", nrecords_per_request);
fprintf(fp, "# Database name: %s\n", db_name);
fprintf(fp, "# Table prefix: %s\n", tb_prefix);
if (order == 1) {
fprintf(fp, "# Data order: %d\n", order);
fprintf(fp, "# Data out of order rate: %d\n", rate);
}
fprintf(fp, "# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
fprintf(fp, "###################################################################\n\n");
@ -414,7 +469,7 @@ int main(int argc, char *argv[]) {
sprintf(command, "drop database %s;", db_name);
taos_query(taos, command);
sleep(3);
sprintf(command, "create database %s;", db_name);
taos_query(taos, command);
@ -479,22 +534,22 @@ int main(int argc, char *argv[]) {
taos_close(taos);
}
/* Wait for table to create */
sleep(5);
/* Insert data */
double ts = getCurrentTime();
printf("Inserting data......\n");
pthread_t *pids = malloc(nconnections * sizeof(pthread_t));
info *infos = malloc(nconnections * sizeof(info));
pthread_t *pids = malloc(threads * sizeof(pthread_t));
info *infos = malloc(threads * sizeof(info));
int a = ntables / nconnections;
int a = ntables / threads;
if (a < 1) {
nconnections = ntables;
threads = ntables;
a = 1;
}
int b = ntables % nconnections;
int b = ntables % threads;
int last = 0;
for (int i = 0; i < nconnections; i++) {
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
t_info->threadID = i;
strcpy(t_info->db_name, db_name);
@ -507,6 +562,8 @@ int main(int argc, char *argv[]) {
t_info->len_of_binary = len_of_binary;
t_info->nrecords_per_request = nrecords_per_request;
t_info->start_table_id = last;
t_info->data_of_order = order;
t_info->data_of_rate = rate;
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
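The loop above hands out tables to threads: each thread gets ntables/threads tables and the first ntables%threads threads take one extra, so every table is assigned exactly once. A standalone check of that arithmetic:

#include <stdio.h>

int main(void) {
  int ntables = 10, threads = 3;
  int a = ntables / threads;   /* base share per thread */
  int b = ntables % threads;   /* first b threads get one extra */
  int last = 0;
  for (int i = 0; i < threads; i++) {
    int end = (i < b) ? last + a : last + a - 1;
    printf("thread %d: tables %d..%d\n", i, last, end);
    last = end + 1;
  }
  return 0;   /* prints 0..3, 4..6, 7..9: all 10 tables, no overlap */
}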
@ -520,15 +577,15 @@ int main(int argc, char *argv[]) {
pthread_create(pids + i, NULL, asyncWrite, t_info);
}
}
for (int i = 0; i < nconnections; i++) {
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
}
double t = getCurrentTime() - ts;
if (query_mode == SYNC) {
printf("SYNC Insert with %d connections:\n", nconnections);
printf("SYNC Insert with %d connections:\n", threads);
} else {
printf("ASYNC Insert with %d connections:\n", nconnections);
printf("ASYNC Insert with %d connections:\n", threads);
}
fprintf(fp, "|%10.d | %10.2f | %10.2f | %10.4f |\n\n",
@ -540,7 +597,7 @@ int main(int argc, char *argv[]) {
t, ntables * nrecords_per_table, nrecords_per_request,
ntables * nrecords_per_table / t);
for (int i = 0; i < nconnections; i++) {
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
taos_close(t_info->taos);
sem_destroy(&(t_info->mutex_sem));
@ -551,6 +608,55 @@ int main(int argc, char *argv[]) {
free(infos);
fclose(fp);
if (method_of_delete != 0) {
TAOS *dtaos = taos_connect(ip_addr, user, pass, db_name, port);
double dts = getCurrentTime();
printf("Deleteing %d table(s)......\n", ntables);
switch (method_of_delete) {
case 1:
// delete by table
/* Create all the tables; */
for (int i = 0; i < ntables; i++) {
sprintf(command, "drop table %s.%s%d;", db_name, tb_prefix, i);
queryDB(dtaos, command);
}
break;
case 2:
// delete by stable
if (!use_metric) {
break;
} else {
sprintf(command, "drop table %s.meters;", db_name);
queryDB(dtaos, command);
}
break;
case 3:
// delete by database
sprintf(command, "drop database %s;", db_name);
queryDB(dtaos, command);
break;
default:
break;
}
printf("Table(s) droped!\n");
taos_close(dtaos);
double dt = getCurrentTime() - dts;
printf("Spent %.4f seconds to drop %d tables\n", dt, ntables);
FILE *fp = fopen(arguments.output_file, "a");
fprintf(fp, "Spent %.4f seconds to drop %d tables\n", dt, ntables);
fclose(fp);
}
if (!insert_only) {
// query data
pthread_t read_id;
@ -735,7 +841,15 @@ void *syncWrite(void *sarg) {
pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, tID);
int k;
for (k = 0; k < winfo->nrecords_per_request;) {
generateData(data, data_type, ncols_per_record, tmp_time++, len_of_binary);
int rand_num = rand() % 100;
if (winfo->data_of_order == 1 && rand_num < winfo->data_of_rate) {
long d = tmp_time - rand() % 1000000 + rand_num;
generateData(data, data_type, ncols_per_record, d, len_of_binary);
} else {
generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
}
pstr += sprintf(pstr, " %s", data);
inserted++;
k++;
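The branch above is what the -O/-R options buy: with probability data_of_rate percent a row receives a timestamp drawn from roughly the last 1,000,000 time units, otherwise the clock steps forward by 1000. The same rule extracted into a standalone sketch:

#include <stdint.h>
#include <stdlib.h>

static int64_t nextTimestamp(int64_t *clock, int order, int rate) {
  int rand_num = rand() % 100;
  if (order == 1 && rand_num < rate) {
    return *clock - rand() % 1000000 + rand_num;   /* out-of-order sample */
  }
  *clock += 1000;                                  /* in-order: advance the clock */
  return *clock;
}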
@ -774,6 +888,8 @@ void *asyncWrite(void *sarg) {
tb_info->mutex_sem = &(winfo->mutex_sem);
tb_info->notFinished = &(winfo->notFinished);
tb_info->lock_sem = &(winfo->lock_sem);
tb_info->data_of_order = winfo->data_of_order;
tb_info->data_of_rate = winfo->data_of_rate;
/* char buff[BUFFER_SIZE] = "\0"; */
/* sprintf(buff, "insert into %s values (0, 0)", tb_info->tb_name); */
@ -815,7 +931,15 @@ void callBack(void *param, TAOS_RES *res, int code) {
pstr += sprintf(pstr, "insert into %s values", tb_info->tb_name);
for (int i = 0; i < tb_info->nrecords_per_request; i++) {
generateData(data, datatype, ncols_per_record, tmp_time++, len_of_binary);
int rand_num = rand() % 100;
if (tb_info->data_of_order == 1 && rand_num < tb_info->data_of_rate) {
long d = tmp_time - rand() % 1000000 + rand_num;
generateData(data, datatype, ncols_per_record, d, len_of_binary);
} else {
generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary);
}
pstr += sprintf(pstr, "%s", data);
tb_info->counter++;

View File

@ -220,6 +220,7 @@ typedef struct SAcctObj {
typedef struct {
int8_t type;
int32_t index;
char db[TSDB_DB_NAME_LEN + 1];
void * pIter;
int16_t numOfColumns;
@ -228,7 +229,6 @@ typedef struct {
int32_t numOfReads;
int16_t offset[TSDB_MAX_COLUMNS];
int16_t bytes[TSDB_MAX_COLUMNS];
void * signature;
uint16_t payloadLen;
char payload[];
} SShowObj;

View File

@ -27,6 +27,12 @@ typedef enum {
TAOS_DN_STATUS_READY
} EDnodeStatus;
typedef enum {
TAOS_DN_ALTERNATIVE_ROLE_ANY,
TAOS_DN_ALTERNATIVE_ROLE_MNODE,
TAOS_DN_ALTERNATIVE_ROLE_VNODE
} EDnodeAlternativeRole;
int32_t mnodeInitDnodes();
void mnodeCleanupDnodes();

View File

@ -27,7 +27,8 @@ void mnodeCleanupVgroups();
SVgObj *mnodeGetVgroup(int32_t vgId);
void mnodeIncVgroupRef(SVgObj *pVgroup);
void mnodeDecVgroupRef(SVgObj *pVgroup);
void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg);
void mnodeDropAllDbVgroups(SDbObj *pDropDb);
void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);

View File

@ -81,10 +81,10 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
SDbObj *pDb = pOper->pObj;
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
mnodeDropDbFromAcct(pAcct, pDb);
mnodeDropAllChildTables(pDb);
mnodeDropAllSuperTables(pDb);
mnodeDropAllDbVgroups(pDb, false);
mnodeDropAllDbVgroups(pDb);
mnodeDropDbFromAcct(pAcct, pDb);
mnodeDecAcctRef(pAcct);
return TSDB_CODE_SUCCESS;
@ -276,8 +276,8 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_INVALID_OPTION;
}
if (pCfg->replications > 1 && pCfg->walLevel <= TSDB_MIN_WAL_LEVEL) {
mError("invalid db option walLevel:%d must > 0, while replica:%d > 1", pCfg->walLevel, pCfg->replications);
if (pCfg->walLevel < TSDB_MIN_WAL_LEVEL) {
mError("invalid db option walLevel:%d must be greater than 0", pCfg->walLevel);
return TSDB_CODE_INVALID_OPTION;
}
@ -871,8 +871,8 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
mTrace("db:%s, replications:%d change to %d", pDb->name, pDb->cfg.replications, replications);
newCfg.replications = replications;
if (replications > 1 && pDb->cfg.walLevel <= TSDB_MIN_WAL_LEVEL) {
mError("db:%s, walLevel:%d must > 0, while replica:%d > 1", pDb->name, pDb->cfg.walLevel, replications);
if (pDb->cfg.walLevel < TSDB_MIN_WAL_LEVEL) {
mError("db:%s, walLevel:%d must be greater than 0", pDb->name, pDb->cfg.walLevel);
terrno = TSDB_CODE_INVALID_OPTION;
}
@ -998,19 +998,7 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
return code;
}
#if 1
mnodeDropAllDbVgroups(pMsg->pDb, true);
#else
SVgObj *pVgroup = pMsg->pDb->pHead;
if (pVgroup != NULL) {
mPrint("vgId:%d, will be dropped", pVgroup->vgId);
SMnodeMsg *newMsg = mnodeCloneMsg(pMsg);
newMsg->ahandle = pVgroup;
newMsg->expected = pVgroup->numOfVnodes;
mnodeDropVgroup(pVgroup, newMsg);
return;
}
#endif
mnodeSendDropAllDbVgroupsMsg(pMsg->pDb);
mTrace("db:%s, all vgroups is dropped", pMsg->pDb->name);
return mnodeDropDb(pMsg);

View File

@ -58,6 +58,7 @@ static int32_t mnodeGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole);
static int32_t mnodeDnodeActionDestroy(SSdbOper *pOper) {
tfree(pOper->pObj);
@ -521,6 +522,12 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 6 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "alternativeRole");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 8;
pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP;
strcpy(pSchema[cols].name, "create_time");
@ -572,12 +579,16 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
*(int16_t *)pWrite = pDnode->totalVnodes;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* status = mnodeGetDnodeStatusStr(pDnode->status);
STR_TO_VARSTR(pWrite, status);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* role = mnodeGetDnodeAlternativeRoleStr(pDnode->alternativeRole);
STR_TO_VARSTR(pWrite, role);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = pDnode->createdTime;
cols++;
@ -895,3 +906,13 @@ char* mnodeGetDnodeStatusStr(int32_t dnodeStatus) {
default: return "undefined";
}
}
static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole) {
switch (alternativeRole) {
case TAOS_DN_ALTERNATIVE_ROLE_ANY: return "any";
case TAOS_DN_ALTERNATIVE_ROLE_MNODE: return "mnode";
case TAOS_DN_ALTERNATIVE_ROLE_VNODE: return "vnode";
default:return "any";
}
}

View File

@ -160,7 +160,7 @@ void mnodeStopSystem() {
static void mnodeInitTimer() {
if (tsMnodeTmr == NULL) {
tsMnodeTmr = taosTmrInit((tsMaxShellConns)*3, 200, 3600000, "MND");
tsMnodeTmr = taosTmrInit(tsMaxShellConns, 200, 3600000, "MND");
}
}
@ -185,4 +185,4 @@ static bool mnodeNeedStart() {
bool mnodeIsRunning() {
return tsMgmtIsRunning;
}

View File

@ -358,8 +358,8 @@ void sdbIncRef(void *handle, void *pObj) {
SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
atomic_add_fetch_32(pRefCount, 1);
if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
sdbTrace("add ref to table:%s record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
if (0 && (pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) {
sdbTrace("add ref to table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
}
}
@ -369,13 +369,13 @@ void sdbDecRef(void *handle, void *pObj) {
SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
int32_t refCount = atomic_sub_fetch_32(pRefCount, 1);
if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
sdbTrace("def ref of table:%s record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
if (0 && (pTable->tableId == SDB_TABLE_CTABLE || pTable->tableId == SDB_TABLE_DB)) {
sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
}
int8_t *updateEnd = pObj + pTable->refCountPos - 1;
if (refCount <= 0 && *updateEnd) {
sdbTrace("table:%s, record:%s:%d is destroyed", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
sdbTrace("table:%s, record:%p:%s:%d is destroyed", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
SSdbOper oper = {.pObj = pObj};
(*pTable->destroyFp)(&oper);
}

View File

@ -47,13 +47,14 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *mnodeMsg);
static int32_t mnodeProcessUseMsg(SMnodeMsg *mnodeMsg);
static void mnodeFreeShowObj(void *data);
static bool mnodeCheckShowObj(SShowObj *pShow);
static bool mnodeAccquireShowObj(SShowObj *pShow);
static bool mnodeCheckShowFinished(SShowObj *pShow);
static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size);
static void mnodeCleanupShowObj(void *pShow, bool forceRemove);
static void *mnodePutShowObj(SShowObj *pShow, int32_t size);
static void mnodeReleaseShowObj(void *pShow, bool forceRemove);
extern void *tsMnodeTmr;
static void *tsQhandleCache = NULL;
static void *tsMnodeShowCache = NULL;
static int32_t tsShowObjIndex = 0;
static SShowMetaFp tsMnodeShowMetaFp[TSDB_MGMT_TABLE_MAX] = {0};
static SShowRetrieveFp tsMnodeShowRetrieveFp[TSDB_MGMT_TABLE_MAX] = {0};
@ -64,14 +65,15 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
tsQhandleCache = taosCacheInitWithCb(tsMnodeTmr, 10, mnodeFreeShowObj);
tsMnodeShowCache = taosCacheInitWithCb(tsMnodeTmr, 10, mnodeFreeShowObj);
return 0;
}
void mnodeCleanUpShow() {
if (tsQhandleCache != NULL) {
taosCacheCleanup(tsQhandleCache);
tsQhandleCache = NULL;
if (tsMnodeShowCache != NULL) {
mPrint("show cache is cleanup");
taosCacheCleanup(tsMnodeShowCache);
tsMnodeShowCache = NULL;
}
}
@ -116,35 +118,37 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_OPS_NOT_SUPPORT;
}
int32_t size = sizeof(SCMShowRsp) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_EXTRA_PAYLOAD_SIZE;
SCMShowRsp *pShowRsp = rpcMallocCont(size);
if (pShowRsp == NULL) {
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
int32_t showObjSize = sizeof(SShowObj) + htons(pShowMsg->payloadLen);
SShowObj *pShow = (SShowObj *) calloc(1, showObjSize);
pShow->signature = pShow;
pShow->type = pShowMsg->type;
pShow->payloadLen = htons(pShowMsg->payloadLen);
strcpy(pShow->db, pShowMsg->db);
memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen);
pShow = mnodeSaveShowObj(pShow, showObjSize);
if (pShow == NULL) {
pShow = mnodePutShowObj(pShow, showObjSize);
if (pShow == NULL) {
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
int32_t size = sizeof(SCMShowRsp) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_EXTRA_PAYLOAD_SIZE;
SCMShowRsp *pShowRsp = rpcMallocCont(size);
if (pShowRsp == NULL) {
mnodeReleaseShowObj(pShow, true);
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
pShowRsp->qhandle = htobe64((uint64_t) pShow);
mTrace("show:%p, type:%s, start to get meta", pShow, mnodeGetShowType(pShowMsg->type));
mTrace("%p, show type:%s, start to get meta", pShow, mnodeGetShowType(pShowMsg->type));
int32_t code = (*tsMnodeShowMetaFp[pShowMsg->type])(&pShowRsp->tableMeta, pShow, pMsg->rpcMsg.handle);
if (code == 0) {
pMsg->rpcRsp.rsp = pShowRsp;
pMsg->rpcRsp.len = sizeof(SCMShowRsp) + sizeof(SSchema) * pShow->numOfColumns;
mnodeReleaseShowObj(pShow, false);
return TSDB_CODE_SUCCESS;
} else {
mError("show:%p, type:%s, failed to get meta, reason:%s", pShow, mnodeGetShowType(pShowMsg->type), tstrerror(code));
mnodeCleanupShowObj(pShow, true);
mError("%p, show type:%s, failed to get meta, reason:%s", pShow, mnodeGetShowType(pShowMsg->type), tstrerror(code));
rpcFreeCont(pShowRsp);
mnodeReleaseShowObj(pShow, true);
return code;
}
}
@ -157,22 +161,20 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
pRetrieve->qhandle = htobe64(pRetrieve->qhandle);
SShowObj *pShow = (SShowObj *)pRetrieve->qhandle;
mTrace("show:%p, type:%s, retrieve data", pShow, mnodeGetShowType(pShow->type));
mTrace("%p, show type:%s, retrieve data", pShow, mnodeGetShowType(pShow->type));
/*
* in case of server restart, apps may hold qhandle created by server before
* restart, which is actually invalid, therefore, signature check is required.
*/
if (!mnodeCheckShowObj(pShow)) {
mError("retrieve:%p, qhandle:%p is invalid", pRetrieve, pShow);
if (!mnodeAccquireShowObj(pShow)) {
mError("%p, show is invalid", pShow);
return TSDB_CODE_INVALID_QHANDLE;
}
if (mnodeCheckShowFinished(pShow)) {
mTrace("retrieve:%p, qhandle:%p already read finished, numOfReads:%d numOfRows:%d", pRetrieve, pShow, pShow->numOfReads, pShow->numOfRows);
mTrace("%p, show is already read finished, numOfReads:%d numOfRows:%d", pShow, pShow->numOfReads, pShow->numOfRows);
pShow->numOfReads = pShow->numOfRows;
//mnodeCleanupShowObj(pShow, true);
//return TSDB_CODE_SUCCESS;
}
if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) {
@ -198,7 +200,7 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
if (rowsRead < 0) {
rpcFreeCont(pRsp);
mnodeCleanupShowObj(pShow, false);
mnodeReleaseShowObj(pShow, false);
assert(false);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
@ -209,12 +211,13 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
pMsg->rpcRsp.rsp = pRsp;
pMsg->rpcRsp.len = size;
if (rowsToRead == 0) {
mnodeCleanupShowObj(pShow, true);
if (rowsToRead == 0 || rowsRead == rowsToRead) {
pRsp->completed = 1;
mnodeReleaseShowObj(pShow, true);
} else {
mnodeCleanupShowObj(pShow, false);
mnodeReleaseShowObj(pShow, false);
}
return TSDB_CODE_SUCCESS;
}
@ -316,24 +319,29 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) {
return false;
}
static bool mnodeCheckShowObj(SShowObj *pShow) {
SShowObj *pSaved = taosCacheAcquireByData(tsQhandleCache, pShow);
static bool mnodeAccquireShowObj(SShowObj *pShow) {
char key[16];
sprintf(key, "%d", pShow->index);
SShowObj *pSaved = taosCacheAcquireByName(tsMnodeShowCache, key);
if (pSaved == pShow) {
mTrace("%p, show is accquired from cache", pShow);
return true;
} else {
mTrace("show:%p, is already released", pShow);
return false;
}
}
static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size) {
if (tsQhandleCache != NULL) {
char key[24];
sprintf(key, "show:%p", pShow);
SShowObj *newQhandle = taosCachePut(tsQhandleCache, key, pShow, size, 60);
static void *mnodePutShowObj(SShowObj *pShow, int32_t size) {
if (tsMnodeShowCache != NULL) {
char key[16];
pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1);
sprintf(key, "%d", pShow->index);
SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, key, pShow, size, 60);
free(pShow);
mTrace("show:%p, is saved", newQhandle);
mTrace("%p, show is put into cache", newQhandle);
return newQhandle;
}
@ -343,10 +351,10 @@ static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size) {
static void mnodeFreeShowObj(void *data) {
SShowObj *pShow = data;
sdbFreeIter(pShow->pIter);
mTrace("show:%p, is destroyed", pShow);
mTrace("%p, show is destroyed", pShow);
}
static void mnodeCleanupShowObj(void *pShow, bool forceRemove) {
mTrace("show:%p, is released, force:%s", pShow, forceRemove ? "true" : "false");
taosCacheRelease(tsQhandleCache, &pShow, forceRemove);
static void mnodeReleaseShowObj(void *pShow, bool forceRemove) {
mTrace("%p, show is released, force:%s", pShow, forceRemove ? "true" : "false");
taosCacheRelease(tsMnodeShowCache, &pShow, forceRemove);
}
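The refactor replaces pointer-based qhandle validation with a lookup by a monotonically increasing integer key, so a handle issued before a restart, or one already released, simply misses the cache. A standalone sketch of the scheme with a toy four-slot cache (the real code uses atomic_add_fetch_32 plus taosCachePut/taosCacheAcquireByName):

#include <stdio.h>
#include <string.h>

static int  tsIndex = 0;
static char tsCacheKeys[4][16];   /* toy cache: just the stored keys */

static void putShowObj(char *keyOut, size_t len) {
  snprintf(keyOut, len, "%d", ++tsIndex);          /* fresh unique key */
  strncpy(tsCacheKeys[tsIndex % 4], keyOut, 15);   /* "insert" into the cache */
}

static int acquireShowObj(const char *key) {
  for (int i = 0; i < 4; i++) {
    if (strcmp(tsCacheKeys[i], key) == 0) return 1;   /* live handle */
  }
  return 0;                                           /* stale or released */
}

int main(void) {
  char key[16];
  putShowObj(key, sizeof(key));
  printf("acquire %s -> %d\n", key, acquireShowObj(key));   /* hit */
  printf("acquire 999 -> %d\n", acquireShowObj("999"));     /* stale key: miss */
  return 0;
}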

View File

@ -148,37 +148,30 @@ static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) {
return TSDB_CODE_INVALID_VGROUP_ID;
}
SVgObj *pVgroup = mnodeGetVgroup(pTable->vgId);
if (pVgroup == NULL) {
return TSDB_CODE_INVALID_VGROUP_ID;
}
mnodeDecVgroupRef(pVgroup);
SDbObj *pDb = mnodeGetDb(pVgroup->dbName);
if (pDb == NULL) {
mError("ctable:%s, vgId:%d not in DB:%s", pTable->info.tableId, pVgroup->vgId, pVgroup->dbName);
return TSDB_CODE_INVALID_DB;
}
mnodeDecDbRef(pDb);
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
if (pAcct == NULL) {
mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->acct);
return TSDB_CODE_INVALID_ACCT;
}
mnodeDecAcctRef(pAcct);
SVgObj *pVgroup = NULL;
SDbObj *pDb = NULL;
SAcctObj *pAcct = NULL;
pVgroup = mnodeGetVgroup(pTable->vgId);
if (pVgroup != NULL) pDb = mnodeGetDb(pVgroup->dbName);
if (pDb != NULL) pAcct = mnodeGetAcct(pDb->acct);
if (pTable->info.type == TSDB_CHILD_TABLE) {
grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
mnodeRemoveTableFromStable(pTable->superTable, pTable);
mnodeDecTableRef(pTable->superTable);
} else {
grantRestore(TSDB_GRANT_TIMESERIES, pTable->numOfColumns - 1);
pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
}
mnodeRemoveTableFromDb(pDb);
mnodeRemoveTableFromVgroup(pVgroup, pTable);
if (pDb != NULL) mnodeRemoveTableFromDb(pDb);
if (pVgroup != NULL) mnodeRemoveTableFromVgroup(pVgroup, pTable);
mnodeDecVgroupRef(pVgroup);
mnodeDecDbRef(pDb);
mnodeDecAcctRef(pAcct);
return TSDB_CODE_SUCCESS;
}
@ -484,7 +477,10 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) {
if (pStable == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
int32_t len = strlen(pOper->rowData);
if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID;
if (len > TSDB_TABLE_ID_LEN) {
free(pStable);
return TSDB_CODE_INVALID_TABLE_ID;
}
pStable->info.tableId = strdup(pOper->rowData);
len++;
@ -690,10 +686,10 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
}
if (pCreate->numOfTags != 0) {
mTrace("table:%s, create msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
mTrace("table:%s, create stable msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
return mnodeProcessCreateSuperTableMsg(pMsg);
} else {
mTrace("table:%s, create msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
mTrace("table:%s, create ctable msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
return mnodeProcessCreateChildTableMsg(pMsg);
}
}
@ -718,7 +714,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_SUCCESS;
} else {
mError("table:%s, failed to drop table, table not exist", pDrop->tableId);
return TSDB_CODE_INVALID_TABLE;
return TSDB_CODE_INVALID_TABLE_ID;
}
}
@ -746,7 +742,7 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
if (pMsg->pTable == NULL) {
if (!pInfo->createFlag) {
mError("table:%s, failed to get table meta, table not exist", pInfo->tableId);
return TSDB_CODE_INVALID_TABLE;
return TSDB_CODE_INVALID_TABLE_ID;
} else {
mTrace("table:%s, failed to get table meta, start auto create table ", pInfo->tableId);
return mnodeAutoCreateChildTable(pMsg);
@ -783,7 +779,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (pStable->schema == NULL) {
free(pStable);
mError("table:%s, failed to create, no schema input", pCreate->tableId);
return TSDB_CODE_INVALID_TABLE;
return TSDB_CODE_INVALID_TABLE_ID;
}
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
@ -1273,9 +1269,9 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
if (pTable->vgHash != NULL) {
if (pTable != NULL && pTable->vgHash != NULL) {
contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo));
}
}
mnodeDecTableRef(pTable);
}
@ -1284,12 +1280,23 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
pRsp->numOfTables = htonl(numOfTable);
char* msg = (char*) pRsp + sizeof(SCMSTableVgroupRspMsg);
pRsp->numOfTables = 0;
char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg);
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
if (pTable == NULL) {
mError("stable:%s, not exist while get stable vgroup info", stableName);
mnodeDecTableRef(pTable);
continue;
}
if (pTable->vgHash == NULL) {
mError("stable:%s, not vgroup exist while get stable vgroup info", stableName);
mnodeDecTableRef(pTable);
continue;
}
SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
@ -1315,17 +1322,25 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
}
taosHashDestroyIter(pIter);
mnodeDecTableRef(pTable);
pVgroupInfo->numOfVgroups = htonl(vgSize);
// one table is done, try the next table
msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
pRsp->numOfTables++;
}
pMsg->rpcRsp.rsp = pRsp;
pMsg->rpcRsp.len = msg - (char *)pRsp;
if (pRsp->numOfTables != numOfTable) {
rpcFreeCont(pRsp);
return TSDB_CODE_INVALID_TABLE_ID;
} else {
pRsp->numOfTables = htonl(pRsp->numOfTables);
pMsg->rpcRsp.rsp = pRsp;
pMsg->rpcRsp.len = msg - (char *)pRsp;
return TSDB_CODE_SUCCESS;
}
}
static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg) {
@ -1426,8 +1441,8 @@ static SChildTableObj* mnodeDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgOb
SSuperTableObj *pSuperTable = mnodeGetSuperTable(pTagData->name);
if (pSuperTable == NULL) {
mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData->name);
free(pTable);
terrno = TSDB_CODE_INVALID_TABLE;
mnodeDestroyChildTable(pTable);
terrno = TSDB_CODE_INVALID_TABLE_ID;
return NULL;
}
mnodeDecTableRef(pSuperTable);
@ -1735,7 +1750,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
STagData* pTag = (STagData*)pInfo->tags;
STagData *pTag = (STagData *)pInfo->tags;
int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + ntohl(pTag->dataLen);
SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
@ -1751,14 +1766,13 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
pCreateMsg->contLen = htonl(contLen);
memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));
mTrace("table:%s, start to create on demand, stable:%s", pInfo->tableId, ((STagData *)(pCreateMsg->schema))->name);
rpcFreeCont(pMsg->rpcMsg.pCont);
pMsg->rpcMsg.msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;
pMsg->rpcMsg.pCont = pCreateMsg;
pMsg->rpcMsg.contLen = contLen;
mTrace("table:%s, start to create on demand, stable:%s", pInfo->tableId, pInfo->tags);
return TSDB_CODE_ACTION_NEED_REPROCESSED;
}
@ -1877,38 +1891,25 @@ static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont;
pCfg->dnode = htonl(pCfg->dnode);
pCfg->vnode = htonl(pCfg->vnode);
pCfg->sid = htonl(pCfg->sid);
mTrace("dnode:%s, vnode:%d, sid:%d, receive table config msg", taosIpStr(pCfg->dnode), pCfg->vnode, pCfg->sid);
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->vgId = htonl(pCfg->vgId);
pCfg->sid = htonl(pCfg->sid);
mTrace("dnode:%d, vgId:%d sid:%d, receive table config msg", pCfg->dnodeId, pCfg->vgId, pCfg->sid);
SChildTableObj *pTable = mnodeGetTableByPos(pCfg->vnode, pCfg->sid);
SChildTableObj *pTable = mnodeGetTableByPos(pCfg->vgId, pCfg->sid);
if (pTable == NULL) {
mError("dnode:%s, vnode:%d, sid:%d, table not found", taosIpStr(pCfg->dnode), pCfg->vnode, pCfg->sid);
return TSDB_CODE_NOT_ACTIVE_TABLE;
mError("dnode:%d, vgId:%d sid:%d, table not found", pCfg->dnodeId, pCfg->vgId, pCfg->sid);
return TSDB_CODE_INVALID_TABLE_ID;
}
SMDCreateTableMsg *pMDCreate = NULL;
pMDCreate = mnodeBuildCreateChildTableMsg(NULL, (SChildTableObj *)pTable);
if (pMDCreate == NULL) {
mnodeDecTableRef(pTable);
return terrno;
}
SDnodeObj *pDnode = mnodeGetDnode(pCfg->dnode);
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
SRpcMsg rpcRsp = {
.handle = NULL,
.pCont = pMDCreate,
.contLen = htonl(pMDCreate->contLen),
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_CREATE_TABLE
};
dnodeSendMsgToDnode(&ipSet, &rpcRsp);
SMDCreateTableMsg *pCreate = NULL;
pCreate = mnodeBuildCreateChildTableMsg(NULL, (SChildTableObj *)pTable);
mnodeDecTableRef(pTable);
mnodeDecDnodeRef(pDnode);
if (pCreate == NULL) return terrno;
pMsg->rpcRsp.rsp = pCreate;
pMsg->rpcRsp.len = htonl(pCreate->contLen);
return TSDB_CODE_SUCCESS;
}
@ -1926,7 +1927,6 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->code != TSDB_CODE_SUCCESS) {
mError("table:%s, failed to drop in dnode, reason:%s", pTable->info.tableId, tstrerror(rpcMsg->code));
dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code);
mnodeDecTableRef(pTable);
return;
}
@ -2210,7 +2210,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableId);
if (pMsg->pTable == NULL) {
mError("table:%s, failed to alter table, table not exist", pMsg->pTable->tableId);
return TSDB_CODE_INVALID_TABLE;
return TSDB_CODE_INVALID_TABLE_ID;
}
pAlter->type = htons(pAlter->type);
@ -2369,4 +2369,4 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
mnodeDecDbRef(pDb);
return numOfRows;
}

View File

@ -716,14 +716,14 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
SDnodeObj *pDnode = mnodeGetDnode(pCfg->dnodeId);
if (pDnode == NULL) {
mTrace("dnode:%s, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
return TSDB_CODE_NOT_ACTIVE_VNODE;
return TSDB_CODE_INVALID_VGROUP_ID;
}
mnodeDecDnodeRef(pDnode);
SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId);
if (pVgroup == NULL) {
mTrace("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId);
return TSDB_CODE_NOT_ACTIVE_VNODE;
return TSDB_CODE_INVALID_VGROUP_ID;
}
mnodeDecVgroupRef(pVgroup);
@ -784,7 +784,7 @@ void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
mPrint("db:%s, all vgroups is updated in sdb", pAlterDb->name);
}
void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
void mnodeDropAllDbVgroups(SDbObj *pDropDb) {
void * pIter = NULL;
int32_t numOfVgroups = 0;
SVgObj *pVgroup = NULL;
@ -802,10 +802,6 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
};
sdbDeleteRow(&oper);
numOfVgroups++;
if (sendMsg) {
mnodeSendDropVgroupMsg(pVgroup, NULL);
}
}
mnodeDecVgroupRef(pVgroup);
@ -815,3 +811,25 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
mPrint("db:%s, all vgroups:%d is dropped from sdb", pDropDb->name, numOfVgroups);
}
void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb) {
void * pIter = NULL;
int32_t numOfVgroups = 0;
SVgObj *pVgroup = NULL;
mPrint("db:%s, all vgroups will be dropped in dnode", pDropDb->name);
while (1) {
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (pVgroup->pDb == pDropDb) {
mnodeSendDropVgroupMsg(pVgroup, NULL);
numOfVgroups++;
}
mnodeDecVgroupRef(pVgroup);
}
sdbFreeIter(pIter);
mPrint("db:%s, all vgroups:%d drop msg is sent to dnode", pDropDb->name, numOfVgroups);
}

View File

@ -445,10 +445,10 @@ void httpJsonPairStatus(JsonBuf* buf, int code) {
httpJsonItemToken(buf);
if (code == TSDB_CODE_DB_NOT_SELECTED) {
httpJsonPair(buf, "desc", 4, "failed to create database", 23);
} else if (code == TSDB_CODE_INVALID_TABLE) {
} else if (code == TSDB_CODE_INVALID_TABLE_ID) {
httpJsonPair(buf, "desc", 4, "failed to create table", 22);
} else
httpJsonPair(buf, "desc", 4, (char*)tstrerror(code), (int)strlen(tstrerror(code)));
}
}
}

View File

@ -281,7 +281,7 @@ int tgReadSchema(char *fileName) {
}
void tgInitHandle(HttpServer *pServer) {
char fileName[256] = {0};
char fileName[TSDB_FILENAME_LEN*2] = {0};
sprintf(fileName, "%s/taos.telegraf.cfg", configDir);
if (tgReadSchema(fileName) <= 0) {
tgFreeSchemas();

View File

@ -111,7 +111,7 @@ bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
pContext->ipstr);
return false;
}
} else if (code == TSDB_CODE_INVALID_TABLE) {
} else if (code == TSDB_CODE_INVALID_TABLE_ID) {
cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
if (multiCmds->cmds[multiCmds->pos - 1].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
multiCmds->pos = (int16_t)(multiCmds->pos - 2);
@ -151,4 +151,4 @@ void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
} else {
multiCmds->pos++;
}
}

View File

@ -28,21 +28,16 @@
#include "tsdb.h"
#include "tsqlfunction.h"
//typedef struct tFilePage {
// int64_t num;
// char data[];
//} tFilePage;
struct SColumnFilterElem;
typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2);
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
typedef struct SSqlGroupbyExpr {
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols;
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols;
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SSqlGroupbyExpr;
typedef struct SPosInfo {
@ -62,25 +57,27 @@ typedef struct SWindowResult {
SWindowStatus status; // this result status: closed or opened
} SWindowResult;
/**
* If the number of generated results is greater than this value,
* the query will be halted and the results returned to the client immediately.
*/
typedef struct SResultRec {
int64_t total; // total generated result size in rows
int64_t rows; // current result set size in rows
int64_t capacity; // capacity of current result output buffer
// result size threshold in rows. If the result buffer is larger than this, pause query and return to client
int32_t threshold;
int64_t total; // total generated result size in rows
int64_t rows; // current result set size in rows
int64_t capacity; // capacity of current result output buffer
int32_t threshold; // result size threshold in rows.
} SResultRec;
typedef struct SWindowResInfo {
SWindowResult* pResult; // result list
void* hashList; // hash list for quick access
SHashObj* hashList; // hash list for quick access
int16_t type; // data type for hash key
int32_t capacity; // max capacity
int32_t curIndex; // current start active index
int32_t size; // number of result set
int64_t startTime; // start time of the first time window for sliding query
int64_t prevSKey; // previous (not completed) sliding window start key
int64_t threshold; // threshold to pausing query and return closed results.
int64_t threshold; // threshold to halt the query and return the generated results.
} SWindowResInfo;
typedef struct SColumnFilterElem {
@ -90,98 +87,111 @@ typedef struct SColumnFilterElem {
} SColumnFilterElem;
typedef struct SSingleColumnFilterInfo {
SColumnInfo info;
int32_t numOfFilters;
SColumnFilterElem* pFilters;
void* pData;
int32_t numOfFilters;
SColumnInfo info;
SColumnFilterElem* pFilters;
} SSingleColumnFilterInfo;
typedef struct STableQueryInfo { // todo merge with the STableQueryInfo struct
int32_t tableIndex;
int32_t groupIdx; // group id in table list
int32_t groupIndex; // group id in table list
TSKEY lastKey;
int32_t numOfRes;
int16_t queryRangeSet; // denote if the query range is set, only available for interval query
int64_t tag;
STimeWindow win;
STSCursor cur;
STableId id; // for retrieve the page id list
STableId id; // for retrieve the page id list
SWindowResInfo windowResInfo;
} STableQueryInfo;
typedef struct SQueryCostSummary {
} SQueryCostSummary;
typedef struct SQueryCostInfo {
uint64_t loadStatisTime;
uint64_t loadFileBlockTime;
uint64_t loadDataInCacheTime;
uint64_t loadStatisSize;
uint64_t loadFileBlockSize;
uint64_t loadDataInCacheSize;
uint64_t loadDataTime;
uint64_t dataInRows;
uint64_t checkRows;
uint32_t dataBlocks;
uint32_t loadBlockStatis;
uint32_t discardBlocks;
} SQueryCostInfo;
typedef struct SGroupItem {
STableId id;
STableId id;
STableQueryInfo* info;
} SGroupItem;
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
char slidingTimeUnit; // interval data type, used for daytime revise
int8_t precision;
int16_t numOfOutput;
int16_t fillType;
int16_t checkBuffer; // check if the buffer is full while scanning each block
SLimitVal limit;
int32_t rowSize;
SSqlGroupbyExpr* pGroupbyExpr;
SExprInfo* pSelectExpr;
SColumnInfo* colList;
SColumnInfo* tagColList;
int32_t numOfFilterCols;
int64_t* fillVal;
uint32_t status; // query status
SResultRec rec;
int32_t pos;
tFilePage** sdata;
STableQueryInfo* current;
int16_t numOfCols;
int16_t numOfTags;
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
char slidingTimeUnit; // interval data type, used for daytime revise
int8_t precision;
int16_t numOfOutput;
int16_t fillType;
int16_t checkBuffer; // check if the buffer is full while scanning each block
SLimitVal limit;
int32_t rowSize;
SSqlGroupbyExpr* pGroupbyExpr;
SExprInfo* pSelectExpr;
SColumnInfo* colList;
SColumnInfo* tagColList;
int32_t numOfFilterCols;
int64_t* fillVal;
uint32_t status; // query status
SResultRec rec;
int32_t pos;
tFilePage** sdata;
STableQueryInfo* current;
SSingleColumnFilterInfo* pFilterInfo;
} SQuery;
typedef struct SQueryRuntimeEnv {
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
int16_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
SWindowResInfo windowResInfo;
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostSummary summary;
bool stableQuery; // super table query or not
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
int16_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
SWindowResInfo windowResInfo;
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostInfo summary;
bool stableQuery; // super table query or not
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv;
typedef struct SQInfo {
void* signature;
TSKEY startTime;
TSKEY elapsedTime;
int32_t pointsInterpo;
int32_t code; // error code returned to client
sem_t dataReady;
void* tsdb;
int32_t vgId;
void* signature;
TSKEY startTime;
TSKEY elapsedTime;
int32_t pointsInterpo;
int32_t code; // error code returned to client
sem_t dataReady;
void* tsdb;
int32_t vgId;
STableGroupInfo tableIdGroupInfo; // table id list < only includes the STableId list>
STableGroupInfo groupInfo; //
SQueryRuntimeEnv runtimeEnv;
int32_t groupIndex;
int32_t offset; // offset in group result set of subgroup, todo refactor
int32_t offset; // offset in group result set of subgroup, todo refactor
SArray* arrTableIdInfo;
T_REF_DECLARE()
/*
* the position in the whole table list at which the query is currently executing.
@ -189,8 +199,8 @@ typedef struct SQInfo {
* We may later refactor to remove this attribute by using another flag to denote
* whether a multi-table query is completed or not.
*/
int32_t tableIndex;
int32_t numOfGroupResultPages;
int32_t tableIndex;
int32_t numOfGroupResultPages;
} SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H

View File

@ -28,8 +28,7 @@ extern "C" {
#include "tdataformat.h"
#include "talgo.h"
#define DEFAULT_PAGE_SIZE 16384 // 16k larger than the SHistoInfo
#define MIN_BUFFER_SIZE (1 << 19)
#define DEFAULT_PAGE_SIZE (1024L*56) // 56k, larger than the SHistoInfo
#define MAX_TMPFILE_PATH_LENGTH PATH_MAX
#define INITIAL_ALLOCATION_BUFFER_SIZE 64

View File

@ -45,12 +45,13 @@ typedef struct SFillInfo {
int32_t numOfCols; // number of columns, including the tags columns
int32_t rowSize; // size of each row
char ** pTags; // tags value for current interpolation
int64_t slidingTime; // sliding value to determine the number of result for a given time window
int64_t slidingTime; // sliding value to determine the number of result for a given time window
char * prevValues; // previous row of data, to generate the interpolation results
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
SFillColInfo* pFillCol; // column info for fill operations
char** pData; // original result data block involved in filling data
} SFillInfo;
typedef struct SPoint {

View File

@ -44,6 +44,8 @@ typedef struct SDiskbasedResultBuf {
SIDList* list; // for each id, there is a page id list
} SDiskbasedResultBuf;
#define DEFAULT_INTERN_BUF_PAGE_SIZE (8192L*5)
/**
* create disk-based result buffer
* @param pResultBuf

View File

@ -161,26 +161,24 @@ typedef struct SExtTagsInfo {
// sql function runtime context
typedef struct SQLFunctionCtx {
int32_t startOffset;
int32_t size; // number of rows
uint32_t order; // asc|desc
uint32_t scanFlag; // TODO merge with currentStage
int16_t inputType;
int16_t inputBytes;
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
uint8_t currentStage; // record current running step, default: 0
int64_t nStartQueryTimestamp; // timestamp range of current query when function is executed on a specific data block
int32_t numOfParams;
tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
int64_t *ptsList; // corresponding timestamp array list
void * ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom
int32_t startOffset;
int32_t size; // number of rows
uint32_t order; // asc|desc
int16_t inputType;
int16_t inputBytes;
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
uint8_t currentStage; // record current running step, default: 0
int64_t nStartQueryTimestamp; // timestamp range of current query when function is executed on a specific data block
int32_t numOfParams;
tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
int64_t * ptsList; // corresponding timestamp array list
void * ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom
SQLPreAggVal preAggVals;
tVariant tag;
SResultInfo *resultInfo;
@ -219,7 +217,7 @@ typedef struct SQLAggFuncElem {
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *len, int16_t *interBytes, int16_t extLength, bool isSuperTable);
int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable);
#define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0)
#define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0)
@ -239,7 +237,7 @@ enum {
/* determine the real data need to calculated the result */
enum {
BLK_DATA_NO_NEEDED = 0x0,
BLK_DATA_FILEDS_NEEDED = 0x1,
BLK_DATA_STATIS_NEEDED = 0x1,
BLK_DATA_ALL_NEEDED = 0x3,
};
@ -269,9 +267,6 @@ extern struct SQLAggFuncElem aAggs[];
/* compatible check array list */
extern int32_t funcCompatDefList[];
void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull);
bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval);
bool stableQueryFunctChanged(int32_t funcId);

View File

@ -48,7 +48,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc);
int32_t tVariantToString(tVariant *pVar, char *dst);
int32_t tVariantDump(tVariant *pVariant, char *payload, char type);
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix);
int32_t tVariantTypeSetType(tVariant *pVariant, char type);

File diff suppressed because it is too large

View File

@ -209,7 +209,7 @@ bool like_str(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool like_nchar(SColumnFilterElem* pFilter, char* minval, char *maxval) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
return WCSPatternMatch((wchar_t*) pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH;
return WCSPatternMatch((wchar_t*)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH;
}
////////////////////////////////////////////////////////////////

View File

@ -137,11 +137,10 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
SWindowResult *pResult = &pWindowResInfo->pResult[k];
int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE);
int32_t v = (*p - num);
assert(v >= 0 && v <= pWindowResInfo->size);
taosHashPut(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE, (char *)&v,
sizeof(int32_t));
taosHashPut(pWindowResInfo->hashList, (char *)&pResult->window.skey, TSDB_KEYSIZE, (char *)&v, sizeof(int32_t));
}
pWindowResInfo->curIndex = -1;

View File

@ -79,7 +79,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
int32_t rowsize = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes = pFillInfo->pFillCol[i].col.bytes;
pFillInfo->pData[i] = calloc(1, sizeof(tFilePage) + bytes * capacity);
pFillInfo->pData[i] = calloc(1, bytes * capacity);
rowsize += bytes;
}
@ -89,6 +89,8 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
}
pFillInfo->rowSize = rowsize;
pFillInfo->capacityInRows = capacity;
return pFillInfo;
}
@ -119,6 +121,17 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
pFillInfo->rowIdx = 0;
pFillInfo->endKey = endKey;
pFillInfo->numOfRows = numOfRows;
// ensure the buffers are large enough to hold numOfRows rows
if (pFillInfo->capacityInRows < numOfRows) {
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
char* tmp = realloc(pFillInfo->pData[i], numOfRows*pFillInfo->pFillCol[i].col.bytes);
assert(tmp != NULL); // todo handle error
memset(tmp, 0, numOfRows*pFillInfo->pFillCol[i].col.bytes);
pFillInfo->pData[i] = tmp;
}
}
}
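The block just added grows each per-column buffer on demand and zeroes it before refilling. A minimal standalone sketch of the same ensure-capacity pattern (hypothetical names, not the TDengine API):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* grow buf so it holds at least needRows rows of rowBytes each; the buffer is
 * zeroed after growth, matching the memset in the block above */
static char* ensureRowCapacity(char* buf, int32_t curRows, int32_t needRows, int32_t rowBytes) {
  if (needRows <= curRows) {
    return buf;
  }
  char* tmp = realloc(buf, (size_t)needRows * rowBytes);
  assert(tmp != NULL);  // production code should handle allocation failure
  memset(tmp, 0, (size_t)needRows * rowBytes);
  return tmp;
}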
void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) {
@ -474,11 +487,11 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
}
int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator?
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, pFillInfo->endKey, capacity);
int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator?
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, pFillInfo->endKey, capacity);
int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData);
assert(numOfRes == rows);
int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData);
assert(numOfRes == rows);
return numOfRes;
return numOfRes;
}

View File

@ -5,14 +5,12 @@
#include "tsqlfunction.h"
#include "queryLog.h"
#define DEFAULT_INTERN_BUF_SIZE 16384L
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t size, int32_t rowSize, void* handle) {
SDiskbasedResultBuf* pResBuf = calloc(1, sizeof(SDiskbasedResultBuf));
pResBuf->numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / rowSize;
pResBuf->numOfRowsPerPage = (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
pResBuf->numOfPages = size;
pResBuf->totalBufSize = pResBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE;
pResBuf->totalBufSize = pResBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE;
pResBuf->incStep = 4;
// init id hash table
@ -33,7 +31,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si
return TSDB_CODE_CLI_NO_DISKSPACE;
}
int32_t ret = ftruncate(pResBuf->fd, pResBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE);
int32_t ret = ftruncate(pResBuf->fd, pResBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE);
if (ret != TSDB_CODE_SUCCESS) {
qError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno));
return TSDB_CODE_CLI_NO_DISKSPACE;
@ -55,7 +53,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id) {
assert(id < pResultBuf->numOfPages && id >= 0);
return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_SIZE * id);
return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE * id);
}
int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
@ -63,7 +61,7 @@ int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosH
int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
static int32_t extendDiskFileSize(SDiskbasedResultBuf* pResultBuf, int32_t numOfPages) {
assert(pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE == pResultBuf->totalBufSize);
assert(pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE == pResultBuf->totalBufSize);
int32_t ret = munmap(pResultBuf->pBuf, pResultBuf->totalBufSize);
pResultBuf->numOfPages += numOfPages;
@ -72,14 +70,14 @@ static int32_t extendDiskFileSize(SDiskbasedResultBuf* pResultBuf, int32_t numOf
* disk-based output buffer is exhausted, try to extend the disk-based buffer, the available disk space may
* be insufficient
*/
ret = ftruncate(pResultBuf->fd, pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE);
ret = ftruncate(pResultBuf->fd, pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE);
if (ret != 0) {
// dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile,
// strerror(errno));
return -TSDB_CODE_SERV_NO_DISKSPACE;
}
pResultBuf->totalBufSize = pResultBuf->numOfPages * DEFAULT_INTERN_BUF_SIZE;
pResultBuf->totalBufSize = pResultBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE;
pResultBuf->pBuf = mmap(NULL, pResultBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResultBuf->fd, 0);
if (pResultBuf->pBuf == MAP_FAILED) {
@ -174,7 +172,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
tFilePage* page = getResultBufferPageById(pResultBuf, *pageId);
// clear memory for the new page
memset(page, 0, DEFAULT_INTERN_BUF_SIZE);
memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE);
return page;
}
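These call sites all switch to the renamed DEFAULT_INTERN_BUF_PAGE_SIZE. For reference, the extend-on-demand pattern used by extendDiskFileSize, which unmaps, grows the backing file with ftruncate, then remaps, looks like this in isolation (POSIX calls only, hypothetical names):

#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* grow a file-backed mapping by extraBytes; returns the new mapping, or NULL
 * if the unmap, the file extension, or the remap fails */
static void* extendMapping(int fd, void* oldMap, size_t oldSize, size_t extraBytes) {
  if (munmap(oldMap, oldSize) != 0) {
    return NULL;
  }
  size_t newSize = oldSize + extraBytes;
  if (ftruncate(fd, (off_t)newSize) != 0) {
    return NULL;  // may fail when the disk is full
  }
  void* p = mmap(NULL, newSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  return (p == MAP_FAILED) ? NULL : p;
}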

View File

@ -363,8 +363,6 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
taosUcs4ToMbs(pVariant->wpz, newSize, pBuf);
free(pVariant->wpz);
/* terminated string */
pBuf[newSize] = 0;
} else {
taosUcs4ToMbs(pVariant->wpz, newSize, *pDest);
@ -598,7 +596,7 @@ static int32_t convertToBool(tVariant *pVariant, int64_t *pDest) {
*
* todo handle the return value
*/
int32_t tVariantDump(tVariant *pVariant, char *payload, char type) {
int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool includeLengthPrefix) {
if (pVariant == NULL || (pVariant->nType != 0 && !isValidDataType(pVariant->nType, pVariant->nLen))) {
return -1;
}
@ -765,13 +763,30 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) {
}
case TSDB_DATA_TYPE_BINARY: {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*payload = TSDB_DATA_BINARY_NULL;
} else {
if (pVariant->nType != TSDB_DATA_TYPE_BINARY) {
toBinary(pVariant, &payload, &pVariant->nLen);
if (!includeLengthPrefix) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*(uint8_t*) payload = TSDB_DATA_BINARY_NULL;
} else {
strncpy(payload, pVariant->pz, pVariant->nLen);
if (pVariant->nType != TSDB_DATA_TYPE_BINARY) {
toBinary(pVariant, &payload, &pVariant->nLen);
} else {
strncpy(payload, pVariant->pz, pVariant->nLen);
}
}
} else {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setVardataNull(payload, TSDB_DATA_TYPE_BINARY);
} else {
char *p = varDataVal(payload);
if (pVariant->nType != TSDB_DATA_TYPE_BINARY) {
toBinary(pVariant, &p, &pVariant->nLen);
} else {
strncpy(p, pVariant->pz, pVariant->nLen);
}
varDataSetLen(payload, pVariant->nLen);
assert(p == varDataVal(payload));
}
}
break;
@ -785,15 +800,33 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) {
break;
}
case TSDB_DATA_TYPE_NCHAR: {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*(uint32_t *) payload = TSDB_DATA_NCHAR_NULL;
} else {
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &payload, &pVariant->nLen);
if (!includeLengthPrefix) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*(uint32_t *)payload = TSDB_DATA_NCHAR_NULL;
} else {
wcsncpy((wchar_t *)payload, pVariant->wpz, pVariant->nLen);
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &payload, &pVariant->nLen);
} else {
wcsncpy((wchar_t *)payload, pVariant->wpz, pVariant->nLen);
}
}
} else {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setVardataNull(payload, TSDB_DATA_TYPE_NCHAR);
} else {
char *p = varDataVal(payload);
if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) {
toNchar(pVariant, &p, &pVariant->nLen);
} else {
wcsncpy((wchar_t *)p, pVariant->wpz, pVariant->nLen);
}
varDataSetLen(payload, pVariant->nLen); // the length may be changed after toNchar function called
assert(p == varDataVal(payload));
}
}
break;
}
}
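Both new branches depend on the var-data layout: a BINARY/NCHAR value is stored as a length prefix followed by the payload, which is what varDataVal and varDataSetLen address. A simplified sketch of that layout (the 2-byte prefix and these macro names are assumptions; the real macros live in the TDengine headers):

#include <stdint.h>
#include <string.h>

#define VARSTR_HEADER_SIZE  sizeof(uint16_t)           // length prefix
#define varDataLenRef(v)    (*(uint16_t*)(v))          // prefix as an lvalue
#define varDataValPtr(v)    ((char*)(v) + VARSTR_HEADER_SIZE)

/* write a string into a var-data slot: payload first, then the length prefix */
static void varDataSetStr(char* dst, const char* src, uint16_t len) {
  memcpy(varDataValPtr(dst), src, len);
  varDataLenRef(dst) = len;
}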

View File

@ -124,8 +124,7 @@ typedef struct SRpcConn {
} SRpcConn;
int tsRpcMaxUdpSize = 15000; // bytes
int tsRpcProgressTime = 10; // milliseconds
int tsProgressTimer = 100;
// not configurable
int tsRpcMaxRetry;
int tsRpcHeadSize;
@ -204,7 +203,8 @@ static void rpcUnlockConn(SRpcConn *pConn);
void *rpcOpen(const SRpcInit *pInit) {
SRpcInfo *pRpc;
tsRpcMaxRetry = tsRpcMaxTime * 1000 / tsRpcProgressTime;
tsProgressTimer = tsRpcTimer/2;
tsRpcMaxRetry = tsRpcMaxTime * 1000/tsProgressTimer;
tsRpcHeadSize = RPC_MSG_OVERHEAD;
tsRpcOverhead = sizeof(SRpcReqContext);
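The retry budget is now derived from the halved RPC timer instead of the old fixed 10 ms progress interval. A worked example with hypothetical values (tsRpcTimer = 300 ms, tsRpcMaxTime = 600 s):

tsProgressTimer = tsRpcTimer / 2 = 300 / 2 = 150                       // milliseconds
tsRpcMaxRetry   = tsRpcMaxTime * 1000 / tsProgressTimer = 600 * 1000 / 150 = 4000   // retries before giving up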
@ -420,13 +420,16 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
pConn->rspMsgLen = msgLen;
if (pMsg->code == TSDB_CODE_ACTION_IN_PROGRESS) pConn->inTranId--;
rpcUnlockConn(pConn);
SRpcInfo *pRpc = pConn->pRpc;
taosTmrStopA(&pConn->pTimer);
// taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
// set the idle timer to monitor the activity
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
rpcSendMsgToPeer(pConn, msg, msgLen);
pConn->secured = 1; // connection shall be secured
rpcUnlockConn(pConn);
return;
}
@ -683,6 +686,7 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
tError("%s %p, failed to set up connection(%s)", pRpc->label, pContext->ahandle, tstrerror(terrno));
}
pConn->tretry = 0;
return pConn;
}
@ -748,20 +752,28 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
taosTmrStopA(&pConn->pTimer);
pConn->retry = 0;
if (pHead->code == TSDB_CODE_AUTH_REQUIRED && pRpc->spi) {
tTrace("%s, authentication shall be restarted", pConn->info);
pConn->secured = 0;
rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
return TSDB_CODE_ALREADY_PROCESSED;
}
if (pHead->code == TSDB_CODE_ACTION_IN_PROGRESS) {
if (pConn->tretry <= tsRpcMaxRetry) {
tTrace("%s, peer is still processing the transaction", pConn->info);
tTrace("%s, peer is still processing the transaction, retry:%d", pConn->info, pConn->tretry);
pConn->tretry++;
rpcSendReqHead(pConn);
taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer);
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
return TSDB_CODE_ALREADY_PROCESSED;
} else {
// peer still in processing, give up
return TSDB_CODE_TOO_SLOW;
tTrace("%s, server processing takes too long time, give up", pConn->info);
pHead->code = TSDB_CODE_TOO_SLOW;
}
}
pConn->tretry = 0;
pConn->outType = 0;
pConn->pReqMsg = NULL;
pConn->reqMsgLen = 0;
@ -820,7 +832,9 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
if ( rpcIsReq(pHead->msgType) ) {
terrno = rpcProcessReqHead(pConn, pHead);
pConn->connType = pRecv->connType;
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
// the client shall resend the request within tsRpcTimer; allow 20 milliseconds of tolerance
taosTmrReset(rpcProcessIdleTimer, tsRpcTimer+20, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
} else {
terrno = rpcProcessRspHead(pConn, pHead);
}
@ -935,7 +949,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
if ( rpcIsReq(pHead->msgType) ) {
rpcMsg.handle = pConn;
taosTmrReset(rpcProcessProgressTimer, tsRpcTimer/2, pConn, pRpc->tmrCtrl, &pConn->pTimer);
pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
(*(pRpc->cfp))(&rpcMsg, NULL);
} else {
// it's a response
@ -943,14 +957,12 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
rpcMsg.handle = pContext->ahandle;
pConn->pContext = NULL;
if (pHead->code == TSDB_CODE_AUTH_REQUIRED) {
pConn->secured = 0;
rpcSendReqToServer(pRpc, pContext);
return;
}
// for UDP, port may be changed by server, the port in ipSet shall be used for cache
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->ipSet.port[pContext->ipSet.inUse], pConn->connType);
if (pHead->code != TSDB_CODE_TOO_SLOW) {
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->ipSet.port[pContext->ipSet.inUse], pConn->connType);
} else {
rpcCloseConn(pConn);
}
if (pHead->code == TSDB_CODE_REDIRECT) {
pContext->redirect++;
@ -1039,6 +1051,7 @@ static void rpcSendErrorMsgToPeer(SRecvInfo *pRecv, int32_t code) {
pReplyHead->sourceId = pRecvHead->destId;
pReplyHead->destId = pRecvHead->sourceId;
pReplyHead->linkUid = pRecvHead->linkUid;
pReplyHead->ahandle = pRecvHead->ahandle;
pReplyHead->code = htonl(code);
msgLen = sizeof(SRpcHead);
@ -1095,10 +1108,10 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
pConn->reqMsgLen = msgLen;
pConn->pContext = pContext;
rpcUnlockConn(pConn);
taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer);
rpcSendMsgToPeer(pConn, msg, msgLen);
taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer);
rpcUnlockConn(pConn);
}
static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
@ -1172,7 +1185,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
if (pConn->retry < 4) {
tTrace("%s, re-send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort);
rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer);
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
} else {
// close the connection
tTrace("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort);
@ -1225,7 +1238,7 @@ static void rpcProcessProgressTimer(void *param, void *tmrId) {
if (pConn->inType && pConn->user[0]) {
tTrace("%s, progress timer expired, send progress", pConn->info);
rpcSendQuickRsp(pConn, TSDB_CODE_ACTION_IN_PROGRESS);
taosTmrReset(rpcProcessProgressTimer, tsRpcTimer/2, pConn, pRpc->tmrCtrl, &pConn->pTimer);
pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
} else {
tTrace("%s, progress timer:%p not processed", pConn->info, tmrId);
}
@ -1357,15 +1370,17 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) {
if ((pConn->secured && pHead->spi == 0) || (pHead->spi == 0 && pConn->spi == 0)){
// secured link, or no authentication
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen);
// tTrace("%s, secured link, no auth is required", pConn->info);
return 0;
}
if ( !rpcIsReq(pHead->msgType) ) {
// for response, if code is auth failure, it shall bypass the auth process
code = htonl(pHead->code);
if (code==TSDB_CODE_INVALID_TIME_STAMP || code==TSDB_CODE_AUTH_FAILURE ||
if (code==TSDB_CODE_INVALID_TIME_STAMP || code==TSDB_CODE_AUTH_FAILURE || code == TSDB_CODE_AUTH_REQUIRED ||
code==TSDB_CODE_INVALID_USER || code == TSDB_CODE_NOT_READY) {
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen);
// tTrace("%s, dont check authentication since code is:0x%x", pConn->info, code);
return 0;
}
}
@ -1388,12 +1403,12 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) {
} else {
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen) - sizeof(SRpcDigest);
if ( !rpcIsReq(pHead->msgType) ) pConn->secured = 1; // link is secured for client
//tTrace("%s, message is authenticated", pConn->info);
// tTrace("%s, message is authenticated", pConn->info);
}
}
} else {
tError("%s, auth spi:%d not matched with received:%d", pConn->info, pConn->spi, pHead->spi);
code = TSDB_CODE_AUTH_FAILURE;
code = pHead->spi ? TSDB_CODE_AUTH_FAILURE : TSDB_CODE_AUTH_REQUIRED;
}
return code;

View File

@ -87,6 +87,7 @@ typedef struct STable {
struct STable *prev;
tstr * name; // NOTE: there is a flexible string here
char * sql;
void * cqhandle;
} STable;
#define TSDB_GET_TABLE_LAST_KEY(tb) ((tb)->lastKey)
@ -110,6 +111,7 @@ typedef struct {
SMetaFile *mfh; // meta file handle
int maxRowBytes;
int maxCols;
void * pRepo;
} STsdbMeta;
// element put in skiplist for each table
@ -118,7 +120,7 @@ typedef struct STableIndexElem {
STable* pTable;
} STableIndexElem;
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables);
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables, void *pRepo);
int32_t tsdbFreeMeta(STsdbMeta *pMeta);
STSchema * tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable);
STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable);

View File

@ -288,7 +288,11 @@ int tsdbCopyBlockDataInFile(SFile *pOutFile, SFile *pInFile, SCompInfo *pCompInf
static int compFGroupKey(const void *key, const void *fgroup) {
int fid = *(int *)key;
SFileGroup *pFGroup = (SFileGroup *)fgroup;
return (fid - pFGroup->fileId);
if (fid == pFGroup->fileId) {
return 0;
} else {
return fid > pFGroup->fileId? 1:-1;
}
}
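The comparator rewrite matters because returning fid - pFGroup->fileId directly can overflow int when the two ids are far apart, flipping the sign of the result. The overflow-safe three-way idiom in isolation:

#include <limits.h>

/* three-way compare without subtraction; (a - b) overflows for e.g.
 * a = INT_MAX, b = -1, and then the sign of the result is meaningless */
static int cmpInt32(int a, int b) {
  return (a > b) - (a < b);
}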
static int compFGroup(const void *arg1, const void *arg2) {

View File

@ -189,9 +189,9 @@ _err:
*
* @return a TSDB repository handle on success, NULL for failure and the error number is set
*/
TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH) {
TsdbRepoT *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH) {
char dataDir[128] = "\0";
if (access(tsdbDir, F_OK | W_OK | R_OK) < 0) {
if (access(rootDir, F_OK | W_OK | R_OK) < 0) {
return NULL;
}
@ -200,12 +200,12 @@ TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH) {
return NULL;
}
pRepo->rootDir = strdup(tsdbDir);
pRepo->rootDir = strdup(rootDir);
tsdbRestoreCfg(pRepo, &(pRepo->config));
if (pAppH) pRepo->appH = *pAppH;
pRepo->tsdbMeta = tsdbInitMeta(tsdbDir, pRepo->config.maxTables);
pRepo->tsdbMeta = tsdbInitMeta(rootDir, pRepo->config.maxTables, pRepo);
if (pRepo->tsdbMeta == NULL) {
free(pRepo->rootDir);
free(pRepo);

View File

@ -103,7 +103,8 @@ STable *tsdbDecodeTable(void *cont, int contLen) {
if (pTable->type == TSDB_STREAM_TABLE) {
ptr = taosDecodeString(ptr, &(pTable->sql));
}
pTable->lastKey = TSKEY_INITIAL_VAL;
return pTable;
}
@ -118,7 +119,7 @@ static char* getTagIndexKey(const void* pData) {
STSchema* pSchema = tsdbGetTableTagSchema(elem->pMeta, elem->pTable);
STColumn* pCol = &pSchema->columns[DEFAULT_TAG_INDEX_COLUMN];
int16_t type = 0;
void * res = tdQueryTagByID(row, pCol->colId,&type);
void * res = tdQueryTagByID(row, pCol->colId, &type);
ASSERT(type == pCol->type);
return res;
}
@ -142,6 +143,7 @@ int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
void tsdbOrgMeta(void *pHandle) {
STsdbMeta *pMeta = (STsdbMeta *)pHandle;
STsdbRepo *pRepo = (STsdbRepo *)pMeta->pRepo;
for (int i = 1; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
@ -149,13 +151,20 @@ void tsdbOrgMeta(void *pHandle) {
tsdbAddTableIntoIndex(pMeta, pTable);
}
}
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, i, pTable->sql, tsdbGetTableSchema(pMeta, pTable));
}
}
}
/**
* Initialize the meta handle
* ASSUMPTIONS: VALID PARAMETER
*/
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables) {
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables, void *pRepo) {
STsdbMeta *pMeta = (STsdbMeta *)malloc(sizeof(STsdbMeta));
if (pMeta == NULL) return NULL;
@ -165,6 +174,7 @@ STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables) {
pMeta->tables = (STable **)calloc(maxTables, sizeof(STable *));
pMeta->maxRowBytes = 0;
pMeta->maxCols = 0;
pMeta->pRepo = pRepo;
if (pMeta->tables == NULL) {
free(pMeta);
return NULL;
@ -189,13 +199,16 @@ STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables) {
}
int32_t tsdbFreeMeta(STsdbMeta *pMeta) {
STsdbRepo *pRepo = (STsdbRepo *)pMeta->pRepo;
if (pMeta == NULL) return 0;
tsdbCloseMetaFile(pMeta->mfh);
for (int i = 1; i < pMeta->maxTables; i++) {
if (pMeta->tables[i] != NULL) {
tsdbFreeTable(pMeta->tables[i]);
STable *pTable = pMeta->tables[i];
if (pTable->type == TSDB_STREAM_TABLE) (*pRepo->appH.cqDropFunc)(pTable->cqhandle);
tsdbFreeTable(pTable);
}
}
@ -243,30 +256,18 @@ int32_t tsdbGetTableTagVal(TsdbRepoT* repo, STableId* id, int32_t colId, int16_t
STsdbMeta* pMeta = tsdbGetMeta(repo);
STable* pTable = tsdbGetTableByUid(pMeta, id->uid);
STSchema* pSchema = tsdbGetTableTagSchema(pMeta, pTable);
STColumn* pCol = NULL;
*val = tdQueryTagByID(pTable->tagVal, colId, type);
// todo binary search
for(int32_t col = 0; col < schemaNCols(pSchema); ++col) {
STColumn* p = schemaColAt(pSchema, col);
if (p->colId == colId) {
pCol = p;
break;
if (*val != NULL) {
switch(*type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: *bytes = varDataLen(*val); break;
case TSDB_DATA_TYPE_NULL: *bytes = 0; break;
default:
*bytes = tDataTypeDesc[*type].nSize;break;
}
}
if (pCol == NULL) {
return -1; // No matched tags. Maybe the modification of tags has not been done yet.
}
SDataRow row = (SDataRow)pTable->tagVal;
int16_t tagtype = 0;
char* d = tdQueryTagByID(row, pCol->colId, &tagtype);
//ASSERT((int8_t)tagtype == pCol->type)
*val = d;
*type = pCol->type;
*bytes = pCol->bytes;
return TSDB_CODE_SUCCESS;
}
@ -393,7 +394,9 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
return -1;
}
}
table->lastKey = TSKEY_INITIAL_VAL;
// Register to meta
if (newSuper) {
tsdbAddTableToMeta(pMeta, super, true);
@ -512,6 +515,7 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
}
static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx) {
STsdbRepo *pRepo = (STsdbRepo *)pMeta->pRepo;
if (pTable->type == TSDB_SUPER_TABLE) {
// add super table to the linked list
if (pMeta->superList == NULL) {
@ -531,7 +535,7 @@ static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx) {
tsdbAddTableIntoIndex(pMeta, pTable);
}
if (pTable->type == TSDB_STREAM_TABLE && addIdx) {
// TODO
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, pTable->tableId.tid, pTable->sql, tsdbGetTableSchema(pMeta, pTable));
}
pMeta->nTables++;

View File

@ -443,6 +443,11 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) {
for (uint32_t i = 0; i < pHelper->config.maxTables; i++) {
SCompIdx *pCompIdx = pHelper->pCompIdx + i;
if (pCompIdx->offset > 0) {
int drift = POINTER_DISTANCE(buf, pHelper->pBuffer);
if (tsizeof(pHelper->pBuffer) - drift < 128) {
pHelper->pBuffer = trealloc(pHelper->pBuffer, tsizeof(pHelper->pBuffer)*2);
}
buf = POINTER_SHIFT(pHelper->pBuffer, drift);
buf = taosEncodeVariant32(buf, i);
buf = tsdbEncodeSCompIdx(buf, pCompIdx);
}
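The inserted lines keep the write cursor valid across trealloc by saving it as a byte offset (drift) from the buffer base and re-deriving the pointer once the buffer may have moved. The same idea as a generic helper (hypothetical names; assumes one doubling is enough, as the block above does):

#include <stdlib.h>
#include <string.h>

/* append len bytes at *cursor, growing the buffer (base *buf, capacity *cap) when needed */
static int appendBytes(char** buf, size_t* cap, char** cursor, const void* data, size_t len) {
  size_t drift = (size_t)(*cursor - *buf);  // the position survives realloc as an offset
  if (*cap - drift < len) {
    char* p = realloc(*buf, *cap * 2);
    if (p == NULL) return -1;
    *buf = p;
    *cap *= 2;
    *cursor = *buf + drift;                 // re-derive the cursor from the new base
  }
  memcpy(*cursor, data, len);
  *cursor += len;
  return 0;
}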
@ -469,6 +474,7 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
ASSERT(pFile->info.offset > TSDB_FILE_HEAD_SIZE);
if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) return -1;
if ((pHelper->pBuffer = trealloc(pHelper->pBuffer, pFile->info.len)) == NULL) return -1;
if (tread(fd, (void *)(pHelper->pBuffer), pFile->info.len) < pFile->info.len)
return -1;
if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pBuffer), pFile->info.len)) {

View File

@ -95,7 +95,6 @@ typedef struct STsdbQueryHandle {
SQueryFilePos cur; // current position
int16_t order;
STimeWindow window; // the primary query time window that applies to all queries
SCompBlock* pBlock;
SDataStatis* statis; // query level statistics, only one table block statistics info exists at any time
int32_t numOfBlocks;
SArray* pColumns; // column list, SColumnInfoData array list
@ -117,6 +116,12 @@ typedef struct STsdbQueryHandle {
} STsdbQueryHandle;
static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
pBlockLoadInfo->slot = -1;
@ -188,9 +193,6 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
}
}
for(int32_t i = 0; i < numOfCols; ++i) {
}
uTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
@ -209,13 +211,29 @@ TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STable
return pQueryHandle;
}
SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle) {
assert(pHandle != NULL);
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) pHandle;
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
SArray* res = taosArrayInit(size, sizeof(STableId));
for(int32_t i = 0; i < size; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
taosArrayPush(res, &pCheckInfo->tableId);
}
return res;
}
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
pQueryHandle->order = TSDB_ORDER_ASC;
// pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded
// changeQueryHandleForLastrowQuery(pQueryHandle);
changeQueryHandleForInterpQuery(pQueryHandle);
return pQueryHandle;
}
@ -328,13 +346,35 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
(pCheckInfo->lastKey < pHandle->window.ekey && !ASCENDING_TRAVERSE(pHandle->order))) {
return false;
}
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
STimeWindow* win = &pHandle->cur.win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
pCheckInfo->lastKey = win->ekey + step;
pHandle->cur.lastKey = win->ekey + step;
pHandle->cur.mixBlock = true;
if (!ASCENDING_TRAVERSE(pHandle->order)) {
SWAP(win->skey, win->ekey, TSKEY);
}
return true;
}
static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile) {
if (key == TSKEY_INITIAL_VAL) {
return INT32_MIN;
}
int64_t fid = (int64_t)(key / (daysPerFile * tsMsPerDay[0])); // set the starting fileId
if (fid > INT32_MAX) {
if (fid < 0L && llabs(fid) > INT32_MAX) { // data value overflow for INT32
fid = INT32_MIN;
}
if (fid > 0L && fid > INT32_MAX) {
fid = INT32_MAX;
}
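The replacement clamps the 64-bit quotient into the int32 range instead of letting the narrowing conversion wrap. The same saturation, in isolation:

#include <stdint.h>

/* clamp a 64-bit value into the int32 range instead of letting the
 * narrowing conversion wrap around */
static int32_t saturateToInt32(int64_t v) {
  if (v < INT32_MIN) return INT32_MIN;
  if (v > INT32_MAX) return INT32_MAX;
  return (int32_t)v;
}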
@ -472,12 +512,6 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
return pLocalIdList;
}
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle);
static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
STsdbRepo *pRepo = pQueryHandle->pTsdb;
SCompData* data = calloc(1, sizeof(SCompData) + sizeof(SCompCol) * pBlock->numOfCols);
@ -581,13 +615,21 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
}
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
taosArrayDestroy(sa);
} else {
/*
* no data in cache, only load data from file
* during the query processing, data in cache will not be checked anymore.
*
* Here the buffer is not large enough, so only part of the file block can be loaded into the memory buffer
*/
assert(pQueryHandle->outputCapacity >= binfo.rows);
pQueryHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
cur->win = binfo.window;
cur->mixBlock = false;
@ -622,15 +664,14 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
}
} else { //desc order, query ended in current block
if (pQueryHandle->window.ekey > pBlock->keyFirst) {
if (pQueryHandle->window.ekey > pBlock->keyFirst || pCheckInfo->lastKey < pBlock->keyLast) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false;
}
SDataCols* pDataCols = pCheckInfo->pDataCols;
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
if (pCheckInfo->lastKey < pBlock->keyLast) {
cur->pos =
binarySearchForKey(pDataCols->cols[0].pData, pBlock->numOfRows, pCheckInfo->lastKey, pQueryHandle->order);
cur->pos = binarySearchForKey(pTSCol->cols[0].pData, pBlock->numOfRows, pCheckInfo->lastKey, pQueryHandle->order);
} else {
cur->pos = pBlock->numOfRows - 1;
}
@ -1011,7 +1052,7 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
firstPos = 0;
lastPos = num - 1;
if (order == 0) {
if (order == TSDB_ORDER_DESC) {
// find the first position which is smaller than the key
while (1) {
if (key >= keyList[lastPos]) return lastPos;
@ -1293,7 +1334,7 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
// todo add assert, the value of numOfTables should be less than the maximum value for each vnode capacity
assert(numOfTables <= ((STsdbRepo*)pQueryHandle->pTsdb)->config.maxTables);
while (pQueryHandle->activeIndex < numOfTables) {
if (hasMoreDataInCache(pQueryHandle)) {
@ -1307,12 +1348,116 @@ static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
}
// handle data in cache situation
bool tsdbNextDataBlock(TsdbQueryHandleT* pqHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pqHandle;
bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(numOfTables > 0);
if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) {
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
pQueryHandle->order = TSDB_ORDER_DESC;
if (!tsdbNextDataBlock(pHandle)) {
return false;
}
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa);
if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) {
// data already retrieve, discard other data rows and return
int32_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
memcpy(pCol->pData, pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows-1), pCol->info.bytes);
}
pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey};
pQueryHandle->window = pQueryHandle->cur.win;
pQueryHandle->cur.rows = 1;
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
return true;
} else {
STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
pSecQueryHandle->order = TSDB_ORDER_ASC;
pSecQueryHandle->window = (STimeWindow) {pQueryHandle->window.skey, INT64_MAX};
pSecQueryHandle->pTsdb = pQueryHandle->pTsdb;
pSecQueryHandle->type = TSDB_QUERY_TYPE_ALL;
pSecQueryHandle->cur.fid = -1;
pSecQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pSecQueryHandle->checkFiles = true;
pSecQueryHandle->activeIndex = 0;
pSecQueryHandle->outputCapacity = ((STsdbRepo*)pSecQueryHandle->pTsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pSecQueryHandle->rhelper, (STsdbRepo*) pSecQueryHandle->pTsdb);
// allocate buffer in order to load data blocks from file
int32_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
pSecQueryHandle->statis = calloc(numOfCols, sizeof(SDataStatis));
pSecQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData colInfo = {{0}, 0};
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
colInfo.info = pCol->info;
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCol->info.bytes);
taosArrayPush(pSecQueryHandle->pColumns, &colInfo);
pSecQueryHandle->statis[i].colId = colInfo.info.colId;
}
size_t si = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
pSecQueryHandle->pTableCheckInfo = taosArrayInit(si, sizeof(STableCheckInfo));
STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
assert(pMeta != NULL);
for (int32_t j = 0; j < si; ++j) {
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j);
STableCheckInfo info = {
.lastKey = pSecQueryHandle->window.skey,
.tableId = pCheckInfo->tableId,
.pTableObj = pCheckInfo->pTableObj,
};
taosArrayPush(pSecQueryHandle->pTableCheckInfo, &info);
}
tsdbInitDataBlockLoadInfo(&pSecQueryHandle->dataBlockLoadInfo);
tsdbInitCompBlockLoadInfo(&pSecQueryHandle->compBlockLoadInfo);
bool ret = tsdbNextDataBlock((void*) pSecQueryHandle);
assert(ret);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
memcpy(pCol->pData, pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows-1), pCol->info.bytes);
SColumnInfoData* pCol1 = taosArrayGet(pSecQueryHandle->pColumns, i);
assert(pCol->info.colId == pCol1->info.colId);
memcpy(pCol->pData + pCol->info.bytes, pCol1->pData, pCol1->info.bytes);
}
SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0);
pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]};
pQueryHandle->window = pQueryHandle->cur.win;
pQueryHandle->cur.rows = 2;
tsdbCleanupQueryHandle(pSecQueryHandle);
}
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
return true;
}
if (pQueryHandle->checkFiles) {
if (getDataBlocksInFiles(pQueryHandle)) {
return true;
@ -1322,7 +1467,6 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pqHandle) {
pQueryHandle->checkFiles = false;
}
// TODO: opt by using lastKeyOnFile
// TODO: opt by considering the scan order
return doHasDataInBuffer(pQueryHandle);
}
@ -1336,23 +1480,25 @@ void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
// todo: consider the query time window; currently last_row does not honor the query time window
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
TSKEY key = 0;
TSKEY key = TSKEY_INITIAL_VAL;
int32_t index = -1;
for(int32_t i = 0; i < numOfTables; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
if (pCheckInfo->pTableObj->lastKey > key) { //todo lastKey should not be 0 by default
if (pCheckInfo->pTableObj->lastKey > key) {
key = pCheckInfo->pTableObj->lastKey;
index = i;
}
}
// todo: no data in any of the tables; optimize this case
if (index == -1) {
return;
}
// erase all other elements in array list, todo refactor
// erase all other elements in array list
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
for (int32_t i = 0; i < size; ++i) {
if (i == index) {
@ -1371,9 +1517,7 @@ void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
}
STableCheckInfo info = *(STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, index);
taosArrayDestroy(pQueryHandle->pTableCheckInfo);
pQueryHandle->pTableCheckInfo = taosArrayInit(1, sizeof(STableCheckInfo));
taosArrayClear(pQueryHandle->pTableCheckInfo);
info.lastKey = key;
taosArrayPush(pQueryHandle->pTableCheckInfo, &info);
@ -1382,6 +1526,43 @@ void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
pQueryHandle->window = (STimeWindow) {key, key};
}
static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
// filter the queried time stamp in the first place
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
pQueryHandle->order = TSDB_ORDER_DESC;
assert(pQueryHandle->window.skey == pQueryHandle->window.ekey);
// start from the buffer, then check data blocks, in the case of descending timestamp order
// todo: consider the query time window; currently last_row does not honor the query time window
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
int32_t i = 0;
while(i < numOfTables) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
if (pQueryHandle->window.skey <= pCheckInfo->pTableObj->lastKey &&
pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL) {
break;
}
i++;
}
// no data in any of the tables
if (i == numOfTables) {
return;
}
STableCheckInfo info = *(STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, i);
taosArrayClear(pQueryHandle->pTableCheckInfo);
info.lastKey = pQueryHandle->window.skey;
taosArrayPush(pQueryHandle->pTableCheckInfo, &info);
// update the query time window according to the chosen last timestamp
pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL};
}
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle) {
int numOfRows = 0;
@ -1466,58 +1647,29 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
return numOfRows;
}
// copy data from cache into data block
SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle;
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
SQueryFilePos* cur = &pHandle->cur;
STable* pTable = NULL;
// there is data in the file
if (pHandle->cur.fid >= 0) {
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot];
STable* pTable = pBlockInfo->pTableCheckInfo->pTableObj;
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = pHandle->cur.rows,
.window = pHandle->cur.win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot];
pTable = pBlockInfo->pTableCheckInfo->pTableObj;
} else {
STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex);
SQueryFilePos* cur = &pHandle->cur;
STable* pTable = pCheckInfo->pTableObj;
if (pTable->mem != NULL) { // create mem table iterator if it is not created yet
assert(pCheckInfo->iter != NULL);
STimeWindow* win = &cur->win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
pCheckInfo->lastKey = win->ekey + step;
cur->lastKey = win->ekey + step;
cur->mixBlock = true;
}
if (!ASCENDING_TRAVERSE(pHandle->order)) {
SWAP(pHandle->cur.win.skey, pHandle->cur.win.ekey, TSKEY);
}
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = pHandle->cur.rows,
.window = pHandle->cur.win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
pTable = pCheckInfo->pTableObj;
}
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = cur->rows,
.window = cur->win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
}
/*
@ -1536,6 +1688,13 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
((cur->slot == pHandle->numOfBlocks) && (cur->slot == 0)));
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot];
// file block with subblocks has no statistics data
if (pBlockInfo->compBlock->numOfSubBlocks > 1) {
*pBlockStatis = NULL;
return TSDB_CODE_SUCCESS;
}
tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL);
size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle);
@ -1708,12 +1867,7 @@ void filterPrepare(void* expr, void* param) {
pInfo->q = (char*) pCond->arr;
} else {
pInfo->q = calloc(1, pSchema->bytes);
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
tVariantDump(pCond, varDataVal(pInfo->q), pSchema->type);
varDataSetLen(pInfo->q, pCond->nLen); // the length may be changed after dump, so assign its value after dump
} else {
tVariantDump(pCond, pInfo->q, pSchema->type);
}
tVariantDump(pCond, pInfo->q, pSchema->type, true);
}
}
@ -1843,13 +1997,11 @@ bool indexedNodeFilterFp(const void* pNode, void* param) {
val = (char*) elem->pTable->name;
type = TSDB_DATA_TYPE_BINARY;
} else {
// STSchema* pTSchema = (STSchema*) pInfo->param; // todo table schema is identical to stable schema??
int16_t type;
// int32_t offset = pTSchema->columns[pInfo->colIndex].offset;
// val = tdGetRowDataOfCol(elem->pTable->tagVal, pInfo->sch.type, TD_DATA_ROW_HEAD_SIZE + offset);
val = tdQueryTagByID(elem->pTable->tagVal, pInfo->sch.colId, &type);
// ASSERT(pInfo->sch.type == type);
int16_t t1;
val = tdQueryTagByID(elem->pTable->tagVal, pInfo->sch.colId, &t1);
assert(pInfo->sch.type == t1);
}
// todo: val may be null, so check it carefully
int32_t ret = 0;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {

View File

@ -30,24 +30,19 @@ typedef void (*_hash_free_fn_t)(void *param);
typedef struct SHashNode {
char *key;
union {
// union {
struct SHashNode * prev;
struct SHashEntry *prev1;
};
// struct SHashEntry *prev1;
// };
//
struct SHashNode *next;
uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash
uint32_t keyLen; // length of the key
char data[];
} SHashNode;
typedef struct SHashEntry {
SHashNode *next;
uint32_t num;
} SHashEntry;
typedef struct SHashObj {
SHashEntry ** hashList;
SHashNode **hashList;
size_t capacity; // number of slots
size_t size; // number of elements in hash table
_hash_fn_t hashFp; // hash function

View File

@ -46,6 +46,7 @@ extern "C" {
// Shift pointer p right by b bytes
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
#define POINTER_DISTANCE(p1, p2) ((char *)(p1) - (char *)(p2))
#ifndef NDEBUG
#define ASSERT(x) assert(x)

View File

@ -83,17 +83,10 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
int32_t len = MIN(length, HASH_MAX_CAPACITY);
uint32_t i = 4;
while (i < len) i = (i << 1U);
while (i < len) i = (i << 1u);
return i;
}
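taosHashCapacity rounds the requested length up to the next power of two (capped at HASH_MAX_CAPACITY) so that HASH_INDEX can reduce a hash value with a cheap mask. A quick check of the loop:

#include <assert.h>

static int nextPow2(int len) {  // the same loop, minus the HASH_MAX_CAPACITY cap
  int i = 4;
  while (i < len) i <<= 1;
  return i;
}

int main(void) {
  assert(nextPow2(100) == 128);  // 4 -> 8 -> 16 -> 32 -> 64 -> 128
  assert(nextPow2(4) == 4);
  return 0;
}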
/**
* inplace update node in hash table
* @param pHashObj hash table object
* @param pNode hash data node
*/
static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode);
/**
* Get SHashNode from hashlist, nodes from trash are not included.
* @param pHashObj Cache object
@ -105,10 +98,9 @@ static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode);
FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) {
uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[slot];
int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
SHashNode *pNode = pHashObj->hashList[slot];
SHashNode *pNode = pEntry->next;
while (pNode) {
if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
break;
@ -190,17 +182,13 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) {
pHashObj->hashFp = fn;
pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(SHashEntry *));
pHashObj->hashList = (SHashNode **)calloc(pHashObj->capacity, POINTER_BYTES);
if (pHashObj->hashList == NULL) {
free(pHashObj);
uError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
if (threadsafe) {
#if defined(LINUX)
pHashObj->lock = calloc(1, sizeof(pthread_rwlock_t));
@ -252,7 +240,18 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da
return -1;
}
doUpdateHashTable(pHashObj, pNewNode);
if (pNewNode->prev) {
pNewNode->prev->next = pNewNode;
} else {
int32_t slot = HASH_INDEX(pNewNode->hashVal, pHashObj->capacity);
assert(pHashObj->hashList[slot] == pNode);
pHashObj->hashList[slot] = pNewNode;
}
if (pNewNode->next) {
(pNewNode->next)->prev = pNewNode;
}
}
__unlock(pHashObj->lock);
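With SHashEntry gone, each hashList slot is simply the head pointer of a doubly linked node chain, and the code above splices the updated node back in by hand. The splice in isolation (a minimal hypothetical node type):

struct Node {
  struct Node* prev;
  struct Node* next;
};

/* replace oldNode with newNode in the doubly linked chain rooted at *head */
static void spliceReplace(struct Node** head, struct Node* oldNode, struct Node* newNode) {
  newNode->prev = oldNode->prev;
  newNode->next = oldNode->next;
  if (newNode->prev != NULL) {
    newNode->prev->next = newNode;
  } else {
    *head = newNode;  // oldNode was the slot head
  }
  if (newNode->next != NULL) {
    newNode->next->prev = newNode;
  }
}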
@ -287,24 +286,19 @@ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
SHashNode *pNext = pNode->next;
if (pNode->prev != NULL) {
if (pNode->prev == NULL) {
int32_t slot = HASH_INDEX(val, pHashObj->capacity);
if (pHashObj->hashList[slot]->next == pNode) {
pHashObj->hashList[slot]->next = pNext;
} else {
pNode->prev->next = pNext;
}
assert(pHashObj->hashList[slot] == pNode);
pHashObj->hashList[slot] = pNext;
} else {
pNode->prev->next = pNext;
}
if (pNext != NULL) {
pNext->prev = pNode->prev;
}
uint32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[index];
pEntry->num--;
pHashObj->size--;
pNode->next = NULL;
@ -325,8 +319,7 @@ void taosHashCleanup(SHashObj *pHashObj) {
if (pHashObj->hashList) {
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
pNode = pEntry->next;
pNode = pHashObj->hashList[i];
while (pNode) {
pNext = pNode->next;
@ -337,8 +330,6 @@ void taosHashCleanup(SHashObj *pHashObj) {
free(pNode);
pNode = pNext;
}
tfree(pEntry);
}
free(pHashObj->hashList);
@ -385,13 +376,13 @@ bool taosHashIterNext(SHashMutableIterator *pIter) {
assert(pIter->pCur == NULL && pIter->pNext == NULL);
while (1) {
SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
if (pEntry->next == NULL) {
SHashNode *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
if (pEntry == NULL) {
pIter->entryIndex++;
continue;
}
pIter->pCur = pEntry->next;
pIter->pCur = pEntry;
if (pIter->pCur->next) {
pIter->pNext = pIter->pCur->next;
@ -444,25 +435,25 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) {
int32_t num = 0;
for (int32_t i = 0; i < pHashObj->size; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
if (num < pEntry->num) {
num = pEntry->num;
SHashNode *pEntry = pHashObj->hashList[i];
if (pEntry == NULL) {
continue;
}
int32_t j = 0;
while(pEntry != NULL) {
pEntry = pEntry->next;
j++;
}
if (num < j) {
num = j;
}
}
return num;
}
void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
if (pNode->prev1) {
pNode->prev1->next = pNode;
}
if (pNode->next) {
(pNode->next)->prev = pNode;
}
}
void taosHashTableResize(SHashObj *pHashObj) {
if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
return;
@ -479,69 +470,53 @@ void taosHashTableResize(SHashObj *pHashObj) {
return;
}
// int64_t st = taosGetTimestampUs();
SHashEntry **pNewEntry = realloc(pHashObj->hashList, sizeof(SHashEntry *) * newSize);
if (pNewEntry == NULL) {
void *pNewEntry = realloc(pHashObj->hashList, POINTER_BYTES * newSize);
if (pNewEntry == NULL) {// todo handle error
// uTrace("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity);
return;
}
pHashObj->hashList = pNewEntry;
for (int32_t i = pHashObj->capacity; i < newSize; ++i) {
pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
memset(&pHashObj->hashList[pHashObj->capacity], 0, POINTER_BYTES * (newSize - pHashObj->capacity));
pHashObj->capacity = newSize;
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
pNode = pEntry->next;
pNode = pHashObj->hashList[i];
if (pNode != NULL) {
assert(pNode->prev1 == pEntry && pEntry->num > 0);
assert(pNode->prev == NULL);
}
while (pNode) {
int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
if (j == i) { // this key resides in the same slot, no need to relocate it
if (j == i) { // this key locates in the same slot, no need to relocate it
pNode = pNode->next;
} else {
pNext = pNode->next;
// remove from current slot
assert(pNode->prev1 != NULL);
if (pNode->prev1 == pEntry) { // first node of the overflow linked list
pEntry->next = pNode->next;
if (pNode->prev == NULL) { // first node of the overflow linked list
pHashObj->hashList[i] = pNext;
} else {
pNode->prev->next = pNode->next;
pNode->prev->next = pNext;
}
pEntry->num--;
assert(pEntry->num >= 0);
if (pNode->next != NULL) {
(pNode->next)->prev = pNode->prev;
if (pNext != NULL) {
pNext->prev = pNode->prev;
}
// clear pointer
pNode->next = NULL;
pNode->prev = NULL;
// added into new slot
pNode->next = NULL;
pNode->prev1 = NULL;
SHashEntry *pNewIndexEntry = pHashObj->hashList[j];
if (pNewIndexEntry->next != NULL) {
assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
pNewIndexEntry->next->prev = pNode;
SHashNode *pNew = pHashObj->hashList[j];
if (pNew != NULL) {
assert(pNew->prev == NULL);
pNew->prev = pNode;
}
pNode->next = pNewIndexEntry->next;
pNode->prev1 = pNewIndexEntry;
pNewIndexEntry->next = pNode;
pNewIndexEntry->num++;
pNode->next = pNew;
pHashObj->hashList[j] = pNode;
// continue
pNode = pNext;
@ -549,7 +524,6 @@ void taosHashTableResize(SHashObj *pHashObj) {
}
}
// int64_t et = taosGetTimestampUs();
// uTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}
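
The resize path doubles the pointer array, zeroes the new tail, and then walks every chain, head-inserting any node whose recomputed slot changed. A sketch of that relocation step, reusing the SNodeSketch stand-in from the lookup sketch above:

// Unlink pNode from old slot i, then head-insert it into new slot j.
static void sketchRelocate(SNodeSketch **hashList, SNodeSketch *pNode,
                           int32_t i, int32_t j) {
  if (pNode->prev == NULL) {
    hashList[i] = pNode->next;        // pNode was the head of slot i
  } else {
    pNode->prev->next = pNode->next;
  }
  if (pNode->next != NULL) {
    pNode->next->prev = pNode->prev;
  }
  pNode->prev = NULL;                 // becomes the new head of slot j
  pNode->next = hashList[j];
  if (hashList[j] != NULL) {
    hashList[j]->prev = pNode;
  }
  hashList[j] = pNode;
}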
@ -595,19 +569,17 @@ SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, co
void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) {
assert(pNode != NULL);
int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[index];
int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
pNode->next = pEntry->next;
if (pEntry->next) {
pEntry->next->prev = pNode;
SHashNode* pEntry = pHashObj->hashList[index];
if (pEntry != NULL) {
pEntry->prev = pNode;
pNode->next = pEntry;
pNode->prev = NULL;
}
pEntry->next = pNode;
pNode->prev1 = pEntry;
pEntry->num++;
pHashObj->hashList[index] = pNode;
pHashObj->size++;
}
@ -616,13 +588,13 @@ SHashNode *getNextHashNode(SHashMutableIterator *pIter) {
pIter->entryIndex++;
while (pIter->entryIndex < pIter->pHashObj->capacity) {
SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
if (pEntry->next == NULL) {
SHashNode *pNode = pIter->pHashObj->hashList[pIter->entryIndex];
if (pNode == NULL) {
pIter->entryIndex++;
continue;
}
return pEntry->next;
return pNode;
}
return NULL;

View File

@ -92,7 +92,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1);
int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE);
if (ret == 0) {
return 0;
} else {

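The fix works because the count argument of wcsncmp is measured in wide characters, while len1 here is a byte length, so the byte count has to be divided by TSDB_NCHAR_SIZE (the width of one stored wide character). A self-contained illustration of the unit mismatch:

#include <assert.h>
#include <wchar.h>

int main(void) {
  const wchar_t a[] = L"abc";
  const wchar_t b[] = L"abd";
  size_t byteLen = 3 * sizeof(wchar_t);                  // length as stored, in bytes
  assert(wcsncmp(a, b, byteLen / sizeof(wchar_t)) < 0);  // count is in wide chars
  return 0;
}
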
View File

@ -241,7 +241,12 @@ void taosReadGlobalLogCfg() {
wordexp_t full_path;
wordexp(configDir, &full_path, 0);
if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) {
if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
printf("\nconfig file: %s path overflow max len %d, all variables are set to default\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return;
}
strcpy(configDir, full_path.we_wordv[0]);
} else {
printf("configDir:%s not there, use default value: /etc/taos", configDir);

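The new guard checks the expanded path's length before strcpy into the fixed-size configDir buffer and frees the wordexp result on the overflow path. A minimal standalone version of the same pattern, with MAX_PATH_LEN standing in for TSDB_FILENAME_LEN:

#include <stdio.h>
#include <string.h>
#include <wordexp.h>

#define MAX_PATH_LEN 128   // stand-in for TSDB_FILENAME_LEN

// Expand a shell-style path and copy it out only if it fits the buffer.
static int expandPath(const char *in, char out[MAX_PATH_LEN]) {
  wordexp_t full_path;
  if (wordexp(in, &full_path, 0) != 0) return -1;
  int rc = -1;
  if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL &&
      strlen(full_path.we_wordv[0]) <= MAX_PATH_LEN - 1) {
    strcpy(out, full_path.we_wordv[0]);   // safe: length checked first
    rc = 0;
  }
  wordfree(&full_path);                   // release the expansion either way
  return rc;
}

int main(void) {
  char dir[MAX_PATH_LEN];
  if (expandPath("~", dir) == 0) printf("%s\n", dir);
  return 0;
}
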
View File

@ -18,24 +18,24 @@
#include "taoserror.h"
#include "tqueue.h"
typedef struct _taos_qnode {
typedef struct STaosQnode {
int type;
struct _taos_qnode *next;
struct STaosQnode *next;
char item[];
} STaosQnode;
typedef struct _taos_q {
typedef struct STaosQueue {
int32_t itemSize;
int32_t numOfItems;
struct _taos_qnode *head;
struct _taos_qnode *tail;
struct _taos_q *next; // for queue set
struct _taos_qset *qset; // for queue set
struct STaosQnode *head;
struct STaosQnode *tail;
struct STaosQueue *next; // for queue set
struct STaosQset *qset; // for queue set
void *ahandle; // for queue set
pthread_mutex_t mutex;
} STaosQueue;
typedef struct _taos_qset {
typedef struct STaosQset {
STaosQueue *head;
STaosQueue *current;
pthread_mutex_t mutex;
@ -44,7 +44,7 @@ typedef struct _taos_qset {
tsem_t sem;
} STaosQset;
typedef struct _taos_qall {
typedef struct STaosQall {
STaosQnode *current;
STaosQnode *start;
int32_t itemSize;
@ -95,6 +95,7 @@ void *taosAllocateQitem(int size) {
void taosFreeQitem(void *param) {
if (param == NULL) return;
uTrace("item:%p is freed", param);
char *temp = (char *)param;
temp -= sizeof(STaosQnode);
free(temp);
@ -104,6 +105,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) {
STaosQueue *queue = (STaosQueue *)param;
STaosQnode *pNode = (STaosQnode *)(((char *)item) - sizeof(STaosQnode));
pNode->type = type;
pNode->next = NULL;
pthread_mutex_lock(&queue->mutex);
@ -143,7 +145,7 @@ int taosReadQitem(taos_queue param, int *type, void **pitem) {
queue->numOfItems--;
if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, 1);
code = 1;
uTrace("item:%p is read out from queue, items:%d", *pitem, queue->numOfItems);
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
}
pthread_mutex_unlock(&queue->mutex);
@ -337,6 +339,7 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
queue->numOfItems--;
atomic_sub_fetch_32(&qset->numOfItems, 1);
code = 1;
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
}
pthread_mutex_unlock(&queue->mutex);
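
Besides giving the structs proper tag names, the hunk zeroes pNode->next on every write so a recycled item cannot drag a stale chain into the queue, and the new trace lines log the owning queue pointer. A hedged usage sketch built from the signatures visible in this file (taosOpenQueue and its exact shape are an assumption, since its declaration is not part of the hunk):

#include "tqueue.h"   // same header as above

typedef struct { int payload; } SDemoItem;

void queueDemo(void) {
  taos_queue q = taosOpenQueue();            // assumed constructor
  SDemoItem *item = taosAllocateQitem(sizeof(SDemoItem));
  item->payload = 42;
  taosWriteQitem(q, 1, item);                // enqueue under type tag 1

  int   type = 0;
  void *read = NULL;
  if (taosReadQitem(q, &type, &read) == 1) { // returns 1 when an item was read
    taosFreeQitem(read);                     // frees the enclosing STaosQnode
  }
}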

View File

@ -342,6 +342,7 @@ static void taosTimerLoopFunc(int signo) {
int64_t now = taosGetTimestampMs();
for (int i = 0; i < tListLen(wheels); i++) {
tmrTrace("begin processing wheel %d", i);
// `expired` is a temporary expire list.
// expired timers are first added to this list, then moved
// to the expired queue as a batch to improve performance.
@ -389,6 +390,7 @@ static void taosTimerLoopFunc(int signo) {
}
addToExpired(expired);
tmrTrace("end processing wheel %d", i);
}
}
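
The new trace lines bracket each wheel pass; the comment above them describes the batching: due timers are first unlinked onto a local list, then moved to the expired queue in one step. A generic sketch of that pattern (illustrative only, not the actual ttimer.c code):

#include <stddef.h>
#include <stdint.h>

typedef struct SDemoTimer {
  int64_t            deadline;
  struct SDemoTimer *next;
} SDemoTimer;

// Unlink every due timer onto a local list first, then hand the whole
// list over once, so the shared expired queue is touched a single time.
static void drainWheel(SDemoTimer **wheelHead, int64_t now,
                       void (*addToExpired)(SDemoTimer *batch)) {
  SDemoTimer  *expired = NULL;
  SDemoTimer **pp = wheelHead;
  while (*pp != NULL) {
    SDemoTimer *t = *pp;
    if (t->deadline <= now) {
      *pp = t->next;           // unlink from the wheel
      t->next = expired;       // push onto the local expired list
      expired = t;
    } else {
      pp = &t->next;
    }
  }
  if (expired != NULL) addToExpired(expired);
}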

View File

@ -149,8 +149,8 @@ int main(int argc, char** argv) {
}
TEST(testCase, hashTest) {
// simpleTest();
// stringKeyTest();
// noLockPerformanceTest();
// multithreadsTest();
simpleTest();
stringKeyTest();
noLockPerformanceTest();
multithreadsTest();
}

View File

@ -220,6 +220,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
appH.appH = (void *)pVnode;
appH.notifyStatus = vnodeProcessTsdbStatus;
appH.cqH = pVnode->cq;
appH.cqCreateFunc = cqCreate;
appH.cqDropFunc = cqDrop;
sprintf(temp, "%s/tsdb", rootDir);
pVnode->tsdb = tsdbOpenRepo(temp, &appH);
if (pVnode->tsdb == NULL) {
@ -391,14 +393,14 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
pVnode->sync = NULL;
}
if (pVnode->wal)
walClose(pVnode->wal);
pVnode->wal = NULL;
if (pVnode->tsdb)
tsdbCloseRepo(pVnode->tsdb, 1);
pVnode->tsdb = NULL;
if (pVnode->wal)
walClose(pVnode->wal);
pVnode->wal = NULL;
if (pVnode->cq)
cqClose(pVnode->cq);
pVnode->cq = NULL;
@ -467,6 +469,8 @@ static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
appH.appH = (void *)pVnode;
appH.notifyStatus = vnodeProcessTsdbStatus;
appH.cqH = pVnode->cq;
appH.cqCreateFunc = cqCreate;
appH.cqDropFunc = cqDrop;
pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);
}
@ -548,6 +552,7 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
int len = fread(content, 1, maxLen, fp);
if (len <= 0) {
vError("vgId:%d, failed to read vnode cfg, content is null", pVnode->vgId);
free(content);
return errno;
}

View File

@ -43,7 +43,7 @@ int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen,
return TSDB_CODE_MSG_NOT_PROCESSED;
if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING)
return TSDB_CODE_NOT_ACTIVE_VNODE;
return TSDB_CODE_INVALID_VGROUP_ID;
return (*vnodeProcessReadMsgFp[msgType])(pVnode, pCont, contLen, ret);
}

View File

@ -53,7 +53,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
if (pHead->version == 0) { // from client or CQ
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_NOT_ACTIVE_VNODE;
return TSDB_CODE_INVALID_VGROUP_ID; // it may be in deleting or closing state
if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER)
return TSDB_CODE_NOT_READY;
@ -139,12 +139,10 @@ static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRe
char *pTagData = pTable->data + totalCols * sizeof(SSchema);
int accumBytes = 0;
//dataRow = tdNewDataRowFromSchema(pDestTagSchema);
dataRow = tdNewTagRowFromSchema(pDestTagSchema, numOfTags);
for (int i = 0; i < numOfTags; i++) {
STColumn *pTCol = schemaColAt(pDestTagSchema, i);
// tdAppendColVal(dataRow, pTagData + accumBytes, pTCol->type, pTCol->bytes, pTCol->offset);
tdAppendTagColVal(dataRow, pTagData + accumBytes, pTCol->type, pTCol->bytes, pTCol->colId);
accumBytes += htons(pSchema[i + numOfColumns].bytes);
}

View File

@ -8,8 +8,8 @@
3. mkdir debug; cd debug; cmake ..; make ; sudo make install
4. pip install src/connector/python/linux/python2 ; pip3 install
src/connector/python/linux/python3
4. pip install ../src/connector/python/linux/python2 ; pip3 install
../src/connector/python/linux/python3
> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 is no longer officially supported by Python Software

View File

@ -16,11 +16,12 @@
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o demo demo.c -ltaos
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <taos.h> // TAOS header file
#include <unistd.h>
void taosMsleep(int mseconds);
@ -49,19 +50,52 @@ static int32_t doQuery(TAOS* taos, const char* sql) {
return 0;
}
void* oneLoader(void* param) {
TAOS* conn = (TAOS*) param;
for(int32_t i = 0; i < 20000; ++i) {
// doQuery(conn, "show databases");
doQuery(conn, "use test");
// doQuery(conn, "describe t12");
// doQuery(conn, "show tables");
// doQuery(conn, "create table if not exists abc (ts timestamp, k int)");
// doQuery(conn, "select * from t12");
}
return 0;
}
static __attribute__((unused)) void multiThreadTest(int32_t numOfThreads, void* conn) {
pthread_attr_t thattr;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
pthread_t* threadId = malloc(sizeof(pthread_t)*numOfThreads);
for (int i = 0; i < numOfThreads; ++i) {
pthread_create(&threadId[i], NULL, oneLoader, conn);
}
for (int32_t i = 0; i < numOfThreads; ++i) {
pthread_join(threadId[i], NULL);
}
pthread_attr_destroy(&thattr);
}
int main(int argc, char *argv[]) {
TAOS * taos;
char qstr[1024];
TAOS_RES *result;
// connect to server
if (argc < 2) {
printf("please input server-ip \n");
return 0;
}
taos_options(TSDB_OPTION_CONFIGDIR, "~/sec/cfg");
taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/TDinternal/community/sim/tsim/cfg");
// init TAOS
taos_init();
@ -73,15 +107,12 @@ int main(int argc, char *argv[]) {
}
printf("success to connect to server\n");
doQuery(taos, "create database if not exists test");
doQuery(taos, "use test");
doQuery(taos, "select count(*) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:1:59' interval(500a) fill(value, 99)");
// doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
// for(int32_t i = 0; i< 100000; ++i) {
// doQuery(taos, "select m1.ts,m1.a from m1, m2 where m1.ts=m2.ts and m1.a=m2.b;");
// usleep(500000);
// multiThreadTest(1, taos);
doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1, -2) group by t1 limit 2 offset 10;");
// for(int32_t i = 0; i < 100000; ++i) {
// doQuery(taos, "insert into t1 values(now, 2)");
// }
// doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:1', 'abc')");
// doQuery(taos, "create table if not exists tm0 (ts timestamp, k int);");

View File

@ -0,0 +1,33 @@
#!/usr/bin/gnuplot
reset
set terminal png
set title "Performance Test Report" font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output filename . '.png'
set datafile separator ','
set key reverse Left outside
set grid
# plot 'perftest-influx-report.csv' using 1:2 title "InfluxDB Write", \
# "" using 1:3 title "InfluxDB Query case1", \
# "" using 1:4 title "InfluxDB Query case2", \
# "" using 1:5 title "InfluxDB Query case3", \
# "" using 1:6 title "InfluxDB Query case4"
#
plot filename . '.csv' using 1:2 title "TDengine Write", \
"" using 1:3 title "TDengine Query case1", \
"" using 1:4 title "TDengine Query case2", \
"" using 1:5 title "TDengine Query case3", \
"" using 1:6 title "TDengine Query case4"

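This plot script is parameterized: the perftest driver scripts below invoke it as gnuplot -e "filename='perftest-1d-report'" -p perftest-csv2png.gnuplot (and likewise for the 13d report), so filename selects both the input CSV and the output PNG; columns 2-6 of the report CSV carry the write timing and the four query timings.
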
View File

@ -0,0 +1,95 @@
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function set-Wal {
echo "/etc/taos/taos.cfg walLevel will be set to $1"
sed -i 's/^walLevel.*$/walLevel '"$1"'/g' /etc/taos/taos.cfg
}
function collectSysInfo {
rm sysinfo.log
grep model /proc/cpuinfo | tail -n1 | tee sysinfo.log
grep cores /proc/cpuinfo | tail -n1 | tee -a sysinfo.log
grep MemTotal /proc/meminfo | tee -a sysinfo.log
grep "^[^#;]" /etc/taos/taos.cfg | tee taos.cfg
}
function buildTDengine {
cd /root/TDengine
git pull
cd debug
rm -rf *
cmake ..
make > /dev/null
make install
}
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function sendReport {
receiver="sdsang@taosdata.com, sangshuduo@gmail.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
echo -e "to: ${receiver}\nsubject: Perf test report ${today}\n" | \
(cat - && uuencode perftest-1d-$today.log perftest-1d-$today.log)| \
(cat - && uuencode perftest-1d-report.csv perftest-1d-report-$today.csv) | \
(cat - && uuencode perftest-1d-report.png perftest-1d-report-$today.png) | \
(cat - && uuencode perftest-13d-$today.log perftest-13d-$today.log)| \
(cat - && uuencode perftest-13d-report.csv perftest-13d-report-$today.csv) | \
(cat - && uuencode perftest-13d-report.png perftest-13d-report-$today.png) | \
(cat - && uuencode taosdemo-$today.log taosdemo-$today.log) | \
(cat - && uuencode taosdemo-report.csv taosdemo-report-$today.csv) | \
(cat - && uuencode taosdemo-report.png taosdemo-report-$today.png) | \
(cat - && uuencode sysinfo.log sysinfo.txt) | \
(cat - && uuencode taos.cfg taos-cfg-$today.txt) | \
ssmtp "${receiver}"
}
today=`date +"%Y%m%d"`
cd /root
echo -e "cron-ran-at-${today}" >> cron.log
echoInfo "Build TDengine"
buildTDengine
set-Wal "2"
cd /root
./perftest-tsdb-compare-1d.sh
cd /root
./perftest-tsdb-compare-13d.sh
cd /root
./perftest-taosdemo.sh
collectSysInfo
echoInfo "Send Report"
sendReport
echoInfo "End of Test"

View File

@ -0,0 +1,70 @@
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function runCreateTableOnly {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$today.log"
demoTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}
function runCreateTableThenInsert {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$today.log"
demoTableAndInsert=`grep "Total:" totaltime.out|awk '{print $2}'`
demoRPS=`grep "records\/second" taosdemo-$today.log | tail -n1 | awk '{print $13}'`
}
function generateTaosdemoPlot {
echo "${today}, demoTableOnly: ${demoTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoTableOnly}, ${demoTableAndInsert}, ${demoRPS}" >> taosdemo-report.csv
csvLines=`cat taosdemo-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-report.csv
fi
gnuplot -p taosdemo-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "Test Create Table Only "
runCreateTableOnly
echoInfo "Test Create Table then Insert data"
runCreateTableThenInsert
echoInfo "Generate plot for taosdemo"
generateTaosdemoPlot
echoInfo "End of TaosDemo Test"

View File

@ -0,0 +1,58 @@
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function runPerfTest13d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-13d-csv.sh 2>&1 | tee /root/perftest-13d-$today.log
}
function generatePerfPlot13d {
cd /root
csvLines=`cat perftest-13d-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-13d-report.csv
fi
gnuplot -e "filename='perftest-13d-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "run Performance Test with 13 days data"
runPerfTest13d
echoInfo "Generate plot of 13 days data"
generatePerfPlot13d
echoInfo "End of TSDB-Compare 13-days-data Test"

View File

@ -0,0 +1,58 @@
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function runPerfTest1d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-1d-csv.sh 2>&1 | tee /root/perftest-1d-$today.log
}
function generatePerfPlot1d {
cd /root
csvLines=`cat perftest-1d-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '2d' perftest-1d-report.csv
fi
gnuplot -e "filename='perftest-1d-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "run Performance Test with 1 day data"
runPerfTest1d
echoInfo "Generate plot of 1 day data"
generatePerfPlot1d
echoInfo "End of TSDB-Compare 1-day-data Test"

tests/perftest-scripts/run-csv.sh Executable file
View File

@ -0,0 +1,209 @@
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
docker rm -f `docker ps -a -q`
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo "Prepare data for InfluxDB...."
#bin/bulk_data_gen -seed 123 -format influx-bulk -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval 1s -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
TDENGINE=`docker run -d --net tsdbcomp --ip 172.15.1.6 -p 6030:6030 -p 6020:6020 -p 6031:6031 -p 6032:6032 -p 6033:6033 -p 6034:6034 -p 6035:6035 -p 6036:6036 -p 6037:6037 -p 6038:6038 -p 6039:6039 tdengine/tdengine:1.6.4.5`
echo
echo "------------------Writing Data-----------------"
echo
sleep 5
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
TDENGINERES=`cat data/tdengine.dat |bin/bulk_load_tdengine --url 172.15.1.6:0 --batch-size 300 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 172.15.1.6:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10
echo
echo -e "Start test InfluxDB, result in ${GREEN}Green line${NC}"
INFLUXRES=`cat data/influx.dat |bin/bulk_load_influx --batch-size=5000 --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`
echo
echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
#Test case 1: query the maximum of the usage_user metric over all data, matching the simulated server CPU data of 8 hostname tags.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
#Test case 2
#Test case 2: query the maximum of the usage_user metric over all data for 8 matched hostname tags, at a granularity of 1 hour.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
#Test case 3
#Test case 3: randomly query 12 hours of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 10 minutes.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
#Test case 4
#Test case 4: randomly query 1 hour of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 1 minute.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1hour, Influxdb"
echo
#Test case 1
#Test case 1: query the maximum of the usage_user metric over all data, matching the simulated server CPU data of 8 hostname tags.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`
#Test case 2
#Test case 2: query the maximum of the usage_user metric over all data for 8 matched hostname tags, at a granularity of 1 hour.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`
#Test case 3
#Test case 3: randomly query 12 hours of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 10 minutes.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`
#Test case 4
#Test case 4: randomly query 1 hour of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 1 minute.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`
echo
echo
echo "======================================================"
echo "          tsdb performance comparison           "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFWTM
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ1
printf " TDengine | %-4.2f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ2
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ3
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ4
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker stop $TDENGINE >>/dev/null 2>&1
docker container rm -f $TDENGINE >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
today=`date +"%Y%m%d"`
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> /root/perftest-influx-report.csv

View File

@ -0,0 +1,126 @@
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
docker rm -f `docker ps -a -q`
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo "Prepare data for InfluxDB...."
#bin/bulk_data_gen -seed 123 -format influx-bulk -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval 1s -scale-var 10 -use-case devops -timestamp-start "2018-01-02T00:00:00Z" -timestamp-end "2018-01-15T00:00:00Z" > /mnt/data/influx.dat
docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10
echo
echo -e "Start test InfluxDB, result in ${GREEN}Green line${NC}"
INFLUXRES=`cat /mnt/data/influx.dat |bin/bulk_load_influx --batch-size=5000 --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`
echo
echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
DATA=`echo $INFLUXRES|awk '{print($2)}'`
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1hour, Influxdb"
echo
#Test case 1
#Test case 1: query the maximum of the usage_user metric over all data, matching the simulated server CPU data of 8 hostname tags.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`
#Test case 2
#Test case 2: query the maximum of the usage_user metric over all data for 8 matched hostname tags, at a granularity of 1 hour.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`
#Test case 3
#Test case 3: randomly query 12 hours of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 10 minutes.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`
#Test case 4
#Test case 4: randomly query 1 hour of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 1 minute.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`
echo
echo
echo "======================================================"
echo "          tsdb performance comparison           "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> /root/perftest-influxdb-report-13d.csv
docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"

View File

@ -0,0 +1,149 @@
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-14T00:00:00Z" > /mnt/data/tdengine.dat
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
for i in {1..5}; do
TDENGINERES=`cat /mnt/data/tdengine.dat |bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 5000 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
[ -z "$TDWTM" ] || break
done
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
#Test case 1: query the maximum of the usage_user metric over all data, matching the simulated server CPU data of 8 hostname tags.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
for i in {1..5}; do
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
[ -z "$TDQ1" ] || break
done
#Test case 2
#Test case 2: query the maximum of the usage_user metric over all data for 8 matched hostname tags, at a granularity of 1 hour.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
for i in {1..5}; do
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
[ -z "$TDQ2" ] || break
done
#Test case 3
#Test case 3: randomly query 12 hours of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 10 minutes.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
for i in {1..5}; do
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
[ -z "$TDQ3" ] || break
done
#Test case 4
#Test case 4: randomly query 1 hour of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 1 minute.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
for i in {1..5}; do
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
[ -z "$TDQ4" ] || break
done
sleep 10
echo
echo
echo "======================================================"
echo "          tsdb performance comparison           "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> /root/perftest-13d-report.csv
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"

View File

@ -0,0 +1,149 @@
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
for i in {1..5}; do
TDENGINERES=`cat data/tdengine.dat |bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
[ -z "$TDWTM" ] || break
done
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
#Test case 1: query the maximum of the usage_user metric over all data, matching the simulated server CPU data of 8 hostname tags.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
for i in {1..5}; do
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
[ -z "$TDQ1" ] || break
done
#Test case 2
#Test case 2: query the maximum of the usage_user metric over all data for 8 matched hostname tags, at a granularity of 1 hour.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
for i in {1..5}; do
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
[ -z "$TDQ2" ] || break
done
#Test case 3
#Test case 3: randomly query 12 hours of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 10 minutes.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
for i in {1..5}; do
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
[ -z "$TDQ3" ] || break
done
#Test case 4
#Test case 4: randomly query 1 hour of data, match 8 hostname tags, and query the maximum of the usage_user metric at a granularity of 1 minute.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
for i in {1..5}; do
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
[ -z "$TDQ4" ] || break
done
sleep 10
echo
echo
echo "======================================================"
echo "          tsdb performance comparison           "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> /root/perftest-1d-report.csv
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"

View File

@ -0,0 +1,26 @@
#!/usr/bin/gnuplot
reset
set terminal png
set title "TaosDemo Performance Report" font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output 'taosdemo-report.png'
set datafile separator ','
set key reverse Left outside
set grid
plot 'taosdemo-report.csv' using 1:2 title "Create 10,000 Table", \
"" using 1:3 title "Create 10,000 Table and Insert 100,000 data", \
"" using 1:4 title "Request Per Second of Insert 100,000 data"

File diff suppressed because it is too large

View File

@ -1,4 +1,6 @@
#!/bin/bash
ulimit -c unlimited
python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py

View File

@ -49,7 +49,8 @@ class TDTestCase:
tdSql.error("select * from db.st where ts > '2020-05-13 10:00:00.002' OR tagtype < 2")
# illegal condition
tdSql.error("select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
def stop(self):
tdSql.close()

View File

@ -41,13 +41,12 @@ class TDTestCase:
('2020-05-13 10:00:00.002', 3, 'third') dev_002 VALUES('2020-05-13 10:00:00.003', 1, 'first'), ('2020-05-13 10:00:00.004', 2, 'second'),
('2020-05-13 10:00:00.005', 3, 'third')""")
"""Error expected here, but no errors
# query first .. as ..
tdSql.error("select first(*) as one from st")
# query last .. as ..
tdSql.error("select last(*) as latest from st")
"""
tdSql.error("select last(*) as latest from st")
# query last row .. as ..
tdSql.error("select last_row as latest from st")

View File

@ -31,14 +31,22 @@ class TDTestCase:
tdSql.execute("create table stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 binary(10), t3 nchar(10))")
tdSql.execute("insert into tb1 using stb1 tags(1,'tb1', '表1') values ('2020-04-18 15:00:00.000', 1, 0.1), ('2020-04-18 15:00:01.000', 2, 0.1)")
tdSql.execute("insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# join 2 tables -- bug exists
# tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
# tdSql.checkRows(1)
# inner join --- bug
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.checkRows(1)
# join 3 tables -- bug exists
# tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
# query show stable
tdSql.query("show stables")
tdSql.checkRows(1)
# query show tables
tdSql.query("show table")
tdSql.checkRows(2)
# query count
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 4)
@ -51,6 +59,10 @@ class TDTestCase:
tdSql.query("select last(*) from stb1")
tdSql.checkData(0, 1, 4)
# query last_row
tdSql.query("select last_row(*) from stb1")
tdSql.checkData(0, 1, 4)
# query as
tdSql.query("select t2 as number from stb1")
tdSql.checkRows(2)
@ -63,6 +75,10 @@ class TDTestCase:
tdSql.query("select last(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query last_row ... as
tdSql.query("select last_row(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query group .. by
tdSql.query("select sum(c1), t2 from stb1 group by t2")
tdSql.checkRows(2)
@ -75,6 +91,34 @@ class TDTestCase:
tdSql.query("select * from stb1 limit 2 offset 3")
tdSql.checkRows(1)
# query ... alias for table ---- bug
tdSql.query("select t.ts from tb1 t")
tdSql.checkRows(2)
# query ... tbname
tdSql.query("select tbname from stb1")
tdSql.checkRows(2)
# query ... tbname count ---- bug
tdSql.query("select count(tbname) from stb1")
tdSql.checkRows(2)
# query ... select database ---- bug
tdSql.query("SELECT database()")
tdSql.checkRows(1)
# query ... select client_version ---- bug
tdSql.query("SELECT client_version()")
tdSql.checkRows(1)
# query ... select server_version ---- bug
tdSql.query("SELECT server_version()")
tdSql.checkRows(1)
# query ... select server_status ---- bug
tdSql.query("SELECT server_status()")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -0,0 +1,215 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import random
import threading
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
current_tb = ""
last_tb = ""
written = 0
class Test (threading.Thread):
def __init__(self, threadId, name, sleepTime):
threading.Thread.__init__(self)
self.threadId = threadId
self.name = name
self.sleepTime = sleepTime
self.threadLock = threading.Lock()
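# note: this lock is created per Test instance, so the two threads started in
# run() each acquire their own lock; a shared module-level lock would be needed
# for insert_data and drop_table to actually exclude one another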
def create_table(self):
global current_tb
global last_tb
global written
tdLog.info("create a table")
current_tb = "tb%d" % int(round(time.time() * 1000))
tdLog.info("current table %s" % current_tb)
if (current_tb == last_tb):
return
else:
tdSql.execute(
'create table %s (ts timestamp, speed int)' %
current_tb)
last_tb = current_tb
written = 0
def insert_data(self):
global current_tb
global last_tb
global written
tdLog.info("will insert data to table")
if (current_tb == ""):
tdLog.info("no table, create first")
self.create_table()
tdLog.info("insert data to table")
for i in range(0, 10):
self.threadLock.acquire()
insertRows = 1000
tdLog.info("insert %d rows to %s" % (insertRows, current_tb))
for j in range(0, insertRows):
ret = tdSql.execute(
'insert into %s values (now + %dm, %d)' %
(current_tb, j, j))
written = written + 1
self.threadLock.release()
def query_data(self):
global current_tb
global last_tb
global written
if (written > 0):
tdLog.info("query data from table")
tdSql.query("select * from %s" % last_tb)
tdSql.checkRows(written)
def create_stable(self):
global current_tb
global last_tb
global written
tdLog.info("create a super table")
def restart_database(self):
global current_tb
global last_tb
global written
tdLog.info("restart databae")
tdDnodes.stop(1)
tdDnodes.start(1)
tdLog.sleep(5)
def force_restart(self):
global current_tb
global last_tb
global written
tdLog.info("force restart database")
tdDnodes.forcestop(1)
tdDnodes.start(1)
tdLog.sleep(5)
def drop_table(self):
global current_tb
global last_tb
global written
for i in range(0, 10):
self.threadLock.acquire()
tdLog.info("current_tb %s" % current_tb)
if (current_tb != ""):
tdLog.info("drop current tb %s" % current_tb)
tdSql.execute("drop table %s" % current_tb)
current_tb = ""
last_tb = ""
written = 0
tdLog.sleep(self.sleepTime)
self.threadLock.release()
def reset_query_cache(self):
global current_tb
global last_tb
global written
tdLog.info("reset query cache")
tdSql.execute("reset query cache")
tdLog.sleep(1)
def reset_database(self):
global current_tb
global last_tb
global written
tdLog.info("reset database")
tdDnodes.forcestop(1)
tdDnodes.deploy(1)
current_tb = ""
last_tb = ""
written = 0
tdDnodes.start(1)
tdSql.prepare()
def delete_datafiles(self):
global current_tb
global last_tb
global written
tdLog.info("delete data files")
dnodesDir = tdDnodes.getDnodesRootDir()
dataDir = dnodesDir + '/dnode1/*'
deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd)
current_tb = ""
last_tb = ""
written = 0
tdDnodes.start(1)
tdSql.prepare()
def run(self):
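# dispatch table: threadId selects exactly one scenario; unknown ids fall back to the "ERROR" lambda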
switch = {
1: self.create_table,
2: self.insert_data,
3: self.query_data,
4: self.create_stable,
5: self.restart_database,
6: self.force_restart,
7: self.drop_table,
8: self.reset_query_cache,
9: self.reset_database,
10: self.delete_datafiles,
}
switch.get(self.threadId, lambda: "ERROR")()
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
test1 = Test(2, "insert_data", 1)
test2 = Test(7, "drop_table", 2)
test1.start()
test2.start()
test1.join()
test2.join()
tdLog.info("end of test")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,4 +1,6 @@
#!/bin/bash
ulimit -c unlimited
# insert
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -s && sleep 1
@ -56,10 +58,4 @@ python3 ./test.py $1 -s && sleep 1
#query
python3 ./test.py $1 -f query/filter.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f query/filterCombo.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f query/queryNormal.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f query/queryError.py
python3 ./test.py $1 -s && sleep 1

View File

@ -38,9 +38,9 @@ class TDSimClient:
tdLog.exit(cmd)
def deploy(self):
self.logDir = "%s/pysim/psim/log" % (self.path,)
self.cfgDir = "%s/pysim/psim/cfg" % (self.path)
self.cfgPath = "%s/pysim/psim/cfg/taos.cfg" % (self.path)
self.logDir = "%s/sim/psim/log" % (self.path,)
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
@ -113,10 +113,10 @@ class TDDnode:
return totalSize
def deploy(self):
self.logDir = "%s/pysim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/pysim/dnode%d/data" % (self.path, self.index)
self.cfgDir = "%s/pysim/dnode%d/cfg" % (self.path, self.index)
self.cfgPath = "%s/pysim/dnode%d/cfg/taos.cfg" % (
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
self.path, self.index)
cmd = "rm -rf " + self.dataDir
@ -163,7 +163,7 @@ class TDDnode:
self.cfg("clog", "1")
self.cfg("statusInterval", "1")
self.cfg("numOfTotalVnodes", "64")
self.cfg("numOfMPeers", "3")
self.cfg("numOfMnodes", "3")
self.cfg("numOfThreadsPerCore", "2.0")
self.cfg("monitor", "0")
self.cfg("maxVnodeConnections", "30000")
@ -298,11 +298,11 @@ class TDDnode:
tdLog.exit(cmd)
def getDnodeRootDir(self, index):
dnodeRootDir = "%s/pysim/psim/dnode%d" % (self.path, index)
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
return dnodeRootDir
def getDnodesRootDir(self):
dnodesRootDir = "%s/pysim/psim" % (self.path)
dnodesRootDir = "%s/sim/psim" % (self.path)
return dnodesRootDir
@ -455,7 +455,7 @@ class TDDnodes:
# tdLog.exit(cmd)
def getDnodesRootDir(self):
dnodesRootDir = "%s/pysim" % (self.path)
dnodesRootDir = "%s/sim" % (self.path)
return dnodesRootDir
def getSimCfgPath(self):

View File

@ -3,7 +3,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
print ========= start dnode1 as master

Some files were not shown because too many files have changed in this diff