Merge from develop

Shengliang Guan 2020-06-06 06:39:52 +00:00
commit dd7bce4647
50 changed files with 1579 additions and 685 deletions

View File

@@ -63,19 +63,21 @@ typedef struct SLocalReducer {
   // char *              pBufForInterpo;   // intermediate buffer for interpolation
   tFilePage *            pTempBuffer;
   struct SQLFunctionCtx *pCtx;
   int32_t                rowSize;          // size of each intermediate result.
-  int32_t                status;           // denote it is in reduce process, in reduce process, it
-  bool                   hasPrevRow;       // cannot be released
+  int32_t                finalRowSize;     // final result row size
+  int32_t                status;           // denote it is in reduce process, in reduce process, it
+  bool                   hasPrevRow;       // cannot be released
   bool                   hasUnprocessedRow;
   tOrderDescriptor *     pDesc;
   SColumnModel *         resColModel;
   tExtMemBuffer **       pExtMemBuffer;    // disk-based buffer
   SFillInfo*             pFillInfo;        // interpolation support structure
   char *                 pFinalRes;        // result data after interpo
   tFilePage *            discardData;
   SResultInfo *          pResInfo;
   bool                   discard;
   int32_t                offset;           // limit offset value
+  bool                   orderPrjOnSTable; // projection query on stable
 } SLocalReducer;
 typedef struct SSubqueryState {

View File

@@ -285,8 +285,6 @@ typedef struct {
 typedef struct STscObj {
   void *             signature;
   void *             pTimer;
-  char               mnodeIp[TSDB_USER_LEN];
-  uint16_t           mnodePort;
   char               user[TSDB_USER_LEN];
   char               pass[TSDB_KEY_LEN];
   char               acctId[TSDB_DB_NAME_LEN];

View File

@@ -341,16 +341,6 @@ bool stableQueryFunctChanged(int32_t funcId) {
  */
 void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; }
-void initResultInfo(SResultInfo *pResInfo) {
-  pResInfo->initialized = true;  // the this struct has been initialized flag
-  pResInfo->complete = false;
-  pResInfo->hasResult = false;
-  pResInfo->numOfRes = 0;
-  memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
-}
 void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable) {
   assert(pResInfo->interResultBuf == NULL);
@@ -387,9 +377,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) {
  */
 static void function_finalizer(SQLFunctionCtx *pCtx) {
   SResultInfo *pResInfo = GET_RES_INFO(pCtx);
   if (pResInfo->hasResult != DATA_SET_FLAG) {
-    tscTrace("no result generated, result is set to NULL");
     if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
       setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
     } else {

View File

@@ -26,7 +26,7 @@
 #include "tschemautil.h"
 #include "tname.h"
-static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, size_t valueLength);
+static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
 static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
   char buf[512] = {0};
@@ -275,22 +275,37 @@ static void tscProcessCurrentUser(SSqlObj *pSql) {
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
   SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN);
+  pExpr->resBytes = TSDB_USER_LEN + TSDB_DATA_TYPE_BINARY;
+  pExpr->resType = TSDB_DATA_TYPE_BINARY;
+  char* vx = calloc(1, pExpr->resBytes);
+  STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, TSDB_USER_LEN);
+  tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
+  free(vx);
 }
 static void tscProcessCurrentDB(SSqlObj *pSql) {
   char db[TSDB_DB_NAME_LEN + 1] = {0};
   extractDBName(pSql->pTscObj->db, db);
-  // no use db is invoked before.
-  if (strlen(db) == 0) {
-    setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
-  }
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
   SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN);
+  pExpr->resType = TSDB_DATA_TYPE_BINARY;
+  size_t t = strlen(db);
+  pExpr->resBytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE;
+  char* vx = calloc(1, pExpr->resBytes);
+  if (t == 0) {
+    setVardataNull(vx, TSDB_DATA_TYPE_BINARY);
+  } else {
+    STR_WITH_SIZE_TO_VARSTR(vx, db, t);
+  }
+  tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
+  free(vx);
 }
 static void tscProcessServerVer(SSqlObj *pSql) {
@@ -298,14 +313,32 @@ static void tscProcessServerVer(SSqlObj *pSql) {
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
   SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion));
+  pExpr->resType = TSDB_DATA_TYPE_BINARY;
+  size_t t = strlen(v);
+  pExpr->resBytes = t + VARSTR_HEADER_SIZE;
+  char* vx = calloc(1, pExpr->resBytes);
+  STR_WITH_SIZE_TO_VARSTR(vx, v, t);
+  tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
+  tfree(vx);
 }
 static void tscProcessClientVer(SSqlObj *pSql) {
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
   SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version));
+  pExpr->resType = TSDB_DATA_TYPE_BINARY;
+  size_t t = strlen(version);
+  pExpr->resBytes = t + VARSTR_HEADER_SIZE;
+  char* v = calloc(1, pExpr->resBytes);
+  STR_WITH_SIZE_TO_VARSTR(v, version, t);
+  tscSetLocalQueryResult(pSql, v, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
+  tfree(v);
 }
 static void tscProcessServStatus(SSqlObj *pSql) {
@@ -325,10 +358,11 @@ static void tscProcessServStatus(SSqlObj *pSql) {
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
   SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2);
+  int32_t val = 1;
+  tscSetLocalQueryResult(pSql, (char*) &val, pExpr->aliasName, TSDB_DATA_TYPE_INT, sizeof(int32_t));
 }
-void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, size_t valueLength) {
+void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength) {
   SSqlCmd *pCmd = &pSql->cmd;
   SSqlRes *pRes = &pSql->res;
@@ -338,8 +372,10 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
   pQueryInfo->order.order = TSDB_ORDER_ASC;
   tscFieldInfoClear(&pQueryInfo->fieldsInfo);
+  pQueryInfo->fieldsInfo.pFields = taosArrayInit(1, sizeof(TAOS_FIELD));
+  pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(1, sizeof(SFieldSupInfo));
-  TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_BINARY, columnName, valueLength);
+  TAOS_FIELD f = tscCreateField(type, columnName, valueLength);
   tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
   tscInitResObjForLocalQuery(pSql, 1, valueLength);
@@ -348,7 +384,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
   SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, 0);
   pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0);
-  strncpy(pRes->data, val, pField->bytes);
+  memcpy(pRes->data, val, pField->bytes);
 }
 int tscProcessLocalCmd(SSqlObj *pSql) {
@@ -385,7 +421,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
   // keep the code in local variable in order to avoid invalid read in case of async query
   int32_t code = pSql->res.code;
   if (code == TSDB_CODE_SUCCESS) {
-    (*pSql->fp)(pSql->param, pSql, 0);
+    (*pSql->fp)(pSql->param, pSql, code);
   } else {
     tscQueueAsyncRes(pSql);
   }

View File

@@ -48,7 +48,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
   int32_t radix = 10;
-  int32_t radixList[3] = {16, 8, 2};
+  int32_t radixList[3] = {16, 8, 2};  // the integer number with different radix: hex, oct, bin
   if (pToken->type == TK_HEX || pToken->type == TK_OCT || pToken->type == TK_BIN) {
     radix = radixList[pToken->type - TK_HEX];
   }

View File

@@ -494,7 +494,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
   tsem_init(&pSql->rspSem, 0, 0);
   pSql->signature = pSql;
   pSql->pTscObj = pObj;
-  //pSql->pTscObj->pSql = pSql;
   pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
   pStmt->pSql = pSql;
@@ -515,7 +514,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
   //doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
   SSqlCmd *pCmd = &pSql->cmd;
   SSqlRes *pRes = &pSql->res;
-  pSql->param = (void*)pSql;
+  pSql->param = (void*) pSql;
   pSql->fp = waitForQueryRsp;
   pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;

View File

@@ -1881,22 +1881,38 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
       // functions can not be applied to normal columns
       int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
-      if (index.columnIndex < numOfCols) {
+      if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
         return invalidSqlErrMsg(pQueryInfo->msg, msg6);
       }
-      index.columnIndex -= numOfCols;
+      if (index.columnIndex > 0) {
+        index.columnIndex -= numOfCols;
+      }
       // 2. valid the column type
-      int16_t colType = pSchema[index.columnIndex].type;
-      if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
+      int16_t colType = 0;
+      if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+        colType = TSDB_DATA_TYPE_BINARY;
+      } else {
+        colType = pSchema[index.columnIndex].type;
+      }
+      if (colType == TSDB_DATA_TYPE_BOOL) {
        return invalidSqlErrMsg(pQueryInfo->msg, msg1);
       }
       tscColumnListInsert(pTableMetaInfo->tagColList, &index);
       SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
-      SSchema s = pTagSchema[index.columnIndex];
+      SSchema s = {0};
+      if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+        s.bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
+        s.type  = TSDB_DATA_TYPE_BINARY;
+        s.colId = TSDB_TBNAME_COLUMN_INDEX;
+      } else {
+        s = pTagSchema[index.columnIndex];
+      }
       int16_t bytes = 0;
       int16_t type  = 0;
       int32_t inter = 0;
@@ -5379,20 +5395,15 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
   switch (index) {
     case 0:
-      pQueryInfo->command = TSDB_SQL_CURRENT_DB;
-      return TSDB_CODE_SUCCESS;
+      pQueryInfo->command = TSDB_SQL_CURRENT_DB;break;
     case 1:
-      pQueryInfo->command = TSDB_SQL_SERV_VERSION;
-      return TSDB_CODE_SUCCESS;
+      pQueryInfo->command = TSDB_SQL_SERV_VERSION;break;
     case 2:
-      pQueryInfo->command = TSDB_SQL_SERV_STATUS;
-      return TSDB_CODE_SUCCESS;
+      pQueryInfo->command = TSDB_SQL_SERV_STATUS;break;
     case 3:
-      pQueryInfo->command = TSDB_SQL_CLI_VERSION;
-      return TSDB_CODE_SUCCESS;
+      pQueryInfo->command = TSDB_SQL_CLI_VERSION;break;
     case 4:
-      pQueryInfo->command = TSDB_SQL_CURRENT_USER;
-      return TSDB_CODE_SUCCESS;
+      pQueryInfo->command = TSDB_SQL_CURRENT_USER;break;
     default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); }
   }
@@ -5402,6 +5413,8 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
   const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
   strncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
+  return TSDB_CODE_SUCCESS;
 }
 // can only perform the parameters based on the macro definitation

View File

@@ -131,13 +131,6 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t startCol)
   assert(pTableMeta != NULL);
   SSchema* pSchema = (SSchema*) pTableMeta->schema;
-#if 0
-  if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
-    assert (pTableMeta->pSTable != NULL);
-    pSchema = pTableMeta->pSTable->schema;
-  }
-#endif
   return &pSchema[startCol];
 }

View File

@@ -217,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pReducer->numOfBuffer = numOfFlush;
   pReducer->numOfVnode = numOfBuffer;
   pReducer->pDesc = pDesc;
   tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
@@ -278,6 +278,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
   param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
+  pReducer->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
   pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
   if (pReducer->pLoserTree == NULL || pRes->code != 0) {
@@ -309,10 +310,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16;
   pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
-  int32_t finalRowLength = tscGetResRowLength(pQueryInfo->exprList);
+  pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList);
   pReducer->resColModel = finalmodel;
-  pReducer->resColModel->capacity = pReducer->nResultBufSize / finalRowLength;
-  assert(finalRowLength <= pReducer->rowSize);
+  pReducer->resColModel->capacity = pReducer->nResultBufSize / pReducer->finalRowSize;
+  assert(pReducer->finalRowSize <= pReducer->rowSize);
   pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
   // pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize);
@@ -389,7 +390,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor
   assert(pPage->num <= pDesc->pColumnModel->capacity);
   // sort before flush to disk, the data must be consecutively put on tFilePage.
-  if (pDesc->orderIdx.numOfCols > 0) {
+  if (pDesc->orderInfo.numOfCols > 0) {
     tColDataQSort(pDesc, pPage->num, 0, pPage->num - 1, pPage->data, orderType);
   }
@@ -590,12 +591,10 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
 bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-  int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
   // disable merge procedure for column projection query
+  int16_t functionId = pReducer->pCtx[0].functionId;
   assert(functionId != TSDB_FUNC_ARITHM);
-  if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+  if (pReducer->orderPrjOnSTable) {
     return true;
   }
@@ -604,26 +603,33 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
   }
   tOrderDescriptor *pOrderDesc = pReducer->pDesc;
-  int32_t numOfCols = pOrderDesc->orderIdx.numOfCols;
+  SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
   // no group by columns, all data belongs to one group
+  int32_t numOfCols = orderInfo->numOfCols;
   if (numOfCols <= 0) {
     return true;
   }
-  if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {  //<= 0
-    // super table interval query
+  if (orderInfo->pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+    /*
+     * super table interval query
+     * if the order columns is the primary timestamp, all result data belongs to one group
+     */
     assert(pQueryInfo->intervalTime > 0);
-    pOrderDesc->orderIdx.numOfCols -= 1;
+    if (numOfCols == 1) {
+      return true;
+    }
   } else {  // simple group by query
     assert(pQueryInfo->intervalTime == 0);
   }
   // only one row exists
-  int32_t ret = compare_a(pOrderDesc, 1, 0, pPrev, 1, 0, tmpBuffer->data);
-  pOrderDesc->orderIdx.numOfCols = numOfCols;
-  return (ret == 0);
+  int32_t index = orderInfo->pData[0];
+  int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
+  int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
+  return ret == 0;
 }
 int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
@@ -873,24 +879,24 @@ static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRe
  * Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
  * by "interuptHandler" function in shell
  */
-static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
+static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
   SSqlCmd *   pCmd = &pSql->cmd;
   SSqlRes *   pRes = &pSql->res;
   tFilePage * pFinalDataPage = pLocalReducer->pResultBuf;
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-  if (pRes->pLocalReducer != pLocalReducer) {
-    /*
-     * Release the SSqlObj is called, and it is int destroying function invoked by other thread.
-     * However, the other thread will WAIT until current process fully completes.
-     * Since the flag of release struct is set by doLocalReduce function
-     */
-    assert(pRes->pLocalReducer == NULL);
-  }
+//  if (pRes->pLocalReducer != pLocalReducer) {
+//    /*
+//     * Release the SSqlObj is called, and it is int destroying function invoked by other thread.
+//     * However, the other thread will WAIT until current process fully completes.
+//     * Since the flag of release struct is set by doLocalReduce function
+//     */
+//    assert(pRes->pLocalReducer == NULL);
+//  }
+  // no interval query, no fill operation
   if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
-    // no interval query, no fill operation
     pRes->data = pLocalReducer->pFinalRes;
     pRes->numOfRows = pFinalDataPage->num;
     pRes->numOfClauseTotal += pRes->numOfRows;
@@ -929,9 +935,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
       savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
     }
-    int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList);
-    memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize);
+    memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize);
     pFinalDataPage->num = 0;
     return;
   }
@@ -1037,16 +1041,13 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
 static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
   // the tag columns need to be set before all functions execution
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
   size_t size = tscSqlExprNumOfExprs(pQueryInfo);
   for (int32_t j = 0; j < size; ++j) {
-    SSqlExpr *      pExpr = tscSqlExprGet(pQueryInfo, j);
     SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
-    tVariantAssign(&pCtx->param[0], &pExpr->param[0]);
     // tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
-    int32_t functionId = pExpr->functionId;
+    int32_t functionId = pCtx->functionId;
     if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
       tVariantDestroy(&pCtx->tag);
       char* input = pCtx->aInputElemBuf;
@@ -1057,17 +1058,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
       } else {
         tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
       }
+    } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
+      SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
+      pCtx->param[0].i64Key = pExpr->param[0].i64Key;
     }
     pCtx->currentStage = SECONDARY_STAGE_MERGE;
     if (needInit) {
-      aAggs[pExpr->functionId].init(pCtx);
+      aAggs[pCtx->functionId].init(pCtx);
     }
   }
   for (int32_t j = 0; j < size; ++j) {
-    int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId;
+    int32_t functionId = pLocalReducer->pCtx[j].functionId;
     if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
       continue;
     }
@@ -1101,8 +1105,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
      * ts, tag, tagprj function can not decide the output number of current query
      * the number of output result is decided by main output
      */
-    SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
-    int32_t functionId = pExpr->functionId;
+    int32_t functionId = pCtx[j].functionId;
     if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
       continue;
     }
@@ -1136,15 +1139,13 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
   char *buf = malloc((size_t)maxBufSize);
   for (int32_t k = 0; k < size; ++k) {
-    SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
-    if (pExpr->functionId != TSDB_FUNC_TAG) {
+    SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
+    if (pCtx->functionId != TSDB_FUNC_TAG) {
       continue;
     }
     int32_t inc = numOfRes - 1;  // tsdb_func_tag function only produce one row of result
     memset(buf, 0, (size_t)maxBufSize);
-    SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
     memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes);
     for (int32_t i = 0; i < inc; ++i) {
@@ -1160,8 +1161,8 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
   size_t size = tscSqlExprNumOfExprs(pQueryInfo);
   for (int32_t k = 0; k < size; ++k) {
-    SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
-    aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]);
+    SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
+    aAggs[pCtx->functionId].xFinalize(pCtx);
   }
   pLocalReducer->hasPrevRow = false;
@@ -1182,13 +1183,13 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
  */
 bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
   int32_t ret = 0;  // merge all result by default
-  int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
+  int16_t functionId = pLocalReducer->pCtx[0].functionId;
   if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) {  // column projection query
     ret = 1;  // disable merge procedure
   } else {
     tOrderDescriptor *pDesc = pLocalReducer->pDesc;
-    if (pDesc->orderIdx.numOfCols > 0) {
+    if (pDesc->orderInfo.numOfCols > 0) {
       if (pDesc->tsOrder == TSDB_ORDER_ASC) {  // asc
         // todo refactor comparator
         ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
@@ -1274,7 +1275,7 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
     taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
   }
-  doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
+  doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
   return true;
 }
@@ -1341,7 +1342,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
     // the first column must be the timestamp column
     int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, ekey, pLocalReducer->resColModel->capacity);
     if (rows > 0) {  // do interpo
-      doInterpolateResult(pSql, pLocalReducer, false);
+      doFillResult(pSql, pLocalReducer, false);
     }
     return true;
@@ -1374,7 +1375,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
                                          pQueryInfo->slidingTimeUnit, tinfo.precision);
     int32_t rows = taosGetNumOfResultWithFill(pFillInfo, 0, etime, pLocalReducer->resColModel->capacity);
     if (rows > 0) {  // do interpo
-      doInterpolateResult(pSql, pLocalReducer, true);
+      doFillResult(pSql, pLocalReducer, true);
     }
   }
@@ -1408,13 +1409,11 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
   size_t size = tscSqlExprNumOfExprs(pQueryInfo);
   for (int32_t k = 0; k < size; ++k) {
-    SSqlExpr *      pExpr = tscSqlExprGet(pQueryInfo, k);
     SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
     pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
     // set the correct output timestamp column position
-    if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
+    if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) {
       pCtx->ptsOutputBuf = ((char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * numOfRes);
     }
   }

View File

@@ -333,10 +333,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
     if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command])
       rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
     if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
-      rpcMsg->code = pRes->code ? pRes->code : pRes->numOfRows;
+      rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code;
+      tscTrace("%p SQL result:%s res:%p", pSql, tstrerror(pRes->code), pSql);
       bool shouldFree = tscShouldBeFreed(pSql);
       (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
@@ -1392,22 +1392,9 @@ int tscProcessDescribeTableRsp(SSqlObj *pSql) {
   return tscLocalResultCommonBuilder(pSql, numOfRes);
 }
-int tscProcessTagRetrieveRsp(SSqlObj *pSql) {
-//  SSqlCmd *pCmd = &pSql->cmd;
-//  SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-//  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-  int32_t numOfRes = 0;
-#if 0
-  if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_TAGPRJ) {
-    numOfRes = pTableMetaInfo->pMetricMeta->numOfTables;
-  } else {
-    numOfRes = 1;  // for count function, there is only one output.
-  }
-#endif
+int tscProcessLocalRetrieveRsp(SSqlObj *pSql) {
+  int32_t numOfRes = 1;
+  pSql->res.completed = true;
   return tscLocalResultCommonBuilder(pSql, numOfRes);
 }
@@ -2312,7 +2299,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
   }
   pRes->row = 0;
-  tscTrace("%p numOfRows:%d, offset:%d", pSql, pRes->numOfRows, pRes->offset);
+  tscTrace("%p numOfRows:%d, offset:%d, complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
   return 0;
 }
@@ -2563,11 +2550,11 @@ void tscInitMsgsFp() {
   tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromNode;  // rsp handled by same function.
   tscProcessMsgRsp[TSDB_SQL_DESCRIBE_TABLE] = tscProcessDescribeTableRsp;
-  tscProcessMsgRsp[TSDB_SQL_CURRENT_DB]   = tscProcessTagRetrieveRsp;
-  tscProcessMsgRsp[TSDB_SQL_CURRENT_USER] = tscProcessTagRetrieveRsp;
-  tscProcessMsgRsp[TSDB_SQL_SERV_VERSION] = tscProcessTagRetrieveRsp;
-  tscProcessMsgRsp[TSDB_SQL_CLI_VERSION]  = tscProcessTagRetrieveRsp;
-  tscProcessMsgRsp[TSDB_SQL_SERV_STATUS]  = tscProcessTagRetrieveRsp;
+  tscProcessMsgRsp[TSDB_SQL_CURRENT_DB]   = tscProcessLocalRetrieveRsp;
+  tscProcessMsgRsp[TSDB_SQL_CURRENT_USER] = tscProcessLocalRetrieveRsp;
+  tscProcessMsgRsp[TSDB_SQL_SERV_VERSION] = tscProcessLocalRetrieveRsp;
+  tscProcessMsgRsp[TSDB_SQL_CLI_VERSION]  = tscProcessLocalRetrieveRsp;
+  tscProcessMsgRsp[TSDB_SQL_SERV_STATUS]  = tscProcessLocalRetrieveRsp;
   tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp;

View File

@@ -88,7 +88,6 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
   strncpy(pObj->user, user, TSDB_USER_LEN);
   taosEncryptPass((uint8_t *)pass, strlen(pass), pObj->pass);
-  pObj->mnodePort = port ? port : tsDnodeShellPort;
   if (db) {
     int32_t len = strlen(db);
@@ -414,10 +413,6 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
 static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
   SSqlObj* pSql = (SSqlObj*) tres;
-  if (numOfRows < 0) { // set the error code
-    pSql->res.code = -numOfRows;
-  }
   sem_post(&pSql->rspSem);
 }
@@ -445,7 +440,12 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
         pCmd->command == TSDB_SQL_FETCH ||
         pCmd->command == TSDB_SQL_SHOW ||
         pCmd->command == TSDB_SQL_SELECT ||
-        pCmd->command == TSDB_SQL_DESCRIBE_TABLE)) {
+        pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
+        pCmd->command == TSDB_SQL_SERV_STATUS ||
+        pCmd->command == TSDB_SQL_CURRENT_DB ||
+        pCmd->command == TSDB_SQL_SERV_VERSION ||
+        pCmd->command == TSDB_SQL_CLI_VERSION ||
+        pCmd->command == TSDB_SQL_CURRENT_USER )) {
     taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj);
     sem_wait(&pSql->rspSem);
   }

View File

@@ -1557,8 +1557,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
     assert(pRes->numOfRows == numOfRows);
     int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
-//    tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql,
-//             pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
+    tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%s, orderOfSub:%d", pPObj, pSql,
+             pRes->numOfRows, pState->numOfRetrievedRows, pSql->ipList.fqdn[pSql->ipList.inUse], idx);
     if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
       tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId64 " , current:%" PRId64,
@@ -1713,6 +1713,11 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
   // increase the total inserted rows
   if (numOfRows > 0) {
     pParentObj->res.numOfRows += numOfRows;
+  } else {
+    SSqlObj* pSql = (SSqlObj*) tres;
+    assert(pSql != NULL && pSql->res.code == numOfRows);
+    pParentObj->res.code = pSql->res.code;
   }
   taos_free_result(tres);
@@ -1947,7 +1952,8 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-  for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) {
+  size_t size = tscNumOfFields(pQueryInfo);
+  for (int i = 0; i < size; ++i) {
     SFieldSupInfo* pSup = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, i);
     if (pSup->pSqlExpr != NULL) {
       tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);

View File

@@ -404,8 +404,6 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
   pSql->numOfSubs = 0;
   tscResetSqlCmdObj(pCmd);
-  tscTrace("%p partially free sqlObj completed", pSql);
 }
 void tscFreeSqlObj(SSqlObj* pSql) {
@@ -967,7 +965,8 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
     SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
     pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
     strncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, TSDB_COL_NAME_LEN);
-  } else {
+  } else if (pTableMetaInfo->pTableMeta != NULL) {
+    // in handling select database/version/server_status(), the pTableMeta is NULL
     SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex);
     pExpr->colInfo.colId = pSchema->colId;
     strncpy(pExpr->colInfo.name, pSchema->name, TSDB_COL_NAME_LEN);
@@ -979,8 +978,12 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
   pExpr->colInfo.colIndex = pColIndex->columnIndex;
   pExpr->resType = type;
   pExpr->resBytes = size;
   pExpr->interBytes = interSize;
-  pExpr->uid = pTableMetaInfo->pTableMeta->uid;
+  if (pTableMetaInfo->pTableMeta) {
+    pExpr->uid = pTableMetaInfo->pTableMeta->uid;
+  }
   return pExpr;
 }
@@ -2104,7 +2107,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
 }
 void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
-  SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, columnIndex);
+  SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);//tscFieldInfoGetSupp(pFieldInfo, columnIndex);
   assert(pInfo->pSqlExpr != NULL);
   int32_t type = pInfo->pSqlExpr->resType;

View File

@@ -112,7 +112,7 @@ int32_t tsMaxShellConns = 5000;
 char    tsDefaultDB[TSDB_DB_NAME_LEN] = {0};
 char    tsDefaultUser[64] = "root";
 char    tsDefaultPass[64] = "taosdata";
-int32_t tsMaxConnections = 50;
+int32_t tsMaxConnections = 5000;
 int32_t tsBalanceInterval = 300;  // seconds
 int32_t tsOfflineThreshold = 86400*100;  // seconds 10days
@@ -840,7 +840,7 @@ static void doInitGlobalConfig() {
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = 1;
-  cfg.maxValue = 100;
+  cfg.maxValue = 100000;
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

View File

@@ -150,6 +150,7 @@ class CTaosInterface(object):
     libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
     libtaos.taos_free_result.restype = None
     libtaos.taos_errno.restype = ctypes.c_int
+    libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
     def __init__(self, config=None):
         '''

View File

@@ -150,6 +150,7 @@ class CTaosInterface(object):
     libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
     libtaos.taos_free_result.restype = None
     libtaos.taos_errno.restype = ctypes.c_int
+    libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
     def __init__(self, config=None):
         '''

View File

@@ -150,6 +150,7 @@ class CTaosInterface(object):
     libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
     libtaos.taos_free_result.restype = None
    libtaos.taos_errno.restype = ctypes.c_int
+    libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
     def __init__(self, config=None):
         '''

View File

@@ -150,6 +150,7 @@ class CTaosInterface(object):
     libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
     libtaos.taos_free_result.restype = None
     libtaos.taos_errno.restype = ctypes.c_int
+    libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
     def __init__(self, config=None):
         '''

View File

@@ -188,9 +188,10 @@ typedef void *TsdbPosT;
  * @param tsdb      tsdb handle
  * @param pCond     query condition, including time window, result set order, and basic required columns for each block
  * @param groupInfo tableId list in the form of set, seperated into different groups according to group by condition
+ * @param qinfo     query info handle from query processor
  * @return
  */
-TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
+TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
 /**
  * Get the last row of the given query time window for all the tables in STableGroupInfo object.
@@ -202,11 +203,11 @@ TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STable
  * @param groupInfo tableId list.
  * @return
  */
-TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
+TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
 SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle);
-TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList);
+TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo);
 /**
  * move to next block if exists

View File

@@ -1284,7 +1284,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
   char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg);
   for (int32_t i = 0; i < numOfTable; ++i) {
-    char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
+    char *          stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN)*i;
     SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
     if (pTable == NULL) {
       mError("stable:%s, not exist while get stable vgroup info", stableName);
@@ -1294,41 +1294,48 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
     if (pTable->vgHash == NULL) {
       mError("stable:%s, not vgroup exist while get stable vgroup info", stableName);
       mnodeDecTableRef(pTable);
-      continue;
-    }
-    SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
-    SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
-    int32_t               vgSize = 0;
-    while (taosHashIterNext(pIter)) {
-      int32_t *pVgId = taosHashIterGet(pIter);
-      SVgObj * pVgroup = mnodeGetVgroup(*pVgId);
-      if (pVgroup == NULL) continue;
-      pVgroupInfo->vgroups[vgSize].vgId = htonl(pVgroup->vgId);
-      for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
-        SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
-        if (pDnode == NULL) break;
-        strncpy(pVgroupInfo->vgroups[vgSize].ipAddr[vn].fqdn, pDnode->dnodeFqdn, tListLen(pDnode->dnodeFqdn));
-        pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(pDnode->dnodePort);
-        pVgroupInfo->vgroups[vgSize].numOfIps++;
-      }
-      vgSize++;
-      mnodeDecVgroupRef(pVgroup);
-    }
-    taosHashDestroyIter(pIter);
-    mnodeDecTableRef(pTable);
-    pVgroupInfo->numOfVgroups = htonl(vgSize);
-    // one table is done, try the next table
-    msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
-    pRsp->numOfTables++;
+      // even this super table has no corresponding table, still return
+      pRsp->numOfTables++;
+      SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
+      pVgroupInfo->numOfVgroups = 0;
+      msg += sizeof(SVgroupsInfo);
+    } else {
+      SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
+      SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
+      int32_t               vgSize = 0;
+      while (taosHashIterNext(pIter)) {
+        int32_t *pVgId = taosHashIterGet(pIter);
+        SVgObj * pVgroup = mnodeGetVgroup(*pVgId);
+        if (pVgroup == NULL) continue;
+        pVgroupInfo->vgroups[vgSize].vgId = htonl(pVgroup->vgId);
+        for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
+          SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
+          if (pDnode == NULL) break;
+          strncpy(pVgroupInfo->vgroups[vgSize].ipAddr[vn].fqdn, pDnode->dnodeFqdn, tListLen(pDnode->dnodeFqdn));
+          pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(pDnode->dnodePort);
+          pVgroupInfo->vgroups[vgSize].numOfIps++;
+        }
+        vgSize++;
+        mnodeDecVgroupRef(pVgroup);
+      }
+      taosHashDestroyIter(pIter);
+      mnodeDecTableRef(pTable);
+      pVgroupInfo->numOfVgroups = htonl(vgSize);
+      // one table is done, try the next table
+      msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
+      pRsp->numOfTables++;
+    }
   }
   if (pRsp->numOfTables != numOfTable) {

View File

@@ -172,10 +172,11 @@ typedef struct SQueryRuntimeEnv {
   STSBuf*              pTSBuf;
   STSCursor            cur;
   SQueryCostInfo       summary;
   bool                 stableQuery;      // super table query or not
   void*                pQueryHandle;
   void*                pSecQueryHandle;  // another thread for
   SDiskbasedResultBuf* pResultBuf;       // query result buffer based on blocked-wised disk file
+  bool                 topBotQuery;      // false;
 } SQueryRuntimeEnv;
 typedef struct SQInfo {

View File

@@ -31,7 +31,8 @@ void closeTimeWindow(SWindowResInfo* pWindowResInfo, int32_t slot);
 void closeAllTimeWindow(SWindowResInfo* pWindowResInfo);
 void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order);
 SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);
-int32_t curTimeWindow(SWindowResInfo *pWindowResInfo);
+#define curTimeWindow(_winres)  ((_winres)->curIndex)
 bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
 void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo);

View File

@@ -28,7 +28,7 @@
 #include "tdataformat.h"
 #include "talgo.h"
-#define DEFAULT_PAGE_SIZE (1024L*56)  // 16k larger than the SHistoInfo
+#define DEFAULT_PAGE_SIZE (1024L*64)  // 16k larger than the SHistoInfo
 #define MAX_TMPFILE_PATH_LENGTH PATH_MAX
 #define INITIAL_ALLOCATION_BUFFER_SIZE 64
@@ -96,7 +96,7 @@ typedef struct SColumnOrderInfo {
 typedef struct tOrderDescriptor {
   SColumnModel *   pColumnModel;
   int32_t          tsOrder;  // timestamp order type if exists
-  SColumnOrderInfo orderIdx;
+  SColumnOrderInfo orderInfo;
 } tOrderDescriptor;
 typedef struct tExtMemBuffer {

View File

@@ -85,7 +85,7 @@ SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId);
  * @param id
  * @return
  */
-tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id);
+#define GET_RES_BUF_PAGE_BY_ID(buf, id) ((tFilePage*)((buf)->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE*(id)))
 /**
  * get the total buffer size in the format of disk file

View File

@@ -647,9 +647,9 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
 }
 ////////////////////////////////////////kill statement///////////////////////////////////////
-cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y).   {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);}
-cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F).   {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
-cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F).   {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
+cmd ::= KILL CONNECTION INTEGER(Y).   {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
+cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y).   {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
+cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y).   {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
 %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
   DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD

View File

@@ -272,9 +272,18 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
 bool stableQueryFunctChanged(int32_t funcId);
 void resetResultInfo(SResultInfo *pResInfo);
-void initResultInfo(SResultInfo *pResInfo);
 void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable);
+static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) {
+  pResInfo->initialized = true;  // the this struct has been initialized flag
+  pResInfo->complete = false;
+  pResInfo->hasResult = false;
+  pResInfo->numOfRes = 0;
+  memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
+}
 #ifdef __cplusplus
 }
 #endif

View File

@@ -98,6 +98,7 @@ static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
 static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group);
 static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult);
+static void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult);
 static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pResultInfo);
 static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
 static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow);
@@ -441,7 +442,7 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult
     pData = getNewDataBuf(pResultBuf, sid, &pageId);
   } else {
     pageId = getLastPageId(&list);
-    pData = getResultBufferPageById(pResultBuf, pageId);
+    pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, pageId);
     if (pData->num >= numOfRowsPerPage) {
       pData = getNewDataBuf(pResultBuf, sid, &pageId);
@@ -484,10 +485,8 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
   // set time window for current result
   pWindowRes->window = *win;
-  setWindowResOutputBuf(pRuntimeEnv, pWindowRes);
-  initCtxOutputBuf(pRuntimeEnv);
+  setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
   return TSDB_CODE_SUCCESS;
 }
@@ -651,7 +650,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat
     pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
     if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
-      pCtx[k].ptsList = tsBuf;
+      pCtx[k].ptsList = &tsBuf[offset];
     }
     // not a whole block involved in query processing, statistics data can not be used
@ -687,47 +686,52 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys, SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys,
__block_search_fn_t searchFn) { __block_search_fn_t searchFn) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
while (1) { // tumbling time window query, a special case of sliding time window query
if ((pNextWin->ekey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || if (pQuery->slidingTime == pQuery->intervalTime) {
(pNextWin->skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { // todo opt
return -1;
}
getNextTimeWindow(pQuery, pNextWin);
// next time window is not in current block
if ((pNextWin->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pNextWin->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) {
return -1;
}
TSKEY startKey = -1;
if (QUERY_IS_ASC_QUERY(pQuery)) {
startKey = pNextWin->skey;
if (startKey < pQuery->window.skey) {
startKey = pQuery->window.skey;
}
} else {
startKey = pNextWin->ekey;
if (startKey > pQuery->window.skey) {
startKey = pQuery->window.skey;
}
}
int32_t startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order);
/*
* This time window does not cover any data, try next time window,
* this case may happen when the time window is too small
*/
if ((primaryKeys[startPos] > pNextWin->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(primaryKeys[startPos] < pNextWin->skey && !QUERY_IS_ASC_QUERY(pQuery))) {
continue;
}
return startPos;
} }
getNextTimeWindow(pQuery, pNextWin);
// next time window is not in current block
if ((pNextWin->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pNextWin->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) {
return -1;
}
TSKEY startKey = -1;
if (QUERY_IS_ASC_QUERY(pQuery)) {
startKey = pNextWin->skey;
if (startKey < pQuery->window.skey) {
startKey = pQuery->window.skey;
}
} else {
startKey = pNextWin->ekey;
if (startKey > pQuery->window.skey) {
startKey = pQuery->window.skey;
}
}
int32_t startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order);
/*
* This time window does not cover any data; try the next one.
* This case may happen when the time window is too small
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNextWin->ekey) {
TSKEY next = primaryKeys[startPos];
pNextWin->ekey += ((next - pNextWin->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->skey = pNextWin->ekey - pQuery->intervalTime + 1;
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNextWin->skey) {
TSKEY next = primaryKeys[startPos];
pNextWin->skey -= ((pNextWin->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->ekey = pNextWin->skey + pQuery->intervalTime - 1;
}
return startPos;
} }
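The window-advance arithmetic above can be read in isolation as follows; this is a minimal sketch for the ascending case, where next stands for the first timestamp found beyond the current window (stand-in types, not the project headers):

#include <stdint.h>

typedef int64_t TSKEY;

/* Advance [*skey, *ekey] by whole multiples of slidingTime until it covers next,
   then recompute the start from the interval length (mirrors the ekey/skey update above). */
static void advanceWindowAsc(TSKEY *skey, TSKEY *ekey, TSKEY next,
                             int64_t slidingTime, int64_t intervalTime) {
  if (next > *ekey) {
    int64_t steps = (next - *ekey + slidingTime - 1) / slidingTime;  /* ceiling division */
    *ekey += steps * slidingTime;
    *skey  = *ekey - intervalTime + 1;
  }
}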
static TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) { static TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) {
@ -1027,7 +1031,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
} }
// in the supplementary scan, only the following functions need to be executed // in the supplementary scan, only the following functions need to be executed
if (IS_REVERSE_SCAN(pRuntimeEnv)) {// && (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { if (IS_REVERSE_SCAN(pRuntimeEnv)) {
return false; return false;
} }
@ -1183,7 +1187,6 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
STableQueryInfo* pTableQInfo = pQuery->current; STableQueryInfo* pTableQInfo = pQuery->current;
SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo;
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
@ -1598,8 +1601,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
///////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, int64_t *realSkey, void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) {
int64_t *realEkey, STimeWindow *win) {
assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime); assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision); win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision);
@ -1611,8 +1613,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
*/ */
assert(keyLast - keyFirst < pQuery->intervalTime); assert(keyLast - keyFirst < pQuery->intervalTime);
*realSkey = keyFirst; realWin->skey = keyFirst;
*realEkey = keyLast; realWin->ekey = keyLast;
win->ekey = INT64_MAX; win->ekey = INT64_MAX;
return; return;
@ -1620,17 +1622,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
win->ekey = win->skey + pQuery->intervalTime - 1; win->ekey = win->skey + pQuery->intervalTime - 1;
if (win->skey < keyFirst) { realWin->skey = (win->skey < keyFirst)? keyFirst : win->skey;
*realSkey = keyFirst; realWin->ekey = (win->ekey < keyLast) ? win->ekey : keyLast;
} else {
*realSkey = win->skey;
}
if (win->ekey < keyLast) {
*realEkey = win->ekey;
} else {
*realEkey = keyLast;
}
} }
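A worked example of the clamping now expressed through realWin (illustrative values, not taken from the source): an aligned window of [20, 29] trimmed to a data range of [22, 27] yields realWin = [22, 27].

#include <stdint.h>

typedef int64_t TSKEY;
typedef struct { TSKEY skey, ekey; } STimeWindow;  /* minimal stand-in for the real struct */

int main(void) {
  TSKEY keyFirst = 22, keyLast = 27;
  STimeWindow win = { .skey = 20, .ekey = 29 };
  STimeWindow realWin;
  realWin.skey = (win.skey < keyFirst) ? keyFirst : win.skey;  /* 22 */
  realWin.ekey = (win.ekey < keyLast)  ? win.ekey : keyLast;   /* 27 */
  return (int)(realWin.ekey - realWin.skey);                   /* 5 */
}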
static void setScanLimitationByResultBuffer(SQuery *pQuery) { static void setScanLimitationByResultBuffer(SQuery *pQuery) {
@ -1847,31 +1840,21 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
return num; return num;
} }
static int32_t getRowParamForMultiRowsOutput(SQuery *pQuery, bool isSTableQuery) { #define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
int32_t rowparam = 1;
if (isTopBottomQuery(pQuery) && (!isSTableQuery)) { static FORCE_INLINE int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool topBotQuery, bool isSTableQuery) {
rowparam = pQuery->pSelectExpr[1].base.arg->argValue.i64; int32_t rowSize = pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, topBotQuery, isSTableQuery);
}
return rowparam;
}
static int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool isSTableQuery) {
int32_t rowSize = pQuery->rowSize * getRowParamForMultiRowsOutput(pQuery, isSTableQuery);
return (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize; return (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
} }
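To see what the macro and the inlined helper compute, here is a self-contained sketch of the page-capacity math with stand-in values for the project constants (the page size and tFilePage header size below are assumptions, not the real definitions):

#include <stdio.h>

int main(void) {
  int pageSize   = 16384;  /* stand-in for DEFAULT_INTERN_BUF_PAGE_SIZE */
  int headerSize = 8;      /* stand-in for sizeof(tFilePage) */
  int rowSize    = 16;     /* per-row output size of the query */
  int rowParam   = 100;    /* k of TOP(col, 100) on a plain table; 1 otherwise */
  int rowsPerPage = (pageSize - headerSize) / (rowSize * rowParam);
  printf("rows per result page: %d\n", rowsPerPage);  /* 10 with these values */
  return 0;
}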
char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult) { char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult) {
assert(pResult != NULL && pRuntimeEnv != NULL); assert(pResult != NULL && pRuntimeEnv != NULL);
SQuery * pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
tFilePage *page = getResultBufferPageById(pRuntimeEnv->pResultBuf, pResult->pos.pageId); tFilePage *page = GET_RES_BUF_PAGE_BY_ID(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
int32_t realRowId = pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);
int32_t numOfRows = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->stableQuery); return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
int32_t realRowId = pResult->pos.rowId * getRowParamForMultiRowsOutput(pQuery, pRuntimeEnv->stableQuery);
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * numOfRows +
pQuery->pSelectExpr[columnIndex].bytes * realRowId; pQuery->pSelectExpr[columnIndex].bytes * realRowId;
} }
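The address arithmetic in getPosInResultPage lays each column out contiguously inside a page: column c starts at offset[c] * numOfRowsPerPage bytes into the data area, and row r of that column sits bytes[c] * r further in. A hypothetical stand-alone equivalent (parameter names are illustrative, not the source's):

#include <stdint.h>

/* pageData: start of the page's data area; colOffset[c]: cumulative per-row byte
   offset of column c; colBytes[c]: element size of column c. */
static char *posInPage(char *pageData, const int32_t *colOffset, const int32_t *colBytes,
                       int32_t rowsPerPage, int32_t c, int32_t r) {
  return pageData + colOffset[c] * rowsPerPage + colBytes[c] * r;
}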
@ -2034,6 +2017,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
} }
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
pRuntimeEnv->summary.loadBlocks += 1;
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
} }
@ -2143,6 +2127,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current; STableQueryInfo* pTableQueryInfo = pQuery->current;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d", qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d",
GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey, GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey,
@ -2150,7 +2135,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
pRuntimeEnv->summary.totalBlocks += 1; summary->totalBlocks += 1;
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return 0; return 0;
} }
@ -2159,19 +2144,16 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
// todo extract methods // todo extract methods
if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
TSKEY skey1, ekey1; STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER;
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (QUERY_IS_ASC_QUERY(pQuery)) { if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &skey1, getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w);
&ekey1, &w);
pWindowResInfo->startTime = w.skey; pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey; pWindowResInfo->prevSKey = w.skey;
} else { } else {
// the start position of the first time window in the endpoint that spreads beyond the queried last timestamp // the start position of the first time window in the endpoint that spreads beyond the queried last timestamp
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &skey1, getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w);
&ekey1, &w);
pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey; pWindowResInfo->prevSKey = w.skey;
@ -2187,9 +2169,12 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataStatis *pStatis = NULL; SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
// the query start position cannot be moved into tableApplyFunctionsOnBlock because of the limit/offset handling
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
pRuntimeEnv->summary.totalRows += blockInfo.rows; summary->totalRows += blockInfo.rows;
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv), qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey); blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey);
@ -2531,7 +2516,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t total = 0; int32_t total = 0;
for (int32_t i = 0; i < list.size; ++i) { for (int32_t i = 0; i < list.size; ++i) {
tFilePage *pData = getResultBufferPageById(pResultBuf, list.pData[i]); tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[i]);
total += pData->num; total += pData->num;
} }
@ -2539,7 +2524,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t offset = 0; int32_t offset = 0;
for (int32_t num = 0; num < list.size; ++num) { for (int32_t num = 0; num < list.size; ++num) {
tFilePage *pData = getResultBufferPageById(pResultBuf, list.pData[num]); tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[num]);
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
@ -2558,10 +2543,8 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
pQInfo->offset += 1; pQInfo->offset += 1;
} }
int64_t getNumOfResultWindowRes(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindowRes) { int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
SQuery *pQuery = pRuntimeEnv->pQuery; // int64_t maxOutput = 0;
int64_t maxOutput = 0;
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId; int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
@ -2574,12 +2557,23 @@ int64_t getNumOfResultWindowRes(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pW
} }
SResultInfo *pResultInfo = &pWindowRes->resultInfo[j]; SResultInfo *pResultInfo = &pWindowRes->resultInfo[j];
if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) { assert(pResultInfo != NULL);
maxOutput = pResultInfo->numOfRes;
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
} }
// if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) {
// maxOutput = pResultInfo->numOfRes;
//
// if (maxOutput > 0) {
// break;
// }
// }
//
// assert(pResultInfo != NULL);
} }
return maxOutput; return 0;
} }
int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
@ -2612,6 +2606,8 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
assert(pQInfo->numOfGroupResultPages == 0); assert(pQInfo->numOfGroupResultPages == 0);
return 0; return 0;
} else if (numOfTables == 1) { // no need to merge results since only one table in each group
} }
SCompSupporter cs = {pTableList, posList, pQInfo}; SCompSupporter cs = {pTableList, posList, pQInfo};
@ -2636,7 +2632,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
TSKEY ts = GET_INT64_VAL(b); TSKEY ts = GET_INT64_VAL(b);
assert(ts == pWindowRes->window.skey); assert(ts == pWindowRes->window.skey);
int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes); int64_t num = getNumOfResultWindowRes(pQuery, pWindowRes);
if (num <= 0) { if (num <= 0) {
cs.position[pos] += 1; cs.position[pos] += 1;
@ -2699,10 +2695,11 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
#endif #endif
qTrace("QInfo:%p result merge completed, elapsed time:%" PRId64 " ms", GET_QINFO_ADDR(pQuery), endt - startt); qTrace("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt);
tfree(pTree);
tfree(pTableList); tfree(pTableList);
tfree(posList); tfree(posList);
tfree(pTree);
pQInfo->offset = 0; pQInfo->offset = 0;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
@ -2928,8 +2925,13 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId; int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
pRuntimeEnv->pCtx[j].currentStage = 0; pRuntimeEnv->pCtx[j].currentStage = 0;
SResultInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]);
if (pResInfo->initialized) {
continue;
}
aAggs[functionId].init(&pRuntimeEnv->pCtx[j]); aAggs[functionId].init(&pRuntimeEnv->pCtx[j]);
} }
} }
@ -3078,7 +3080,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
} }
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED); setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv); switchCtxOrder(pRuntimeEnv);
@ -3150,7 +3152,7 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
} }
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex; pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
setQueryStatus(pQuery, QUERY_NOT_COMPLETED); setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
@ -3295,13 +3297,14 @@ void setExecutionContext(SQInfo *pQInfo, STableId* pTableId, int32_t groupIndex,
setAdditionalInfo(pQInfo, pTableId, pTableQueryInfo); setAdditionalInfo(pQInfo, pTableId, pTableQueryInfo);
} }
static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) { void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
// Note: pResult->pos[i]->num == 0, there is only a fixed number of results for each group // Note: pResult->pos[i]->num == 0, there is only a fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult); pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
int32_t functionId = pQuery->pSelectExpr[i].base.functionId; int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
@ -3319,6 +3322,38 @@ static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
} }
} }
void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery;
// Note: pResult->pos[i]->num == 0, there is only a fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->resultInfo = &pResult->resultInfo[i];
if (pCtx->resultInfo->complete) {
continue;
}
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
pCtx->currentStage = 0;
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
/*
* set the output buffer information and intermediate buffer
* not all queries require the interResultBuf, such as COUNT
*/
pCtx->resultInfo->superTableQ = pRuntimeEnv->stableQuery; // set super table query flag
if (!pCtx->resultInfo->initialized) {
aAggs[functionId].init(pCtx);
}
}
}
int32_t setAdditionalInfo(SQInfo *pQInfo, STableId* pTableId, STableQueryInfo *pTableQueryInfo) { int32_t setAdditionalInfo(SQInfo *pQInfo, STableId* pTableId, STableQueryInfo *pTableQueryInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
assert(pTableQueryInfo->lastKey >= TSKEY_INITIAL_VAL); assert(pTableQueryInfo->lastKey >= TSKEY_INITIAL_VAL);
@ -3374,13 +3409,12 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
* In ascending query, key is the first qualified timestamp. However, in the descending order query, additional * In ascending query, key is the first qualified timestamp. However, in the descending order query, additional
* operations are involved. * operations are involved.
*/ */
TSKEY skey1, ekey1; STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER;
STimeWindow w = {0};
SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo; SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo;
TSKEY sk = MIN(win.skey, win.ekey); TSKEY sk = MIN(win.skey, win.ekey);
TSKEY ek = MAX(win.skey, win.ekey); TSKEY ek = MAX(win.skey, win.ekey);
getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &skey1, &ekey1, &w); getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &realWin, &w);
pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
@ -3738,7 +3772,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBloc
SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0);
// update the pQuery->limit.offset value, and pQuery->pos value // update the pQuery->limit.offset value, and pQuery->pos value
TSKEY *keys = (TSKEY *)pColInfoData->pData; TSKEY *keys = (TSKEY *) pColInfoData->pData;
// update the offset value // update the offset value
pTableQueryInfo->lastKey = keys[pQuery->pos]; pTableQueryInfo->lastKey = keys[pQuery->pos];
@ -3800,8 +3834,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
*/ */
assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL); assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
TSKEY skey1, ekey1; STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER;
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
STableQueryInfo *pTableQueryInfo = pQuery->current; STableQueryInfo *pTableQueryInfo = pQuery->current;
@ -3811,14 +3844,12 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
if (QUERY_IS_ASC_QUERY(pQuery)) { if (QUERY_IS_ASC_QUERY(pQuery)) {
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &skey1, getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w);
&ekey1, &w);
pWindowResInfo->startTime = w.skey; pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey; pWindowResInfo->prevSKey = w.skey;
} }
} else { } else {
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &skey1, &ekey1, getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w);
&w);
pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey; pWindowResInfo->prevSKey = w.skey;
@ -3939,11 +3970,11 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
} }
if (isFirstLastRowQuery(pQuery)) { if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
} else if (isPointInterpoQuery(pQuery)) { } else if (isPointInterpoQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
} else { } else {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
} }
} }
@ -3999,7 +4030,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
return code; return code;
} }
pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, isSTableQuery); pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->topBotQuery, isSTableQuery);
if (isSTableQuery) { if (isSTableQuery) {
int32_t rows = getInitialPageNum(pQInfo); int32_t rows = getInitialPageNum(pQInfo);
@ -4057,6 +4088,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
pQuery->slidingTime, pQuery->fillType, pColInfo); pQuery->slidingTime, pQuery->fillType, pColInfo);
} }
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -4086,10 +4118,10 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
} }
} }
static int64_t queryOnDataBlocks(SQInfo *pQInfo) { static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
SQueryCostInfo* summary = &pRuntimeEnv->summary; SQueryCostInfo* summary = &pRuntimeEnv->summary;
int64_t st = taosGetTimestampMs(); int64_t st = taosGetTimestampMs();
@ -4144,8 +4176,9 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
summary->totalRows += blockInfo.rows; summary->totalRows += blockInfo.rows;
stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey); stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv), qTrace("QInfo:%p check data block, uid:%"PRId64", tid:%d, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%" PRId64,
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey); GET_QINFO_ADDR(pRuntimeEnv), blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey,
blockInfo.rows, pQuery->current->lastKey);
} }
int64_t et = taosGetTimestampMs(); int64_t et = taosGetTimestampMs();
@ -4186,7 +4219,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
pRuntimeEnv->pQueryHandle = NULL; pRuntimeEnv->pQueryHandle = NULL;
} }
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp); pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
taosArrayDestroy(tx); taosArrayDestroy(tx);
taosArrayDestroy(g1); taosArrayDestroy(g1);
@ -4252,9 +4285,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
} }
if (isFirstLastRowQuery(pQuery)) { if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(pQInfo->tsdb, &cond, &gp); pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(pQInfo->tsdb, &cond, &gp, pQInfo);
} else { } else {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp); pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp, pQInfo);
} }
initCtxOutputBuf(pRuntimeEnv); initCtxOutputBuf(pRuntimeEnv);
@ -4448,7 +4481,7 @@ static void doSaveContext(SQInfo *pQInfo) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
} }
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo); pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED); setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv); switchCtxOrder(pRuntimeEnv);
@ -4515,7 +4548,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
pQuery->window.skey, pQuery->window.ekey, pQuery->order.order); pQuery->window.skey, pQuery->window.ekey, pQuery->order.order);
// do check all qualified data blocks // do check all qualified data blocks
int64_t el = queryOnDataBlocks(pQInfo); int64_t el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p master scan completed, elapsed time: %lldms, reverse scan start", pQInfo, el); qTrace("QInfo:%p master scan completed, elapsed time: %lldms, reverse scan start", pQInfo, el);
// query error occurred or query is killed, abort current execution // query error occurred or query is killed, abort current execution
@ -4530,7 +4563,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
if (needReverseScan(pQuery)) { if (needReverseScan(pQuery)) {
doSaveContext(pQInfo); doSaveContext(pQInfo);
el = queryOnDataBlocks(pQInfo); el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p reversed scan completed, elapsed time: %lldms", pQInfo, el); qTrace("QInfo:%p reversed scan completed, elapsed time: %lldms", pQInfo, el);
doRestoreContext(pQInfo); doRestoreContext(pQInfo);
@ -4817,7 +4850,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
/* check if query is killed or not */ /* check if query is killed or not */
if (isQueryKilled(pQInfo)) { if (isQueryKilled(pQInfo)) {
qTrace("QInfo:%p query is killed", pQInfo); qTrace("QInfo:%p query is killed", pQInfo);
} else {// todo set the table uid and tid in log } else {
qTrace("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", qTrace("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
} }
@ -4909,9 +4942,12 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx
return false; return false;
} else if (numOfTotal == 0) { } else if (numOfTotal == 0) {
for(int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { for(int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
if (pExprMsg[i]->functionId != TSDB_FUNC_TAGPRJ) { if ((pExprMsg[i]->functionId == TSDB_FUNC_TAGPRJ) ||
return false; (pExprMsg[i]->functionId == TSDB_FUNC_TID_TAG && pExprMsg[i]->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) {
continue;
} }
return false;
} }
} }
@ -5996,53 +6032,58 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
num = taosArrayGetSize(pa); num = taosArrayGetSize(pa);
assert(num == pQInfo->groupInfo.numOfTables); assert(num == pQInfo->groupInfo.numOfTables);
// int16_t type, bytes;
int32_t functionId = pQuery->pSelectExpr[0].base.functionId; int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id
assert(pQuery->numOfOutput == 1); assert(pQuery->numOfOutput == 1);
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0]; SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
int32_t rsize = pExprInfo->bytes; int32_t rsize = pExprInfo->bytes;
for(int32_t i = 0; i < num; ++i) { for(int32_t i = 0; i < num; ++i) {
SGroupItem* item = taosArrayGet(pa, i); SGroupItem *item = taosArrayGet(pa, i);
char* output = pQuery->sdata[0]->data + i * rsize; char *output = pQuery->sdata[0]->data + i * rsize;
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE); varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
output = varDataVal(output); output = varDataVal(output);
*(int64_t*) output = item->id.uid; // memory align problem, todo serialize *(int64_t *)output = item->id.uid; // memory align problem, todo serialize
output += sizeof(item->id.uid); output += sizeof(item->id.uid);
*(int32_t*) output = item->id.tid; *(int32_t *)output = item->id.tid;
output += sizeof(item->id.tid); output += sizeof(item->id.tid);
*(int32_t*) output = pQInfo->vgId; *(int32_t *)output = pQInfo->vgId;
output += sizeof(pQInfo->vgId); output += sizeof(pQInfo->vgId);
int16_t bytes = pExprInfo->bytes; int16_t bytes = pExprInfo->bytes;
int16_t type = pExprInfo->type; int16_t type = pExprInfo->type;
char* val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes); if (pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
char *data = tsdbGetTableName(pQInfo->tsdb, &item->id);
// todo refactor memcpy(output, data, varDataTLen(data));
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (val == NULL) {
setVardataNull(output, type);
} else {
memcpy(output, val, varDataTLen(val));
}
} else { } else {
if (val == NULL) { char *val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes);
setNull(output, type, bytes);
// todo refactor
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (val == NULL) {
setVardataNull(output, type);
} else {
memcpy(output, val, varDataTLen(val));
}
} else { } else {
memcpy(output, val, bytes); if (val == NULL) {
setNull(output, type, bytes);
} else {
memcpy(output, val, bytes);
}
} }
} }
} }
pQInfo->tableIndex = pQInfo->groupInfo.numOfTables;
qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, num); qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, num);
} else { // return only the tags|table name etc. } else { // return only the tags|table name etc.
for(int32_t i = 0; i < num; ++i) { for(int32_t i = 0; i < num; ++i) {
SExprInfo* pExprInfo = pQuery->pSelectExpr; SExprInfo* pExprInfo = pQuery->pSelectExpr;

View File

@ -206,11 +206,6 @@ bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot) {
return (getWindowResult(pWindowResInfo, slot)->status.closed == true); return (getWindowResult(pWindowResInfo, slot)->status.closed == true);
} }
int32_t curTimeWindow(SWindowResInfo *pWindowResInfo) {
assert(pWindowResInfo->curIndex >= 0 && pWindowResInfo->curIndex < pWindowResInfo->size);
return pWindowResInfo->curIndex;
}
void closeTimeWindow(SWindowResInfo *pWindowResInfo, int32_t slot) { void closeTimeWindow(SWindowResInfo *pWindowResInfo, int32_t slot) {
getWindowResult(pWindowResInfo, slot)->status.closed = true; getWindowResult(pWindowResInfo, slot)->status.closed = true;
} }

View File

@ -356,17 +356,15 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t
static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) {
switch (type) { switch (type) {
case TSDB_DATA_TYPE_INT: { case TSDB_DATA_TYPE_INT: {
int32_t first = *(int32_t *)f1; int32_t first = *(int32_t *) f1;
int32_t second = *(int32_t *)f2; int32_t second = *(int32_t *) f2;
if (first == second) { if (first == second) {
return 0; return 0;
} }
return (first < second) ? -1 : 1; return (first < second) ? -1 : 1;
}; };
case TSDB_DATA_TYPE_DOUBLE: { case TSDB_DATA_TYPE_DOUBLE: {
//double first = *(double *)f1; double first = GET_DOUBLE_VAL(f1);
double first = GET_DOUBLE_VAL(f1);
//double second = *(double *)f2;
double second = GET_DOUBLE_VAL(f2); double second = GET_DOUBLE_VAL(f2);
if (first == second) { if (first == second) {
return 0; return 0;
@ -374,9 +372,7 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
return (first < second) ? -1 : 1; return (first < second) ? -1 : 1;
}; };
case TSDB_DATA_TYPE_FLOAT: { case TSDB_DATA_TYPE_FLOAT: {
//float first = *(float *)f1; float first = GET_FLOAT_VAL(f1);
//float second = *(float *)f2;
float first = GET_FLOAT_VAL(f1);
float second = GET_FLOAT_VAL(f2); float second = GET_FLOAT_VAL(f2);
if (first == second) { if (first == second) {
return 0; return 0;
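Replacing the direct float/double dereferences with GET_DOUBLE_VAL / GET_FLOAT_VAL reads the values through a helper; the exact macro definitions are not shown here, but a portable equivalent avoids dereferencing a possibly misaligned pointer by copying the bytes out (a sketch under that assumption):

#include <string.h>

static double getDoubleVal(const char *p) {
  double v;
  memcpy(&v, p, sizeof(v));  /* well-defined even if p is not 8-byte aligned */
  return v;
}

static float getFloatVal(const char *p) {
  float v;
  memcpy(&v, p, sizeof(v));
  return v;
}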
@ -439,9 +435,9 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) { int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2); assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols; int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) { for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i]; int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx); char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx); char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
@ -471,9 +467,9 @@ int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) { int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2); assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols; int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) { for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i]; int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx); char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx); char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
@ -563,13 +559,13 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
int32_t midIdx = ((end - start) >> 1) + start; int32_t midIdx = ((end - start) >> 1) + start;
#if defined(_DEBUG_VIEW) #if defined(_DEBUG_VIEW)
int32_t f = pDescriptor->orderIdx.pData[0]; int32_t f = pDescriptor->orderInfo.pData[0];
char *midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f); char *midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f);
char *startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f); char *startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f);
char *endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f); char *endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f);
int32_t colIdx = pDescriptor->orderIdx.pData[0]; int32_t colIdx = pDescriptor->orderInfo.pData[0];
tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx); tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx);
#endif #endif
@ -596,7 +592,7 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
} }
static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t numOfRows, char *d, int32_t len) { static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t numOfRows, char *d, int32_t len) {
int32_t colIdx = pDescriptor->orderIdx.pData[0]; int32_t colIdx = pDescriptor->orderInfo.pData[0];
for (int32_t i = 0; i < len; ++i) { for (int32_t i = 0; i < len; ++i) {
char *startx = COLMODEL_GET_VAL(d, pDescriptor->pColumnModel, numOfRows, i, colIdx); char *startx = COLMODEL_GET_VAL(d, pDescriptor->pColumnModel, numOfRows, i, colIdx);
@ -1062,9 +1058,9 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
desc->pColumnModel = pModel; desc->pColumnModel = pModel;
desc->tsOrder = tsOrderType; desc->tsOrder = tsOrderType;
desc->orderIdx.numOfCols = numOfOrderCols; desc->orderInfo.numOfCols = numOfOrderCols;
for (int32_t i = 0; i < numOfOrderCols; ++i) { for (int32_t i = 0; i < numOfOrderCols; ++i) {
desc->orderIdx.pData[i] = orderColIdx[i]; desc->orderInfo.pData[i] = orderColIdx[i];
} }
return desc; return desc;

View File

@ -50,12 +50,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id) {
assert(id < pResultBuf->numOfPages && id >= 0);
return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE * id);
}
int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); } int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; } int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
@ -169,7 +163,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
*pageId = (pResultBuf->allocateId++); *pageId = (pResultBuf->allocateId++);
registerPageId(pResultBuf, groupId, *pageId); registerPageId(pResultBuf, groupId, *pageId);
tFilePage* page = getResultBufferPageById(pResultBuf, *pageId); tFilePage* page = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pageId);
// clear memory for the new page // clear memory for the new page
memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE); memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE);
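getResultBufferPageById was removed in favor of the GET_RES_BUF_PAGE_BY_ID macro; judging from the deleted body above, the macro presumably inlines the same pointer arithmetic, along the lines of:

#define GET_RES_BUF_PAGE_BY_ID(buf, id) \
  ((tFilePage*)((buf)->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE * (id)))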

View File

@ -127,7 +127,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
hash = rpcHashConn(pCache, fqdn, port, connType); hash = rpcHashConn(pCache, fqdn, port, connType);
pNode = (SConnHash *)taosMemPoolMalloc(pCache->connHashMemPool); pNode = (SConnHash *)taosMemPoolMalloc(pCache->connHashMemPool);
strcpy(pNode->fqdn, fqdn); tstrncpy(pNode->fqdn, fqdn, sizeof(pNode->fqdn));
pNode->port = port; pNode->port = port;
pNode->connType = connType; pNode->connType = connType;
pNode->data = data; pNode->data = data;
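The strcpy-to-tstrncpy changes in this and the following RPC files bound every copy by the destination size. tstrncpy is assumed here to behave like the usual strlcpy-style helper: copy at most n-1 bytes and always NUL-terminate. A stand-in sketch of that behaviour:

#include <string.h>

static void bounded_copy(char *dst, const char *src, size_t n) {
  if (n == 0) return;
  strncpy(dst, src, n - 1);
  dst[n - 1] = '\0';  /* guarantee termination even when src is longer than dst */
}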

View File

@ -60,7 +60,7 @@ typedef struct {
void *idPool; // handle to ID pool void *idPool; // handle to ID pool
void *tmrCtrl; // handle to timer void *tmrCtrl; // handle to timer
void *hash; // handle returned by hash utility SHashObj *hash; // handle returned by hash utility
void *tcphandle;// returned handle from TCP initialization void *tcphandle;// returned handle from TCP initialization
void *udphandle;// returned handle from UDP initialization void *udphandle;// returned handle from UDP initialization
void *pCache; // connection cache void *pCache; // connection cache
@ -211,7 +211,7 @@ void *rpcOpen(const SRpcInit *pInit) {
pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo)); pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo));
if (pRpc == NULL) return NULL; if (pRpc == NULL) return NULL;
if(pInit->label) strcpy(pRpc->label, pInit->label); if(pInit->label) tstrncpy(pRpc->label, pInit->label, sizeof(pRpc->label));
pRpc->connType = pInit->connType; pRpc->connType = pInit->connType;
pRpc->idleTime = pInit->idleTime; pRpc->idleTime = pInit->idleTime;
pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS:pInit->numOfThreads; pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS:pInit->numOfThreads;
@ -228,7 +228,7 @@ void *rpcOpen(const SRpcInit *pInit) {
size_t size = sizeof(SRpcConn) * pRpc->sessions; size_t size = sizeof(SRpcConn) * pRpc->sessions;
pRpc->connList = (SRpcConn *)calloc(1, size); pRpc->connList = (SRpcConn *)calloc(1, size);
if (pRpc->connList == NULL) { if (pRpc->connList == NULL) {
tError("%s failed to allocate memory for taos connections, size:%d", pRpc->label, size); tError("%s failed to allocate memory for taos connections, size:%ld", pRpc->label, size);
rpcClose(pRpc); rpcClose(pRpc);
return NULL; return NULL;
} }
@ -459,7 +459,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
pInfo->clientPort = pConn->peerPort; pInfo->clientPort = pConn->peerPort;
// pInfo->serverIp = pConn->destIp; // pInfo->serverIp = pConn->destIp;
strcpy(pInfo->user, pConn->user); strncpy(pInfo->user, pConn->user, sizeof(pInfo->user));
return 0; return 0;
} }
@ -503,10 +503,10 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort,
pConn = rpcAllocateClientConn(pRpc); pConn = rpcAllocateClientConn(pRpc);
if (pConn) { if (pConn) {
strcpy(pConn->peerFqdn, peerFqdn); tstrncpy(pConn->peerFqdn, peerFqdn, sizeof(pConn->peerFqdn));
pConn->peerIp = peerIp; pConn->peerIp = peerIp;
pConn->peerPort = peerPort; pConn->peerPort = peerPort;
strcpy(pConn->user, pRpc->user); tstrncpy(pConn->user, pRpc->user, sizeof(pConn->user));
pConn->connType = connType; pConn->connType = connType;
if (taosOpenConn[connType]) { if (taosOpenConn[connType]) {
@ -804,7 +804,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
pConn = rpcGetConnObj(pRpc, sid, pRecv); pConn = rpcGetConnObj(pRpc, sid, pRecv);
if (pConn == NULL) { if (pConn == NULL) {
tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, pHead->ahandle, tstrerror(terrno)); tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, (void *)pHead->ahandle, tstrerror(terrno));
return NULL; return NULL;
} else { } else {
if (rpcIsReq(pHead->msgType)) { if (rpcIsReq(pHead->msgType)) {

View File

@ -73,7 +73,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1); pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1);
pServerObj->ip = ip; pServerObj->ip = ip;
pServerObj->port = port; pServerObj->port = port;
strcpy(pServerObj->label, label); tstrncpy(pServerObj->label, label, sizeof(pServerObj->label));
pServerObj->numOfThreads = numOfThreads; pServerObj->numOfThreads = numOfThreads;
pServerObj->pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), numOfThreads); pServerObj->pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), numOfThreads);
@ -87,7 +87,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pThreadObj = pServerObj->pThreadObj; pThreadObj = pServerObj->pThreadObj;
for (int i = 0; i < numOfThreads; ++i) { for (int i = 0; i < numOfThreads; ++i) {
pThreadObj->processData = fp; pThreadObj->processData = fp;
strcpy(pThreadObj->label, label); tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->shandle = shandle; pThreadObj->shandle = shandle;
code = pthread_mutex_init(&(pThreadObj->mutex), NULL); code = pthread_mutex_init(&(pThreadObj->mutex), NULL);
@ -247,7 +247,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
pThreadObj = (SThreadObj *)malloc(sizeof(SThreadObj)); pThreadObj = (SThreadObj *)malloc(sizeof(SThreadObj));
memset(pThreadObj, 0, sizeof(SThreadObj)); memset(pThreadObj, 0, sizeof(SThreadObj));
strcpy(pThreadObj->label, label); tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->ip = ip; pThreadObj->ip = ip;
pThreadObj->shandle = shandle; pThreadObj->shandle = shandle;

View File

@ -72,7 +72,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pSet->port = port; pSet->port = port;
pSet->shandle = shandle; pSet->shandle = shandle;
pSet->fp = fp; pSet->fp = fp;
strcpy(pSet->label, label); tstrncpy(pSet->label, label, sizeof(pSet->label));
uint16_t ownPort; uint16_t ownPort;
for (int i = 0; i < threads; ++i) { for (int i = 0; i < threads; ++i) {
@ -99,7 +99,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pConn->localPort = (uint16_t)ntohs(sin.sin_port); pConn->localPort = (uint16_t)ntohs(sin.sin_port);
} }
strcpy(pConn->label, label); tstrncpy(pConn->label, label, sizeof(pConn->label));
pConn->shandle = shandle; pConn->shandle = shandle;
pConn->processData = fp; pConn->processData = fp;
pConn->index = i; pConn->index = i;

View File

@ -76,6 +76,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0; int numOfReqs = 0;
int appThreads = 1; int appThreads = 1;
char serverIp[40] = "127.0.0.1"; char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime; struct timeval systemTime;
int64_t startTime, endTime; int64_t startTime, endTime;
pthread_attr_t thattr; pthread_attr_t thattr;
@ -97,7 +98,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100; rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000; rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael"; rpcInit.user = "michael";
rpcInit.secret = "mypassword"; rpcInit.secret = secret;
rpcInit.ckey = "key"; rpcInit.ckey = "key";
rpcInit.spi = 1; rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;
@ -106,7 +107,7 @@ int main(int argc, char *argv[]) {
if (strcmp(argv[i], "-p")==0 && i < argc-1) { if (strcmp(argv[i], "-p")==0 && i < argc-1) {
ipSet.port[0] = atoi(argv[++i]); ipSet.port[0] = atoi(argv[++i]);
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) { } else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn)); tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) { } else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
rpcInit.numOfThreads = atoi(argv[++i]); rpcInit.numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) { } else if (strcmp(argv[i], "-m")==0 && i < argc-1) {

View File

@ -77,6 +77,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0; int numOfReqs = 0;
int appThreads = 1; int appThreads = 1;
char serverIp[40] = "127.0.0.1"; char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime; struct timeval systemTime;
int64_t startTime, endTime; int64_t startTime, endTime;
pthread_attr_t thattr; pthread_attr_t thattr;
@ -98,7 +99,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100; rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000; rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael"; rpcInit.user = "michael";
rpcInit.secret = "mypassword"; rpcInit.secret = secret;
rpcInit.ckey = "key"; rpcInit.ckey = "key";
rpcInit.spi = 1; rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;

View File

@ -17,6 +17,7 @@
#include "tulog.h" #include "tulog.h"
#include "talgo.h" #include "talgo.h"
#include "tutil.h" #include "tutil.h"
#include "ttime.h"
#include "tcompare.h" #include "tcompare.h"
#include "exception.h" #include "exception.h"
@ -71,6 +72,8 @@ typedef struct STableCheckInfo {
int32_t compSize; int32_t compSize;
int32_t numOfBlocks; // number of qualified data blocks not the original blocks int32_t numOfBlocks; // number of qualified data blocks not the original blocks
SDataCols* pDataCols; SDataCols* pDataCols;
int32_t chosen; // indicates which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator SSkipListIterator* iiter; // imem buffer skip list iterator
@ -79,8 +82,6 @@ typedef struct STableCheckInfo {
typedef struct STableBlockInfo { typedef struct STableBlockInfo {
SCompBlock* compBlock; SCompBlock* compBlock;
STableCheckInfo* pTableCheckInfo; STableCheckInfo* pTableCheckInfo;
// int32_t blockIndex;
// int32_t groupIdx; /* number of group is less than the total number of tables */
} STableBlockInfo; } STableBlockInfo;
typedef struct SBlockOrderSupporter { typedef struct SBlockOrderSupporter {
@ -120,7 +121,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock, static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa); SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle); STsdbQueryHandle* pQueryHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) { static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
@ -134,7 +135,7 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
pCompBlockLoadInfo->fileId = -1; pCompBlockLoadInfo->fileId = -1;
} }
TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList) { TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
// todo 1. filter not exist table // todo 1. filter not exist table
// todo 2. add the reference count for each table that is involved in query // todo 2. add the reference count for each table that is involved in query
@ -147,6 +148,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
pQueryHandle->cur.win = TSWINDOW_INITIALIZER; pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order); pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
pQueryHandle->activeIndex = 0; // current active table index pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb); tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
@ -201,8 +203,8 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
return (TsdbQueryHandleT) pQueryHandle; return (TsdbQueryHandleT) pQueryHandle;
} }
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList) { TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList); STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_LAST; pQueryHandle->type = TSDB_QUERY_TYPE_LAST;
pQueryHandle->order = TSDB_ORDER_DESC; pQueryHandle->order = TSDB_ORDER_DESC;
@ -227,8 +229,8 @@ SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle) {
return res; return res;
} }
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList) { TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList); STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
// pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded // pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded
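The void* qinfo argument now threaded through tsdbQueryTables, tsdbQueryLastRow and tsdbQueryRowsInExternalWindow is only stored on the handle so that later tsdbTrace messages can identify the owning query. A minimal call-site sketch, assuming cond, groupList and pQInfo are prepared by the query layer (they are placeholders here, not code from this change):

    TsdbQueryHandleT *pHandle = tsdbQueryTables(tsdb, &cond, &groupList, pQInfo);
    // the same pQInfo pointer is echoed back in subsequent tsdbTrace() output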
@ -303,6 +305,83 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
return true; return true;
} }
SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
SDataRow rmem = NULL, rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node != NULL) {
rmem = SL_GET_NODE_DATA(node);
}
}
if (pCheckInfo->iiter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = SL_GET_NODE_DATA(node);
}
}
if (rmem != NULL && rimem != NULL) {
if (dataRowKey(rmem) < dataRowKey(rimem)) {
pCheckInfo->chosen = 0;
return rmem;
} else if (dataRowKey(rmem) == dataRowKey(rimem)) {
// data ts are duplicated, ignore the data in mem
tSkipListIterNext(pCheckInfo->iter);
pCheckInfo->chosen = 1;
return rimem;
} else {
pCheckInfo->chosen = 1;
return rimem;
}
}
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
}
if (rimem != NULL) {
pCheckInfo->chosen = 1;
return rimem;
}
return NULL;
}
bool moveToNextRow(STableCheckInfo* pCheckInfo) {
bool hasNext = false;
if (pCheckInfo->chosen == 0) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
} else {
if (pCheckInfo->chosen == 1) {
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
}
return hasNext;
}
static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
size_t size = taosArrayGetSize(pHandle->pTableCheckInfo); size_t size = taosArrayGetSize(pHandle->pTableCheckInfo);
assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1); assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1);
@ -312,31 +391,13 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
STable* pTable = pCheckInfo->pTableObj; STable* pTable = pCheckInfo->pTableObj;
assert(pTable != NULL); assert(pTable != NULL);
// no data in cache, abort initTableMemIterator(pHandle, pCheckInfo);
if (pTable->mem == NULL && pTable->imem == NULL) { SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
return false; return false;
} }
if (pCheckInfo->iter == NULL && pTable->mem) {
pCheckInfo->iter = tSkipListCreateIterFromVal(pTable->mem->pData, (const char*) &pCheckInfo->lastKey,
TSDB_DATA_TYPE_TIMESTAMP, pHandle->order);
if (pCheckInfo->iter == NULL) {
return false;
}
if (!tSkipListIterNext(pCheckInfo->iter)) { // buffer is empty
return false;
}
}
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node == NULL) {
return false;
}
SDataRow row = SL_GET_NODE_DATA(node);
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle, tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo); pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
@ -349,7 +410,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1; int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
STimeWindow* win = &pHandle->cur.win; STimeWindow* win = &pHandle->cur.win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey, pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value // update the last key value
@ -382,7 +443,7 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio
return fid; return fid;
} }
static int32_t binarySearchForBlockImpl(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
int32_t firstSlot = 0; int32_t firstSlot = 0;
int32_t lastSlot = numOfBlocks - 1; int32_t lastSlot = numOfBlocks - 1;
@ -448,7 +509,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
TSKEY e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey); TSKEY e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
// discard the unqualified data block based on the query time window // discard the unqualified data block based on the query time window
int32_t start = binarySearchForBlockImpl(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC); int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
int32_t end = start; int32_t end = start;
if (s > pCompInfo->blocks[start].keyLast) { if (s > pCompInfo->blocks[start].keyLast) {
@ -522,7 +583,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false; bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true); SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
int64_t st = taosGetTimestampUs();
if (pCheckInfo->pDataCols == NULL) { if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo); STsdbMeta* pMeta = tsdbGetMeta(pRepo);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
@ -540,9 +603,15 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
blockLoaded = true; blockLoaded = true;
} }
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
taosArrayDestroy(sa); taosArrayDestroy(sa);
tfree(data); tfree(data);
int64_t et = taosGetTimestampUs() - st;
tsdbTrace("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
return blockLoaded; return blockLoaded;
} }
@ -600,7 +669,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
// do not load file block into buffer // do not load file block into buffer
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, binfo.window.skey - step, cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step,
pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle); pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle);
pQueryHandle->realNumOfRows = cur->rows; pQueryHandle->realNumOfRows = cur->rows;
@ -649,7 +718,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) { if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false; return false;
} }
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows); assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows);
@ -1155,7 +1224,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
*numOfAllocBlocks = numOfBlocks; *numOfAllocBlocks = numOfBlocks;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
SBlockOrderSupporter sup = {0}; SBlockOrderSupporter sup = {0};
sup.numOfTables = numOfTables; sup.numOfTables = numOfTables;
sup.numOfBlocksPerTable = calloc(1, sizeof(int32_t) * numOfTables); sup.numOfBlocksPerTable = calloc(1, sizeof(int32_t) * numOfTables);
@ -1192,15 +1261,26 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
pBlockInfo->compBlock = &pBlock[k]; pBlockInfo->compBlock = &pBlock[k];
pBlockInfo->pTableCheckInfo = pTableCheck; pBlockInfo->pTableCheckInfo = pTableCheck;
// pBlockInfo->groupIdx = pTableCheckInfo[j]->groupIdx; // set the group index
// pBlockInfo->blockIndex = pTableCheckInfo[j]->start + k; // set the block index in original table
cnt++; cnt++;
} }
numOfQualTables++; numOfQualTables++;
} }
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables", pQueryHandle, cnt, numOfQualTables); assert(numOfBlocks == cnt);
// since there is only one table qualified, blocks are not sorted
if (numOfQualTables == 1) {
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
tsdbTrace("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
return TSDB_CODE_SUCCESS;
}
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0 assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
sup.numOfTables = numOfQualTables; sup.numOfTables = numOfQualTables;
@ -1257,8 +1337,8 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
break; break;
} }
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d", pQueryHandle, numOfBlocks, tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks,
numOfTables, pQueryHandle->pFileGroup->fileId); numOfTables, pQueryHandle->pFileGroup->fileId, pQueryHandle->qinfo);
assert(numOfBlocks >= 0); assert(numOfBlocks >= 0);
if (numOfBlocks == 0) { if (numOfBlocks == 0) {
@ -1565,19 +1645,22 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL}; pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL};
} }
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle) { STsdbQueryHandle* pQueryHandle) {
int numOfRows = 0; int numOfRows = 0;
int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns); int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
*skey = TSKEY_INITIAL_VAL; *skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj);
int32_t numOfTableCols = schemaNCols(pSchema);
do { do {
SSkipListNode* node = tSkipListIterGet(pIter); SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (node == NULL) { if (row == NULL) {
break; break;
} }
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row); TSKEY key = dataRowKey(row);
if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) || if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
@ -1590,49 +1673,69 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
} }
if (*skey == INT64_MIN) { if (*skey == INT64_MIN) {
*skey = dataRowKey(row); *skey = key;
} }
*ekey = dataRowKey(row); *ekey = key;
int32_t offset = -1;
char* pData = NULL; char* pData = NULL;
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pTable);
int32_t numOfTableCols = schemaNCols(pSchema);
for (int32_t i = 0; i < numOfCols; ++i) { int32_t i = 0, j = 0;
while(i < numOfCols && j < numOfTableCols) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (pSchema->columns[j].colId < pColInfo->info.colId) {
j++;
continue;
}
if (ASCENDING_TRAVERSE(pQueryHandle->order)) { if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes; pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else { } else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes; pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
} }
for(int32_t j = 0; j < numOfTableCols; ++j) { if (pSchema->columns[j].colId == pColInfo->info.colId) {
if (pColInfo->info.colId == pSchema->columns[j].colId) { void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
offset = pSchema->columns[j].offset; if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
break; memcpy(pData, value, varDataTLen(value));
} else {
memcpy(pData, value, pColInfo->info.bytes);
} }
}
j++;
assert(offset != -1); // todo handle error i++;
void *value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset); } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { setVardataNull(pData, pColInfo->info.type);
memcpy(pData, value, varDataTLen(value)); } else {
} else { setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
memcpy(pData, value, pColInfo->info.bytes); }
i++;
} }
} }
while (i < numOfCols) { // the remain columns are all null data
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
}
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
i++;
}
if (++numOfRows >= maxRowsToRead) { if (++numOfRows >= maxRowsToRead) {
tSkipListIterNext(pIter); moveToNextRow(pCheckInfo);
break; break;
} }
} while(tSkipListIterNext(pIter)); } while(moveToNextRow(pCheckInfo));
assert(numOfRows <= maxRowsToRead); assert(numOfRows <= maxRowsToRead);
@ -1646,6 +1749,10 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
} }
} }
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbTrace("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d", pQueryHandle,
elapsedTime, numOfRows, numOfCols);
return numOfRows; return numOfRows;
} }
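The row-copy loop in tsdbReadRowsFromCache is now a two-pointer merge between the queried columns and the table schema, both walked in ascending colId order: equal ids copy the value, schema-only ids are skipped, and queried ids missing from the row schema (plus any columns left over once the schema is exhausted) are set to NULL. A condensed sketch of that matching rule, where queryColId, schemaColId, copyValue and fillNull are illustrative stand-ins for the taosArrayGet/memcpy/setNull code above:

    int32_t i = 0, j = 0;                       // i walks queried columns, j walks the row schema
    while (i < numOfCols && j < numOfTableCols) {
      if (schemaColId(j) < queryColId(i))       { j++; }                        // present in the row, not queried
      else if (schemaColId(j) == queryColId(i)) { copyValue(i, j); i++; j++; }  // match: copy the value
      else                                      { fillNull(i); i++; }           // queried, absent from this schema
    }
    while (i < numOfCols) { fillNull(i++); }    // remaining queried columns become NULL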
@ -153,6 +153,8 @@ bool taosMbsToUcs4(char *mbs, size_t mbs_len, char *ucs4, int32_t ucs4_max_len,
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes); int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);
void taosRandStr(char* str, int32_t size);
int32_t taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs); int32_t taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs);
bool taosValidateEncodec(const char *encodec); bool taosValidateEncodec(const char *encodec);
@ -92,12 +92,14 @@ void* taosArrayGet(const SArray* pArray, size_t index) {
} }
void* taosArrayGetP(const SArray* pArray, size_t index) { void* taosArrayGetP(const SArray* pArray, size_t index) {
void* ret = taosArrayGet(pArray, index); assert(index < pArray->size);
if (ret == NULL) {
void* d = TARRAY_GET_ELEM(pArray, index);
if (d == NULL) {
return NULL; return NULL;
} }
return *(void**)ret; return *(void**)d;
} }
size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; } size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; }
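taosArrayGetP now asserts on out-of-range indexes and reads the slot directly; as before, it assumes the slot stores a pointer and dereferences it once, which is what distinguishes it from taosArrayGet. A small usage sketch, assuming the usual taosArrayInit/taosArrayPush helpers:

    SArray *list = taosArrayInit(4, sizeof(void *));  // elements are pointers
    char   *s    = strdup("abc");
    taosArrayPush(list, &s);                          // store the pointer value itself
    char   *got  = taosArrayGetP(list, 0);            // returns s, not the address of the slot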
@ -19,6 +19,7 @@
#include "tutil.h" #include "tutil.h"
int taosGetFqdn(char *fqdn) { int taosGetFqdn(char *fqdn) {
int code = 0;
char hostname[1024]; char hostname[1024];
hostname[1023] = '\0'; hostname[1023] = '\0';
gethostname(hostname, 1023); gethostname(hostname, 1023);
@ -27,13 +28,15 @@ int taosGetFqdn(char *fqdn) {
h = gethostbyname(hostname); h = gethostbyname(hostname);
if (h != NULL) { if (h != NULL) {
strcpy(fqdn, h->h_name); strcpy(fqdn, h->h_name);
return 0;
} else { } else {
uError("failed to get host name"); uError("failed to get host name(%s)", strerror(errno));
return -1; code = -1;
} }
free(h); // to do: free the resources
// free(h);
return code;
} }
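Note that gethostbyname() returns a pointer to storage managed by the C library, so the commented-out free(h) above must stay disabled; when caller-owned, reentrant results are wanted, getaddrinfo() is the usual alternative. A hedged sketch of that variant, not part of this change:

    #include <netdb.h>
    struct addrinfo hints = {0}, *result = NULL;
    hints.ai_family = AF_INET;
    hints.ai_flags  = AI_CANONNAME;                  // request the canonical (fully qualified) name
    if (getaddrinfo(hostname, NULL, &hints, &result) == 0 && result != NULL) {
      strcpy(fqdn, result->ai_canonname ? result->ai_canonname : hostname);
      freeaddrinfo(result);                          // this result really is owned by the caller
    }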
uint32_t taosGetIpFromFqdn(const char *fqdn) { uint32_t taosGetIpFromFqdn(const char *fqdn) {
@ -47,7 +50,7 @@ uint32_t ip2uint(const char *const ip_addr) {
char ip_addr_cpy[20]; char ip_addr_cpy[20];
char ip[5]; char ip[5];
strcpy(ip_addr_cpy, ip_addr); tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
char *s_start, *s_end; char *s_start, *s_end;
s_start = ip_addr_cpy; s_start = ip_addr_cpy;
@ -206,7 +209,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
int reuse, nocheck; int reuse, nocheck;
int bufSize = 8192000; int bufSize = 8192000;
uTrace("open udp socket:%s:%hu", ip, port); uTrace("open udp socket:0x%x:%hu", ip, port);
memset((char *)&localAddr, 0, sizeof(localAddr)); memset((char *)&localAddr, 0, sizeof(localAddr));
localAddr.sin_family = AF_INET; localAddr.sin_family = AF_INET;
@ -257,7 +260,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
/* bind socket to local address */ /* bind socket to local address */
if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) { if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) {
uError("failed to bind udp socket: %d (%s), %s:%hu", errno, strerror(errno), ip, port); uError("failed to bind udp socket: %d (%s), 0x%x:%hu", errno, strerror(errno), ip, port);
taosCloseSocket(sockFd); taosCloseSocket(sockFd);
return -1; return -1;
} }
@ -363,7 +366,7 @@ int taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
int sockFd; int sockFd;
int reuse; int reuse;
uTrace("open tcp server socket:%s:%hu", ip, port); uTrace("open tcp server socket:0x%x:%hu", ip, port);
bzero((char *)&serverAdd, sizeof(serverAdd)); bzero((char *)&serverAdd, sizeof(serverAdd));
serverAdd.sin_family = AF_INET; serverAdd.sin_family = AF_INET;
@ -27,8 +27,6 @@
#include "tulog.h" #include "tulog.h"
#include "taoserror.h" #include "taoserror.h"
int32_t tmpFileSerialNum = 0;
int32_t strdequote(char *z) { int32_t strdequote(char *z) {
if (z == NULL) { if (z == NULL) {
return 0; return 0;
@ -433,12 +431,24 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) {
#else #else
char *tmpDir = "/tmp/"; char *tmpDir = "/tmp/";
#endif #endif
int64_t ts = taosGetTimestampUs();
strcpy(tmpPath, tmpDir); strcpy(tmpPath, tmpDir);
strcat(tmpPath, tdengineTmpFileNamePrefix); strcat(tmpPath, tdengineTmpFileNamePrefix);
strcat(tmpPath, fileNamePrefix); strcat(tmpPath, fileNamePrefix);
strcat(tmpPath, "-%d-%"PRIu64"-%u-%"PRIu64); strcat(tmpPath, "-%d-%s");
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1), ts);
char rand[8] = {0};
taosRandStr(rand, tListLen(rand) - 1);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
}
void taosRandStr(char* str, int32_t size) {
const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_.";
int32_t len = 39;
for(int32_t i = 0; i < size; ++i) {
str[i] = set[rand()%len];
}
} }
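getTmpfilePath now keys temporary file names on the pid plus a short random suffix from taosRandStr instead of a per-process counter and timestamp. A hedged usage sketch; the "join-buf" prefix and the sample path are illustrative only:

    char tmpFile[PATH_MAX] = {0};
    getTmpfilePath("join-buf", tmpFile);   // e.g. /tmp/<tdengine prefix>join-buf-12345-k3q9x7b
    // taosRandStr() draws from rand(), so suffix uniqueness across calls is probabilistic;
    // the pid component keeps different processes from colliding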
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) { int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) {
@ -68,11 +68,19 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
pWal->num = 0; pWal->num = 0;
pWal->level = pCfg->walLevel; pWal->level = pCfg->walLevel;
pWal->keep = pCfg->keep; pWal->keep = pCfg->keep;
strcpy(pWal->path, path); tstrncpy(pWal->path, path, sizeof(pWal->path));
pthread_mutex_init(&pWal->mutex, NULL); pthread_mutex_init(&pWal->mutex, NULL);
if (access(path, F_OK) != 0) mkdir(path, 0755); if (access(path, F_OK) != 0) {
if (mkdir(path, 0755) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to create directory(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex);
free(pWal);
pWal = NULL;
return NULL;  // bail out here, otherwise the code below dereferences a NULL pWal when keep != 1
}
}
if (pCfg->keep == 1) return pWal; if (pCfg->keep == 1) return pWal;
if (walHandleExistingFiles(path) == 0) if (walHandleExistingFiles(path) == 0)
@ -80,7 +88,7 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
if (pWal->fd <0) { if (pWal->fd <0) {
terrno = TAOS_SYSTEM_ERROR(errno); terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to open", path); wError("wal:%s, failed to open(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex); pthread_mutex_destroy(&pWal->mutex);
free(pWal); free(pWal);
pWal = NULL; pWal = NULL;
@ -119,7 +127,8 @@ void walClose(void *handle) {
int walRenew(void *handle) { int walRenew(void *handle) {
if (handle == NULL) return 0; if (handle == NULL) return 0;
SWal *pWal = handle; SWal *pWal = handle;
int code = 0;
terrno = 0;
pthread_mutex_lock(&pWal->mutex); pthread_mutex_lock(&pWal->mutex);
@ -135,8 +144,8 @@ int walRenew(void *handle) {
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) { if (pWal->fd < 0) {
wError("wal:%d, failed to open(%s)", pWal->name, strerror(errno)); wError("wal:%s, failed to open(%s)", pWal->name, strerror(errno));
code = -1; terrno = TAOS_SYSTEM_ERROR(errno);
} else { } else {
wTrace("wal:%s, it is created", pWal->name); wTrace("wal:%s, it is created", pWal->name);
@ -156,14 +165,15 @@ int walRenew(void *handle) {
pthread_mutex_unlock(&pWal->mutex); pthread_mutex_unlock(&pWal->mutex);
return code; return terrno;
} }
int walWrite(void *handle, SWalHead *pHead) { int walWrite(void *handle, SWalHead *pHead) {
SWal *pWal = handle; SWal *pWal = handle;
int code = 0;
if (pWal == NULL) return -1; if (pWal == NULL) return -1;
terrno = 0;
// no wal // no wal
if (pWal->level == TAOS_WAL_NOLOG) return 0; if (pWal->level == TAOS_WAL_NOLOG) return 0;
if (pHead->version <= pWal->version) return 0; if (pHead->version <= pWal->version) return 0;
@ -174,12 +184,12 @@ int walWrite(void *handle, SWalHead *pHead) {
if(write(pWal->fd, pHead, contLen) != contLen) { if(write(pWal->fd, pHead, contLen) != contLen) {
wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno)); wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno));
code = -1; terrno = TAOS_SYSTEM_ERROR(errno);
} else { } else {
pWal->version = pHead->version; pWal->version = pHead->version;
} }
return code; return terrno;
} }
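walRenew and walWrite above (and the restore helpers below) now record failures in terrno, wrapped with TAOS_SYSTEM_ERROR, and return that value instead of a bare -1. A hedged caller-side sketch; vError and vgId are illustrative placeholders, while tstrerror is the existing code-to-string helper from taoserror.h:

    if (walWrite(pWal, pHead) != 0) {
      // terrno was set inside the wal layer, typically TAOS_SYSTEM_ERROR(errno)
      vError("vgId:%d, failed to write wal since %s", vgId, tstrerror(terrno));
      return terrno;
    }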
void walFsync(void *handle) { void walFsync(void *handle) {
@ -196,11 +206,11 @@ void walFsync(void *handle) {
int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) { int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) {
SWal *pWal = handle; SWal *pWal = handle;
int code = 0;
struct dirent *ent; struct dirent *ent;
int count = 0; int count = 0;
uint32_t maxId = 0, minId = -1, index =0; uint32_t maxId = 0, minId = -1, index =0;
terrno = 0;
int plen = strlen(walPrefix); int plen = strlen(walPrefix);
char opath[TSDB_FILENAME_LEN+5]; char opath[TSDB_FILENAME_LEN+5];
@ -224,30 +234,30 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
closedir(dir); closedir(dir);
if (count == 0) { if (count == 0) {
if (pWal->keep) code = walRenew(pWal); if (pWal->keep) terrno = walRenew(pWal);
return code; return terrno;
} }
if ( count != (maxId-minId+1) ) { if ( count != (maxId-minId+1) ) {
wError("wal:%s, messed up, count:%d max:%d min:%d", opath, count, maxId, minId); wError("wal:%s, messed up, count:%d max:%d min:%d", opath, count, maxId, minId);
code = -1; terrno = TAOS_SYSTEM_ERROR(TSDB_CODE_APP_ERROR);
} else { } else {
wTrace("wal:%s, %d files will be restored", opath, count); wTrace("wal:%s, %d files will be restored", opath, count);
for (index = minId; index<=maxId; ++index) { for (index = minId; index<=maxId; ++index) {
sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index); sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index);
code = walRestoreWalFile(pWal, pVnode, writeFp); terrno = walRestoreWalFile(pWal, pVnode, writeFp);
if (code < 0) break; if (terrno < 0) break;
} }
} }
if (code == 0) { if (terrno == 0) {
if (pWal->keep == 0) { if (pWal->keep == 0) {
code = walRemoveWalFiles(opath); terrno = walRemoveWalFiles(opath);
if (code == 0) { if (terrno == 0) {
if (remove(opath) < 0) { if (remove(opath) < 0) {
wError("wal:%s, failed to remove directory(%s)", opath, strerror(errno)); wError("wal:%s, failed to remove directory(%s)", opath, strerror(errno));
code = -1; terrno = TAOS_SYSTEM_ERROR(errno);
} }
} }
} else { } else {
@ -258,12 +268,12 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO); pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) { if (pWal->fd < 0) {
wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno)); wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno));
code = -1; terrno = TAOS_SYSTEM_ERROR(errno);
} }
} }
} }
return code; return terrno;
} }
int walGetWalFile(void *handle, char *name, uint32_t *index) { int walGetWalFile(void *handle, char *name, uint32_t *index) {
@ -292,40 +302,47 @@ int walGetWalFile(void *handle, char *name, uint32_t *index) {
} }
static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
int code = 0;
char *name = pWal->name; char *name = pWal->name;
terrno = 0;
char *buffer = malloc(1024000); // size for one record char *buffer = malloc(1024000); // size for one record
if (buffer == NULL) return -1; if (buffer == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
SWalHead *pHead = (SWalHead *)buffer; SWalHead *pHead = (SWalHead *)buffer;
int fd = open(name, O_RDONLY); int fd = open(name, O_RDONLY);
if (fd < 0) { if (fd < 0) {
wError("wal:%s, failed to open for restore(%s)", name, strerror(errno)); wError("wal:%s, failed to open for restore(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
free(buffer); free(buffer);
return -1; return terrno;
} }
wTrace("wal:%s, start to restore", name); wTrace("wal:%s, start to restore", name);
while (1) { while (1) {
int ret = read(fd, pHead, sizeof(SWalHead)); int ret = read(fd, pHead, sizeof(SWalHead));
if ( ret == 0) { code = 0; break;} if ( ret == 0) break;
if (ret != sizeof(SWalHead)) { if (ret != sizeof(SWalHead)) {
wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno)); wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break; break;
} }
if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
wWarn("wal:%s, cksum is messed up, skip the rest of file", name); wWarn("wal:%s, cksum is messed up, skip the rest of file", name);
terrno = TAOS_SYSTEM_ERROR(errno);
break; break;
} }
ret = read(fd, pHead->cont, pHead->len); ret = read(fd, pHead->cont, pHead->len);
if ( ret != pHead->len) { if ( ret != pHead->len) {
wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret); wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
terrno = TAOS_SYSTEM_ERROR(errno);
break; break;
} }
@ -336,11 +353,10 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
close(fd); close(fd);
free(buffer); free(buffer);
return code; return terrno;
} }
int walHandleExistingFiles(const char *path) { int walHandleExistingFiles(const char *path) {
int code = 0;
char oname[TSDB_FILENAME_LEN * 3]; char oname[TSDB_FILENAME_LEN * 3];
char nname[TSDB_FILENAME_LEN * 3]; char nname[TSDB_FILENAME_LEN * 3];
char opath[TSDB_FILENAME_LEN]; char opath[TSDB_FILENAME_LEN];
@ -350,6 +366,7 @@ int walHandleExistingFiles(const char *path) {
struct dirent *ent; struct dirent *ent;
DIR *dir = opendir(path); DIR *dir = opendir(path);
int plen = strlen(walPrefix); int plen = strlen(walPrefix);
terrno = 0;
if (access(opath, F_OK) == 0) { if (access(opath, F_OK) == 0) {
// old directory is there, it means restore process is not finished // old directory is there, it means restore process is not finished
@ -360,13 +377,19 @@ int walHandleExistingFiles(const char *path) {
int count = 0; int count = 0;
while ((ent = readdir(dir))!= NULL) { while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) { if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
if (access(opath, F_OK) != 0) mkdir(opath, 0755);
sprintf(oname, "%s/%s", path, ent->d_name); sprintf(oname, "%s/%s", path, ent->d_name);
sprintf(nname, "%s/old/%s", path, ent->d_name); sprintf(nname, "%s/old/%s", path, ent->d_name);
if (access(opath, F_OK) != 0) {
if (mkdir(opath, 0755) != 0) {
wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
}
if (rename(oname, nname) < 0) { if (rename(oname, nname) < 0) {
wError("wal:%s, failed to move to new:%s", oname, nname); wError("wal:%s, failed to move to new:%s", oname, nname);
code = -1; terrno = TAOS_SYSTEM_ERROR(errno);
break; break;
} }
@ -378,14 +401,14 @@ int walHandleExistingFiles(const char *path) {
} }
closedir(dir); closedir(dir);
return code; return terrno;
} }
static int walRemoveWalFiles(const char *path) { static int walRemoveWalFiles(const char *path) {
int plen = strlen(walPrefix); int plen = strlen(walPrefix);
char name[TSDB_FILENAME_LEN * 3]; char name[TSDB_FILENAME_LEN * 3];
int code = 0;
terrno = 0;
if (access(path, F_OK) != 0) return 0; if (access(path, F_OK) != 0) return 0;
struct dirent *ent; struct dirent *ent;
@ -396,13 +419,13 @@ static int walRemoveWalFiles(const char *path) {
sprintf(name, "%s/%s", path, ent->d_name); sprintf(name, "%s/%s", path, ent->d_name);
if (remove(name) <0) { if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno)); wError("wal:%s, failed to remove(%s)", name, strerror(errno));
code = -1; break; terrno = TAOS_SYSTEM_ERROR(errno);
} }
} }
} }
closedir(dir); closedir(dir);
return code; return terrno;
} }
@ -1,42 +1,42 @@
#!/bin/bash #!/bin/bash
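# note: PYTHONMALLOC=malloc makes CPython use the raw libc allocator instead of pymalloc, so memory debuggers can track allocations made through the C client library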
# insert # insert
python3 ./test.py -g -f insert/basic.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/basic.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/int.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/int.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/float.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/float.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bigint.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bigint.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bool.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bool.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/double.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/double.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/smallint.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/smallint.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/tinyint.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/tinyint.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/binary.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/binary.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/date.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/date.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/nchar.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/nchar.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/multi.py PYTHONMALLOC=malloc python3 ./test.py -g -f insert/multi.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# table # table
python3 ./test.py -g -f table/column_name.py PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_name.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/column_num.py PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_num.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/db_table.py PYTHONMALLOC=malloc python3 ./test.py -g -f table/db_table.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# import # import
python3 ./test.py -g -f import_merge/importDataLastSub.py PYTHONMALLOC=malloc python3 ./test.py -g -f import_merge/importDataLastSub.py
python3 ./test.py -g -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
#tag #tag
python3 ./test.py $1 -f tag_lite/filter.py PYTHONMALLOC=malloc python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1 PYTHONMALLOC=malloc python3 ./test.py $1 -s && sleep 1
@ -2,11 +2,11 @@
# step 1: start dnode1 # step 1: start dnode1
# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode) # step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode)
# step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841) # step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841)
# step 4: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc # step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
# step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2 # step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-16d, now+16d # step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting sync end # step 7: restart dnode2, waiting sync end
# expect: in dnode2, the files 1840 and 1842 will be removed # expect: in dnode2, the files 1837 and 1839 will be removed
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
@ -14,10 +14,10 @@ system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3 system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4 system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1 system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1 system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1 system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1 system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2 system sh/cfg.sh -n dnode2 -c walLevel -v 2
@ -39,11 +39,6 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
@ -64,16 +59,16 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2 sql create dnode $hostname2
#sql create dnode $hostname3 sql create dnode $hostname3
sleep 3000 sleep 3000
$totalTableNum = 1 $totalTableNum = 1
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 1 cache 1 sql create database $db replica 2 cache 1
sql use $db sql use $db
# create table , insert data # create table , insert data
@ -82,7 +77,7 @@ sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000 $rowNum = 130000
$tblNum = $totalTableNum $tblNum = $totalTableNum
$totalRows = 0 $totalRows = 0
#$tsStart = 1420041600000 $tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2M data in order to falling disc, generate one file # insert over 2M data in order to falling disc, generate one file
$i = 0 $i = 0
@ -102,19 +97,24 @@ while $i < $tblNum
endw endw
sql select count(*) from $stb sql select count(*) from $stb
sleep 1000 print rows:$rows data00:$data00
print data00 $data00 if $rows != 1 then
if $data00 != $totalRows then return -1
return -1
endi endi
if $data00 == 0 then
return -1
endi
$totalRows = $data00
print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
sql insert into $tb values ( now - 15d , -15 ) sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now + 15d , 15 ) sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2 $totalRows = $totalRows + 2
print ============== step4: stop dnode2, so date rows falling disc, generate two new files in dnode2 print ============== step4: stop dnode2, so date rows falling disc, generate two new files in dnode2
system sh/exec.sh -n dnode2 -s stop system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer sleep $sleepTimer
wait_dnode2_offline: wait_dnode2_offline:
@ -153,48 +153,14 @@ if $data00 != $totalRows then
endi endi
print ============== step5: insert two data rows: now-16d, now+16d, print ============== step5: insert two data rows: now-16d, now+16d,
sql insert into $tb values ( now - 16d , -16 ) sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now + 16d , 16 ) sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2 $totalRows = $totalRows + 2
return 1
print ============== step5: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb sql select count(*) from $stb
print data00 $data00 print data00 $data00
if $data00 != $totalRows then if $data00 != $totalRows then
return -1 return -1
endi endi
print ============== step6: in dnode2, the files 1840 and 1842 will be removed print ============== step6: please check there should be 3 files in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file in sim/dnode3/data/vnode/vnode2/tsdb/data/
# how check in script ???
@ -0,0 +1,52 @@
# Test case describe: dnode1 is only mnode, dnode2/dnode3 are only vnode
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode)
# step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841)
# step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
# step 5: stop dnode2, so data rows fall to disc, generating two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting sync end
# expect: in dnode2, the files 1837 and 1839 will be removed
sql connect
sleep 3000
print ============== step7: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep 1000
# check using select
$db = db
$stb = stb
sql use $db
sql select count(*) from $stb
print data00 $data00, should equal to dn2_mn1_cache_file_sync.sim output
#if $data00 != $totalRows then
# return -1
#endi
print ============== step8: please check there should be 1 file in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file in sim/dnode3/data/vnode/vnode2/tsdb/data/
@ -23,28 +23,39 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 200 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 200 system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 200 #system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 200 #system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 200 #system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 200
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode1 -c offlineThreshold -v 20 system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode2 -c offlineThreshold -v 20 system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 20 system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode4 -c offlineThreshold -v 20 system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode5 -c offlineThreshold -v 20 system sh/cfg.sh -n dnode5 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1 system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1 system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
@ -63,9 +74,9 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster, then create database, create table , and insert data print ============== step2: start dnode2/dnode3 and add into cluster, then create database, create table , and insert data
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sleep 1000
sql create dnode $hostname2 sql create dnode $hostname2
sql create dnode $hostname3 sql create dnode $hostname3
sleep 3000
$rowNum = 100 $rowNum = 100
$tblNum = 16 $tblNum = 16
@ -151,7 +162,7 @@ endi
# return -1 # return -1
#endi #endi
sleep 30000 sleep 15000
wait_drop: wait_drop:
sql show dnodes sql show dnodes
@ -174,22 +185,36 @@ endi
if $dnode2Status != ready then if $dnode2Status != ready then
return -1 return -1
endi endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then if $dnode4Status != ready then
return -1 return -1
endi endi
print ============== step4-1: restart dnode3, and add into cluster
print ============== step5: start dnode5 and add into cluster , drop database system rm -rf ../../sim/dnode3
sql drop database $db
sleep 1000
system sh/exec.sh -n dnode5 -s start
sql create dnode $hostname5
sleep 3000 sleep 3000
wait_dnode5:
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 3000
wait_dnode3_ready:
sql show dnodes sql show dnodes
print rows: $rows
if $rows != 4 then if $rows != 4 then
sleep 3000 sleep 3000
goto wait_dnode5 goto wait_dnode3_ready
endi endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
@ -198,6 +223,7 @@ print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode1Status = $data4_1 $dnode1Status = $data4_1
$dnode2Status = $data4_2 $dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4 $dnode4Status = $data4_4
$dnode5Status = $data4_5 $dnode5Status = $data4_5
@ -207,6 +233,9 @@ endi
if $dnode2Status != ready then if $dnode2Status != ready then
return -1 return -1
endi endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then if $dnode4Status != ready then
return -1 return -1
endi endi
@ -214,10 +243,58 @@ if $dnode5Status != ready then
return -1 return -1
endi endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: start dnode5 and add into cluster , drop database
sql drop database $db
sleep 1000
system sh/exec.sh -n dnode5 -s start
sql create dnode $hostname5
sleep 3000
wait_dnode5:
sql show dnodes
if $rows != 5 then
sleep 3000
goto wait_dnode5
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode4Status = $data4_4
$dnode5Status = $data4_5
$dnode6Status = $data4_6
if $dnode1Status != ready then
return -1
endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
if $dnode5Status != ready then
return -1
endi
if $dnode6Status != ready then
return -1
endi
print ============== step6: create database and table until not free vnodes print ============== step6: create database and table until not free vnodes
$rowNum = 100 $rowNum = 100
$tblNum = 24 $tblNum = 32
$totalRows = 0 $totalRows = 0
$tsStart = 1420041600000 $tsStart = 1420041600000
@ -259,8 +336,9 @@ if $data00 != $totalRows then
return -1 return -1
endi endi
print ============== step7: drop dnode3, and system should prompt cannot drop dnodes print ============== step7: drop dnode $hostname5, system should prompt "DB error: no enough dnodes"
sql_error drop dnode $hostname3 sql_error drop dnode $hostname5
print ============== step8: add one new table, and system should prompt 'need more dnode' print error: $error
print ============== step8: create table tb_more using $stb tags( 1000 ), system should prompt 'DB error: no enough dnodes'
sql_error create table tb_more using $stb tags( 1000 ) sql_error create table tb_more using $stb tags( 1000 )
print error: $error
@ -89,11 +89,8 @@ if $data00 != $totalRows then
return -1 return -1
endi endi
print ============== step2-1: stop dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============== step2-1: start dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop
sleep $sleepTimer sleep $sleepTimer
wait_dnode2_offline_0: wait_dnode2_offline_0:
sql show dnodes sql show dnodes
@ -151,10 +148,6 @@ if $data00 != $totalRows then
endi endi
print ============== step3: start dnode3 and add into cluster , then alter replica from 1 to 2, and waiting sync print ============== step3: start dnode3 and add into cluster , then alter replica from 1 to 2, and waiting sync
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3 sql create dnode $hostname3
@ -0,0 +1,273 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
$totalTableNum = 100
$sleepTimer = 3000
$db = db
sql create database $db replica 3 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 100
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1420041600000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step3: stop dnode4, and remove its vnodeX subdirector
system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode4_offline_0:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_0
endi
wait_dnode4_vgroup_offline:
sql show vgroups
if $rows != 1 then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
system rm -rf ../../../sim/dnode4/data/vnode/*
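# The rm above simulates losing all vnode data on dnode4; after the restart in step4 the
# data should be re-synced from the remaining replicas. An optional sanity check (the path
# is assumed to mirror the rm command above, not part of the original case) could list the
# directory to confirm it is now empty:
# system ls ../../../sim/dnode4/data/vnode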
sleep 1000
print ============== step4: restart dnode4 and wait for sync to finish
system sh/exec.sh -n dnode4 -s start
sleep $sleepTimer
wait_dnode4_reready:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_reready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_reready
endi
wait_dnode4_vgroup_slave:
sql show vgroups
if $rows != 1 then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != slave then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
if $dnode3Vtatus != master then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
print ============== step5: stop dnode3/dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode3 -s stop
sleep $sleepTimer
wait_dnode23_offline:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode23_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode23_offline
endi
if $dnode3Status != offline then
sleep 2000
goto wait_dnode23_offline
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode23_offline
endi
wait_dnode4_vgroup_master:
sql show vgroups
if $rows != 1 then
sleep 2000
goto wait_dnode4_vgroup_master
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
sleep 2000
goto wait_dnode4_vgroup_master
endi
if $dnode3Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_master
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql insert into $tb values ( now , 20000 ) ( now + 1a, 20001 ) ( now + 2a, 20002 )
$totalRows = $totalRows + 3
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
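# Optional diagnostic (an addition, not part of the original check): the last row written
# above has c1 = 20002, so printing last(c1) makes a failed re-sync easier to spot in the log.
sql select last(c1) from $tb
print last c1 of $tb : $data00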

View File

@ -0,0 +1,194 @@
# Test case description: dnode1 is mnode only, dnode2/dnode3 are vnode only
# step 1: start dnode1
# step 2: start dnode2 and dnode3, add both into the cluster (suppose dnode2 hosts the master vnode)
# step 3: create db and table, insert data, and let it be flushed into a data file (confine it to only one file, e.g. 1841)
# step 4: insert old data (now-15d) and new data (now+15d), keeping the rows few enough to stay in cache without being flushed to disk
# step 5: stop dnode2, so the cached data rows are flushed to disk, generating two new files 1840 and 1842 in dnode2
# step 6: insert two data rows: now-16d, now+16d
# step 7: restart dnode2 and wait for sync to finish
# expect: in dnode2, the files 1840 and 1842 will be removed (an optional manual check is sketched just below)
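# Possible manual verification of the expectation above (the path and file-group names are
# assumptions about the dnode2 data layout, not something this script asserts): after step 7
# one could list the tsdb data directory of the vnode on dnode2 and confirm that only the
# original file group (e.g. 1841) remains, for example:
# system ls ../../../sim/dnode2/data/vnode/vnode2/tsdb/data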
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3, add them into the cluster, then create a database with replica 2, create a table and insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2 MB of data to force a flush to disk, generating one data file
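# Rough arithmetic behind the 2 MB figure (an estimate, not measured): each row carries a
# timestamp (8 bytes) plus a double (8 bytes), so 130000 rows are roughly 130000 * 16B
# ~= 2.1 MB of raw data, which exceeds the small cache configured above ("cache 1") and
# should therefore be committed to a data file.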
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
# $ts = $tsStart + $x
sql insert into $tb values ( now + 0s , $x ) ( now + 1s , $x ) ( now + 2s , $x ) ( now + 3s , $x ) ( now + 4s , $x ) ( now + 5s , $x ) ( now + 6s , $x ) ( now + 7s , $x ) ( now + 8s , $x ) ( now + 9s , $x )
$x = $x + 10
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
if $data00 == 0 then
return -1
endi
$totalRows = $data00
print ============== step3: insert old data (now-15d) and new data (now+15d), keeping the rows few enough to stay in cache without being flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
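# Background for the next step (based on the default days-per-file partitioning, which is an
# assumption here, not set by this script): the rows at now-20d and now-40d fall into time
# ranges different from the bulk data above, so once they are flushed they should land in
# separate file groups.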
print ============== step4: stop dnode2, so the cached data rows are flushed to disk, generating two new files in dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
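# Instead of a fixed sleep, the master hand-off could be polled the same way other cases in
# this suite do (sketch only; the column offset below is an assumption and would need to be
# checked against the actual "show vgroups" output for a replica-2 vgroup):
# wait_dnode3_vgroup_master:
# sql show vgroups
# if $data5_2 != master then
#   sleep 2000
#   goto wait_dnode3_vgroup_master
# endi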
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: insert two data rows: now-16d, now+16d
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
print ============== step6: restart dnode2 and wait for sync to finish
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -2,18 +2,42 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3 system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 1 system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 1 system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -22,21 +46,23 @@ system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start system sh/exec_tarbitrator.sh -s start
print ============== step1: replica is 1, and start 1 dnode, then create tables and insert data
print ============== step1: replica is 1, and start 1 dnode
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
$db = replica_db1 $totalTableNum = 12
sql create database $db replica 1 maxTables 4
$db = db
sql create database $db replica 1 maxTables $totalTableNum
sql use $db sql use $db
# create table , insert data # create table , insert data
$stb = repl_stb $stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int) sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 10 $rowNum = 10
$tblNum = 12 $tblNum = $totalTableNum
$totalRows = $rowNum * $tblNum
$ts0 = 1420041600000 $ts0 = 1420041600000
$ts = $ts0 $ts = $ts0
@ -55,46 +81,136 @@ while $i < $tblNum
$x = $x + 1 $x = $x + 1
endw endw
$i = $i + 1 $i = $i + 1
print $tb inserted rows: $x
endw endw
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step2: add 1 new dnode, expect balanced print ============== step2: add 1 new dnode, expect balanced
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2 sql create dnode $hostname2
sleep 3000
# expect after balanced, 2 vnodes in dnode1, 1 vnode in dnode2 # expect after balanced, 2 vnodes in dnode1, 1 vnode in dnode2
$x = 0 wait_dnode2_ready:
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes sql show dnodes
print dnode1 openVnodes $data2_1 if $rows != 2 then
print dnode2 openVnodes $data2_2 sleep 2000
if $data2_1 != 2 then goto wait_dnode2_ready
goto show2
endi endi
if $data2_2 != 1 then print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
goto show2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi endi
print ============== step4: stop dnode1, and wait dnode2 master print ============== step3: stop dnode1/dnode2, modify cfg mpeers to 2, and restart dnode1/dnode2
system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop
sleep 3000
$x = 0 system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
loop_wait: system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
$x = $x + 1
sleep 2000
if $x == 10 then
print ERROR: after dnode1 stop, dnode2 didn't become a master!
return -1
endi
sql show mnodes
$dnodeRole = $data2_1
print dnodeRole ==> $dnodeRole
if $dnodeRole != master then system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
goto loop_wait system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
print ============= step4: wait dnode ready
wait_dnode_ready:
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode_ready
endi endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode_ready
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: stop dnode1
system sh/exec.sh -n dnode1 -s stop
sleep 3000
wait_dnode2_master:
sql show mnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_master
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $data2_1 != offline then
sleep 2000
goto wait_dnode2_master
endi
if $data2_2 != master then
sleep 2000
goto wait_dnode2_master
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi