Merge remote-tracking branch 'origin/develop' into feature/crash_gen

Steven Li 2020-06-05 13:16:55 -07:00
commit adb621426d
75 changed files with 2400 additions and 1628 deletions

View File

@ -26,7 +26,7 @@ void tscAddIntoSqlList(SSqlObj *pSql);
void tscRemoveFromSqlList(SSqlObj *pSql);
void tscAddIntoStreamList(SSqlStream *pStream);
void tscRemoveFromStreamList(SSqlStream *pStream, SSqlObj *pSqlObj);
char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj);
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj);
void tscKillQuery(STscObj *pObj, uint32_t killId);
void tscKillStream(STscObj *pObj, uint32_t killId);
void tscKillConnection(STscObj *pObj);

View File

@ -64,6 +64,7 @@ typedef struct SLocalReducer {
tFilePage * pTempBuffer;
struct SQLFunctionCtx *pCtx;
int32_t rowSize; // size of each intermediate result.
int32_t finalRowSize; // final result row size
int32_t status; // denotes whether it is in the reduce process; while reducing, it
bool hasPrevRow; // cannot be released
bool hasUnprocessedRow;
@ -76,6 +77,7 @@ typedef struct SLocalReducer {
SResultInfo * pResInfo;
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
} SLocalReducer;
typedef struct SSubqueryState {

View File

@ -285,8 +285,6 @@ typedef struct {
typedef struct STscObj {
void * signature;
void * pTimer;
char mnodeIp[TSDB_USER_LEN];
uint16_t mnodePort;
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char acctId[TSDB_DB_NAME_LEN];
@ -294,6 +292,7 @@ typedef struct STscObj {
char sversion[TSDB_VERSION_LEN];
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
struct SSqlObj * pHb;
struct SSqlObj * sqlList;
struct SSqlStream *streamList;

View File

@ -341,16 +341,6 @@ bool stableQueryFunctChanged(int32_t funcId) {
*/
void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; }
void initResultInfo(SResultInfo *pResInfo) {
pResInfo->initialized = true; // flag: this struct has been initialized
pResInfo->complete = false;
pResInfo->hasResult = false;
pResInfo->numOfRes = 0;
memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
}
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable) {
assert(pResInfo->interResultBuf == NULL);
@ -387,9 +377,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) {
*/
static void function_finalizer(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo->hasResult != DATA_SET_FLAG) {
tscTrace("no result generated, result is set to NULL");
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
} else {

View File

@ -48,7 +48,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
int32_t radix = 10;
int32_t radixList[3] = {16, 8, 2};
int32_t radixList[3] = {16, 8, 2}; // integer literals with a non-decimal radix: hex, oct, bin
if (pToken->type == TK_HEX || pToken->type == TK_OCT || pToken->type == TK_BIN) {
radix = radixList[pToken->type - TK_HEX];
}

View File

@ -494,7 +494,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql;
pSql->pTscObj = pObj;
//pSql->pTscObj->pSql = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
pStmt->pSql = pSql;
@ -515,7 +514,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
//doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->param = (void*)pSql;
pSql->param = (void*) pSql;
pSql->fp = waitForQueryRsp;
pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;

View File

@ -19,6 +19,7 @@
#include "ttime.h"
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
void tscSaveSlowQueryFp(void *handle, void *tmrId);
void *tscSlowQueryConn = NULL;
@ -96,7 +97,7 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
}
tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
int32_t sqlSize = TSDB_SHOW_SQL_LEN + size;
int32_t sqlSize = TSDB_SLOW_QUERY_SQL_LEN + size;
char *sql = malloc(sqlSize);
if (sql == NULL) {
@ -106,9 +107,9 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
int len = snprintf(sql, size, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
pSql->pTscObj->user, pSql->stime, pSql->res.useconds);
int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SHOW_SQL_LEN - 1) {
sqlLen = len + TSDB_SHOW_SQL_LEN - 1;
int sqlLen = snprintf(sql + len, TSDB_SLOW_QUERY_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SLOW_QUERY_SQL_LEN - 1) {
sqlLen = len + TSDB_SLOW_QUERY_SQL_LEN - 1;
} else {
sqlLen += len;
}
@ -208,25 +209,25 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
if (pStream) {
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
}
if (pStream->callback) {
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
} else {
tscError("failed to kill stream, streamId:%d not exist", killId);
}
}
char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) {
char * pMax = pMsg + TSDB_PAYLOAD_SIZE - 256;
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SCMHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
SQqueryList *pQList = (SQqueryList *)pMsg;
pQList->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc*)(pMsg + sizeof(SQqueryList));
pHeartbeat->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;
// The lock has been moved up into tscBuildHeartBeatMsg.
/* pthread_mutex_lock (&pObj->mutex); */
pMsg += sizeof(SQqueryList);
SSqlObj *pSql = pObj->sqlList;
while (pSql) {
/*
@ -240,47 +241,46 @@ char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) {
strncpy(pQdesc->sql, pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1);
pQdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0;
pQdesc->stime = pSql->stime;
pQdesc->queryId = pSql->queryId;
pQdesc->useconds = pSql->res.useconds;
pQdesc->stime = htobe64(pSql->stime);
pQdesc->queryId = htonl(pSql->queryId);
pQdesc->useconds = htobe64(pSql->res.useconds);
pQList->numOfQueries++;
pHeartbeat->numOfQueries++;
pQdesc++;
pSql = pSql->next;
pMsg += sizeof(SQueryDesc);
if (pMsg > pMax) break;
if (pHeartbeat->numOfQueries >= allocedQueriesNum) break;
}
SStreamList *pSList = (SStreamList *)pMsg;
pSList->numOfStreams = 0;
pHeartbeat->numOfStreams = 0;
SStreamDesc *pSdesc = (SStreamDesc *)pQdesc;
SStreamDesc *pSdesc = (SStreamDesc*) (pMsg + sizeof(SStreamList));
pMsg += sizeof(SStreamList);
SSqlStream *pStream = pObj->streamList;
while (pStream) {
strncpy(pSdesc->sql, pStream->pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1);
pSdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0;
pSdesc->streamId = pStream->streamId;
pSdesc->num = pStream->num;
pSdesc->streamId = htonl(pStream->streamId);
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = pStream->useconds;
pSdesc->stime = pStream->stime - pStream->interval;
pSdesc->ctime = pStream->ctime;
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = pStream->slidingTime;
pSdesc->interval = pStream->interval;
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->interval);
pSList->numOfStreams++;
pHeartbeat->numOfStreams++;
pSdesc++;
pStream = pStream->next;
pMsg += sizeof(SStreamDesc);
if (pMsg > pMax) break;
if (pHeartbeat->numOfStreams >= allocedStreamsNum) break;
}
/* pthread_mutex_unlock (&pObj->mutex); */
int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) +
sizeof(SCMHeartBeatMsg);
pHeartbeat->connId = htonl(pObj->connId);
pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries);
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams);
return pMsg;
return msgLen;
}
void tscKillConnection(STscObj *pObj) {

View File

@ -93,7 +93,7 @@ static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo
static int32_t validateDNodeConfig(tDCLSQL* pOptions);
static int32_t validateLocalConfig(tDCLSQL* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
@ -531,7 +531,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_KILL_QUERY:
case TSDB_SQL_KILL_STREAM:
case TSDB_SQL_KILL_CONNECTION: {
if ((code = setKillInfo(pSql, pInfo)) != TSDB_CODE_SUCCESS) {
if ((code = setKillInfo(pSql, pInfo, pInfo->type)) != TSDB_CODE_SUCCESS) {
return code;
}
@ -1881,21 +1881,37 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (index.columnIndex < numOfCols) {
if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
return invalidSqlErrMsg(pQueryInfo->msg, msg6);
}
if (index.columnIndex > 0) {
index.columnIndex -= numOfCols;
}
// 2. valid the column type
int16_t colType = pSchema[index.columnIndex].type;
if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
int16_t colType = 0;
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
colType = TSDB_DATA_TYPE_BINARY;
} else {
colType = pSchema[index.columnIndex].type;
}
if (colType == TSDB_DATA_TYPE_BOOL) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
SSchema s = pTagSchema[index.columnIndex];
SSchema s = {0};
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
s.bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
s.type = TSDB_DATA_TYPE_BINARY;
s.colId = TSDB_TBNAME_COLUMN_INDEX;
} else {
s = pTagSchema[index.columnIndex];
}
int16_t bytes = 0;
int16_t type = 0;
@ -2229,35 +2245,43 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg1 = "invalid ip address";
const char* msg2 = "invalid port";
int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
const char* msg1 = "invalid connection ID";
const char* msg2 = "invalid query ID";
const char* msg3 = "invalid stream ID";
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
SSQLToken* ip = &(pInfo->pDCLInfo->ip);
if (ip->n > TSDB_KILL_MSG_LEN) {
SSQLToken* idStr = &(pInfo->pDCLInfo->ip);
if (idStr->n > TSDB_KILL_MSG_LEN) {
return TSDB_CODE_INVALID_SQL;
}
strncpy(pCmd->payload, ip->z, ip->n);
strncpy(pCmd->payload, idStr->z, idStr->n);
const char delim = ':';
char* connIdStr = strtok(idStr->z, &delim);
char* queryIdStr = strtok(NULL, &delim);
char* ipStr = strtok(ip->z, &delim);
char* portStr = strtok(NULL, &delim);
if (!validateIpAddress(ipStr, strlen(ipStr))) {
int32_t connId = (int32_t)strtol(connIdStr, NULL, 10);
if (connId <= 0) {
memset(pCmd->payload, 0, strlen(pCmd->payload));
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
uint16_t port = (uint16_t)strtol(portStr, NULL, 10);
if (port <= 0 || port > 65535) {
if (killType == TSDB_SQL_KILL_CONNECTION) {
return TSDB_CODE_SUCCESS;
}
int32_t queryId = (int32_t)strtol(queryIdStr, NULL, 10);
if (queryId <= 0) {
memset(pCmd->payload, 0, strlen(pCmd->payload));
if (killType == TSDB_SQL_KILL_QUERY) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
} else {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
return TSDB_CODE_SUCCESS;
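For reference, the id token accepted by the rewritten setKillInfo has the form <connId> for KILL CONNECTION and <connId>:<queryId|streamId> for KILL QUERY / KILL STREAM. A minimal, self-contained sketch of that parse, using strtol with an end pointer rather than the strtok call above; parseKillId and the sample values are illustrative only, not part of the codebase:

#include <stdio.h>
#include <stdlib.h>

/* Split "connId[:subId]" into two positive integers; returns 0 on success. */
static int parseKillId(const char *token, long *connId, long *subId) {
  char *end = NULL;
  *connId = strtol(token, &end, 10);
  if (*connId <= 0) return -1;
  *subId = 0;
  if (*end == ':') {
    *subId = strtol(end + 1, &end, 10);
    if (*subId <= 0) return -1;
  }
  return 0;
}

int main(void) {
  long connId = 0, queryId = 0;
  if (parseKillId("5:12", &connId, &queryId) == 0) {
    printf("kill query %ld on connection %ld\n", queryId, connId);
  }
  return 0;
}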

View File

@ -278,6 +278,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
pReducer->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
@ -309,10 +310,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16;
pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
int32_t finalRowLength = tscGetResRowLength(pQueryInfo->exprList);
pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList);
pReducer->resColModel = finalmodel;
pReducer->resColModel->capacity = pReducer->nResultBufSize / finalRowLength;
assert(finalRowLength <= pReducer->rowSize);
pReducer->resColModel->capacity = pReducer->nResultBufSize / pReducer->finalRowSize;
assert(pReducer->finalRowSize <= pReducer->rowSize);
pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
// pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize);
@ -389,7 +390,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor
assert(pPage->num <= pDesc->pColumnModel->capacity);
// sort before flush to disk, the data must be consecutively put on tFilePage.
if (pDesc->orderIdx.numOfCols > 0) {
if (pDesc->orderInfo.numOfCols > 0) {
tColDataQSort(pDesc, pPage->num, 0, pPage->num - 1, pPage->data, orderType);
}
@ -590,12 +591,10 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
// disable merge procedure for column projection query
int16_t functionId = pReducer->pCtx[0].functionId;
assert(functionId != TSDB_FUNC_ARITHM);
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pReducer->orderPrjOnSTable) {
return true;
}
@ -604,26 +603,33 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
}
tOrderDescriptor *pOrderDesc = pReducer->pDesc;
int32_t numOfCols = pOrderDesc->orderIdx.numOfCols;
SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
// no group by columns, all data belongs to one group
int32_t numOfCols = orderInfo->numOfCols;
if (numOfCols <= 0) {
return true;
}
if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { //<= 0
// super table interval query
if (orderInfo->pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
/*
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
pOrderDesc->orderIdx.numOfCols -= 1;
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
}
// only one row exists
int32_t ret = compare_a(pOrderDesc, 1, 0, pPrev, 1, 0, tmpBuffer->data);
pOrderDesc->orderIdx.numOfCols = numOfCols;
int32_t index = orderInfo->pData[0];
int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
return (ret == 0);
int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
return ret == 0;
}
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
@ -873,24 +879,24 @@ static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRe
* Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
* by "interuptHandler" function in shell
*/
static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
tFilePage * pFinalDataPage = pLocalReducer->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pRes->pLocalReducer != pLocalReducer) {
/*
* Release of the SSqlObj has been requested, and it is in the destroying function invoked by another thread.
* However, the other thread will WAIT until current process fully completes.
* Since the flag of release struct is set by doLocalReduce function
*/
assert(pRes->pLocalReducer == NULL);
}
// if (pRes->pLocalReducer != pLocalReducer) {
// /*
// * Release of the SSqlObj has been requested, and it is in the destroying function invoked by another thread.
// * However, the other thread will WAIT until current process fully completes.
// * Since the flag of release struct is set by doLocalReduce function
// */
// assert(pRes->pLocalReducer == NULL);
// }
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = pFinalDataPage->num;
pRes->numOfClauseTotal += pRes->numOfRows;
@ -929,9 +935,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
}
int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList);
memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize);
memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize);
pFinalDataPage->num = 0;
return;
}
@ -1037,16 +1041,13 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, j);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
tVariantAssign(&pCtx->param[0], &pExpr->param[0]);
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pExpr->functionId;
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
tVariantDestroy(&pCtx->tag);
char* input = pCtx->aInputElemBuf;
@ -1057,17 +1058,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
} else {
tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
}
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
}
pCtx->currentStage = SECONDARY_STAGE_MERGE;
if (needInit) {
aAggs[pExpr->functionId].init(pCtx);
aAggs[pCtx->functionId].init(pCtx);
}
}
for (int32_t j = 0; j < size; ++j) {
int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId;
int32_t functionId = pLocalReducer->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
@ -1101,8 +1105,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* ts, tag, tagprj function can not decide the output number of current query
* the number of output result is decided by main output
*/
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
int32_t functionId = pExpr->functionId;
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
continue;
}
@ -1136,15 +1139,13 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
if (pExpr->functionId != TSDB_FUNC_TAG) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
int32_t inc = numOfRes - 1; // the tsdb_func_tag function produces only one row of result
memset(buf, 0, (size_t)maxBufSize);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes);
for (int32_t i = 0; i < inc; ++i) {
@ -1160,8 +1161,8 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]);
SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
pLocalReducer->hasPrevRow = false;
@ -1182,13 +1183,13 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
int16_t functionId = pLocalReducer->pCtx[0].functionId;
if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query
ret = 1; // disable merge procedure
} else {
tOrderDescriptor *pDesc = pLocalReducer->pDesc;
if (pDesc->orderIdx.numOfCols > 0) {
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
@ -1274,7 +1275,7 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
return true;
}
@ -1341,7 +1342,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
// the first column must be the timestamp column
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, ekey, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, false);
doFillResult(pSql, pLocalReducer, false);
}
return true;
@ -1374,7 +1375,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
pQueryInfo->slidingTimeUnit, tinfo.precision);
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, 0, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, true);
doFillResult(pSql, pLocalReducer, true);
}
}
@ -1408,13 +1409,11 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, k);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) {
pCtx->ptsOutputBuf = ((char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * numOfRes);
}
}

View File

@ -114,6 +114,8 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
if (pIpList->numOfIps > 0)
tscSetMgmtIpList(pIpList);
pSql->pTscObj->connId = htonl(pRsp->connId);
if (pRsp->killConnection) {
tscKillConnection(pObj);
} else {
@ -332,9 +334,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
if (rpcMsg->code != TSDB_CODE_ACTION_IN_PROGRESS) {
rpcMsg->code = pRes->code ? pRes->code : pRes->numOfRows;
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code;
tscTrace("%p SQL result:%s res:%p", pSql, tstrerror(pRes->code), pSql);
bool shouldFree = tscShouldBeFreed(pSql);
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);
@ -1129,13 +1130,6 @@ int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMKillQueryMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
SCMKillQueryMsg *pKill = (SCMKillQueryMsg*)pCmd->payload;
strncpy(pKill->queryId, pInfo->pDCLInfo->ip.z, pInfo->pDCLInfo->ip.n);
switch (pCmd->command) {
case TSDB_SQL_KILL_QUERY:
pCmd->msgType = TSDB_MSG_TYPE_CM_KILL_QUERY;
@ -1743,57 +1737,43 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int tscEstimateHeartBeatMsgLength(SSqlObj *pSql) {
int size = 0;
STscObj *pObj = pSql->pTscObj;
size += tsRpcHeadSize;
size += sizeof(SQqueryList);
SSqlObj *tpSql = pObj->sqlList;
while (tpSql) {
size += sizeof(SQueryDesc);
tpSql = tpSql->next;
}
size += sizeof(SStreamList);
SSqlStream *pStream = pObj->streamList;
while (pStream) {
size += sizeof(SStreamDesc);
pStream = pStream->next;
}
return size + TSDB_EXTRA_PAYLOAD_SIZE;
}
int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
char *pMsg, *pStart;
int msgLen = 0;
int size = 0;
SSqlCmd *pCmd = &pSql->cmd;
STscObj *pObj = pSql->pTscObj;
pthread_mutex_lock(&pObj->mutex);
size = tscEstimateHeartBeatMsgLength(pSql);
int32_t numOfQueries = 2;
SSqlObj *tpSql = pObj->sqlList;
while (tpSql) {
tpSql = tpSql->next;
numOfQueries++;
}
int32_t numOfStreams = 2;
SSqlStream *pStream = pObj->streamList;
while (pStream) {
pStream = pStream->next;
numOfStreams++;
}
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SCMHeartBeatMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
pthread_mutex_unlock(&pObj->mutex);
tscError("%p failed to malloc for heartbeat msg", pSql);
return -1;
}
pMsg = pCmd->payload;
pStart = pMsg;
SCMHeartBeatMsg *pHeartbeat = (SCMHeartBeatMsg *)pCmd->payload;
pHeartbeat->numOfQueries = numOfQueries;
pHeartbeat->numOfStreams = numOfStreams;
int msgLen = tscBuildQueryStreamDesc(pHeartbeat, pObj);
pMsg = tscBuildQueryStreamDesc(pMsg, pObj);
pthread_mutex_unlock(&pObj->mutex);
msgLen = pMsg - pStart;
pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_HEARTBEAT;
assert(msgLen + minMsgSize() <= size);
return TSDB_CODE_SUCCESS;
}
@ -2204,6 +2184,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
strcpy(pObj->sversion, pConnect->serverVersion);
pObj->writeAuth = pConnect->writeAuth;
pObj->superAuth = pConnect->superAuth;
pObj->connId = htonl(pConnect->connId);
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
return 0;
@ -2330,7 +2311,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
pRes->row = 0;
tscTrace("%p numOfRows:%d, offset:%d", pSql, pRes->numOfRows, pRes->offset);
tscTrace("%p numOfRows:%d, offset:%d, complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
return 0;
}

View File

@ -88,7 +88,6 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
strncpy(pObj->user, user, TSDB_USER_LEN);
taosEncryptPass((uint8_t *)pass, strlen(pass), pObj->pass);
pObj->mnodePort = port ? port : tsDnodeShellPort;
if (db) {
int32_t len = strlen(db);

View File

@ -1557,8 +1557,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(pRes->numOfRows == numOfRows);
int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
// tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql,
// pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%s, orderOfSub:%d", pPObj, pSql,
pRes->numOfRows, pState->numOfRetrievedRows, pSql->ipList.fqdn[pSql->ipList.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId64 " , current:%" PRId64,
@ -1713,6 +1713,11 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
// increase the total inserted rows
if (numOfRows > 0) {
pParentObj->res.numOfRows += numOfRows;
} else {
SSqlObj* pSql = (SSqlObj*) tres;
assert(pSql != NULL && pSql->res.code == numOfRows);
pParentObj->res.code = pSql->res.code;
}
taos_free_result(tres);
@ -1947,7 +1952,8 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) {
size_t size = tscNumOfFields(pQueryInfo);
for (int i = 0; i < size; ++i) {
SFieldSupInfo* pSup = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);

View File

@ -404,8 +404,6 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
pSql->numOfSubs = 0;
tscResetSqlCmdObj(pCmd);
tscTrace("%p partially free sqlObj completed", pSql);
}
void tscFreeSqlObj(SSqlObj* pSql) {
@ -2104,7 +2102,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
}
void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, columnIndex);
SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);//tscFieldInfoGetSupp(pFieldInfo, columnIndex);
assert(pInfo->pSqlExpr != NULL);
int32_t type = pInfo->pSqlExpr->resType;

View File

@ -251,7 +251,7 @@ void tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop); //!!!!
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCols *src2, int *iter2, int tRows);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows);
// ----------------- K-V data row structure
/*

View File

@ -450,7 +450,8 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
int iter1 = 0;
int iter2 = 0;
tdMergeTwoDataCols(target, pTarget, &iter1, source, &iter2, pTarget->numOfRows + rowsToMerge);
tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows,
pTarget->numOfRows + rowsToMerge);
}
tdFreeDataCols(pTarget);
@ -461,15 +462,15 @@ _err:
return -1;
}
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCols *src2, int *iter2, int tRows) {
// TODO: resolve duplicate keys here
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
while (target->numOfRows < tRows) {
if (*iter1 >= src1->numOfRows && *iter2 >= src2->numOfRows) break;
if (*iter1 >= limit1 && *iter2 >= limit2) break;
TSKEY key1 = (*iter1 >= src1->numOfRows) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= src2->numOfRows) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
if (key1 <= key2) {
for (int i = 0; i < src1->numOfCols; i++) {

View File

@ -112,7 +112,7 @@ int32_t tsMaxShellConns = 5000;
char tsDefaultDB[TSDB_DB_NAME_LEN] = {0};
char tsDefaultUser[64] = "root";
char tsDefaultPass[64] = "taosdata";
int32_t tsMaxConnections = 50;
int32_t tsMaxConnections = 5000;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400*100; // seconds 10days
@ -840,7 +840,7 @@ static void doInitGlobalConfig() {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 1;
cfg.maxValue = 100;
cfg.maxValue = 100000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

View File

@ -150,6 +150,7 @@ class CTaosInterface(object):
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
libtaos.taos_free_result.restype = None
libtaos.taos_errno.restype = ctypes.c_int
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
def __init__(self, config=None):
'''

View File

@ -150,6 +150,7 @@ class CTaosInterface(object):
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
libtaos.taos_free_result.restype = None
libtaos.taos_errno.restype = ctypes.c_int
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
def __init__(self, config=None):
'''

View File

@ -150,6 +150,7 @@ class CTaosInterface(object):
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
libtaos.taos_free_result.restype = None
libtaos.taos_errno.restype = ctypes.c_int
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
def __init__(self, config=None):
'''

View File

@ -150,6 +150,7 @@ class CTaosInterface(object):
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
libtaos.taos_free_result.restype = None
libtaos.taos_errno.restype = ctypes.c_int
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
def __init__(self, config=None):
'''

View File

@ -46,7 +46,7 @@ typedef struct {
void (*cleanup)();
} SDnodeComponent;
static const SDnodeComponent SDnodeComponents[] = {
static const SDnodeComponent tsDnodeComponents[] = {
{"storage", dnodeInitStorage, dnodeCleanupStorage},
{"vread", dnodeInitVnodeRead, dnodeCleanupVnodeRead},
{"vwrite", dnodeInitVnodeWrite, dnodeCleanupVnodeWrite},
@ -72,14 +72,14 @@ static int dnodeCreateDir(const char *dir) {
static void dnodeCleanupComponents(int32_t stepId) {
for (int32_t i = stepId; i >= 0; i--) {
SDnodeComponents[i].cleanup();
tsDnodeComponents[i].cleanup();
}
}
static int32_t dnodeInitComponents() {
int32_t code = 0;
for (int32_t i = 0; i < sizeof(SDnodeComponents) / sizeof(SDnodeComponents[0]); i++) {
if (SDnodeComponents[i].init() != 0) {
for (int32_t i = 0; i < sizeof(tsDnodeComponents) / sizeof(tsDnodeComponents[0]); i++) {
if (tsDnodeComponents[i].init() != 0) {
dnodeCleanupComponents(i);
code = -1;
break;
@ -133,7 +133,7 @@ int32_t dnodeInitSystem() {
void dnodeCleanUpSystem() {
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_STOPPED) {
dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_STOPPED);
dnodeCleanupComponents(sizeof(SDnodeComponents) / sizeof(SDnodeComponents[0]) - 1);
dnodeCleanupComponents(sizeof(tsDnodeComponents) / sizeof(tsDnodeComponents[0]) - 1);
taos_cleanup();
taosCloseLog();
}

View File

@ -231,7 +231,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_SHELL_VNODE_BITS 24
#define TSDB_SHELL_SID_MASK 0xFF
#define TSDB_HTTP_TOKEN_LEN 20
#define TSDB_SHOW_SQL_LEN 512
#define TSDB_SHOW_SQL_LEN 64
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_METER_STATE_OFFLINE 0
#define TSDB_METER_STATE_ONLLINE 1

View File

@ -150,6 +150,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISK_PERMISSIONS, 0, 0x0405, "no disk perm
TAOS_DEFINE_ERROR(TSDB_CODE_FILE_CORRUPTED, 0, 0x0406, "file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_MEMORY_CORRUPTED, 0, 0x0407, "memory corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUCH_FILE_OR_DIR, 0, 0x0408, "no such file or directory")
TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_SHELL_CONNS, 0, 0x0409, "too many shell conns")
// client
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CLIENT_VERSION, 0, 0x0481, "invalid client version")

View File

@ -137,6 +137,7 @@ enum _mgmt_table {
TSDB_MGMT_TABLE_SCORES,
TSDB_MGMT_TABLE_GRANTS,
TSDB_MGMT_TABLE_VNODES,
TSDB_MGMT_TABLE_STREAMTABLES,
TSDB_MGMT_TABLE_MAX,
};
@ -299,6 +300,9 @@ typedef struct {
char serverVersion[TSDB_VERSION_LEN];
int8_t writeAuth;
int8_t superAuth;
int8_t reserved1;
int8_t reserved2;
int32_t connId;
SRpcIpSet ipList;
} SCMConnectRsp;
@ -716,16 +720,10 @@ typedef struct {
} SStreamDesc;
typedef struct {
uint32_t connId;
int32_t numOfQueries;
} SQqueryList;
typedef struct {
int32_t numOfStreams;
} SStreamList;
typedef struct {
SQqueryList qlist;
SStreamList slist;
char pData[];
} SCMHeartBeatMsg;
typedef struct {
@ -733,6 +731,7 @@ typedef struct {
uint32_t streamId;
uint32_t totalDnodes;
uint32_t onlineDnodes;
uint32_t connId;
int8_t killConnection;
SRpcIpSet ipList;
} SCMHeartBeatRsp;
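The request side is flattened to match: the separate SQqueryList/SStreamList wrappers are dropped, and SCMHeartBeatMsg now carries connId, the two descriptor counts, and a flexible pData[] region holding the SQueryDesc entries followed by the SStreamDesc entries (see tscBuildQueryStreamDesc above). A minimal sizing sketch under that assumption; hbPayloadSize is illustrative and not part of the codebase:

#include "taosmsg.h"  /* assumed to define SCMHeartBeatMsg, SQueryDesc, SStreamDesc */
#include <stddef.h>

/* Payload size for a heartbeat carrying nq query descriptors and ns stream
 * descriptors packed back-to-back in pData[]. */
static size_t hbPayloadSize(int nq, int ns) {
  return sizeof(SCMHeartBeatMsg) + (size_t)nq * sizeof(SQueryDesc)
                                 + (size_t)ns * sizeof(SStreamDesc);
}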

View File

@ -188,9 +188,10 @@ typedef void *TsdbPosT;
* @param tsdb tsdb handle
* @param pCond query condition, including time window, result set order, and basic required columns for each block
* @param groupInfo tableId list in the form of a set, separated into different groups according to the group by condition
* @param qinfo query info handle from query processor
* @return
*/
TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
/**
* Get the last row of the given query time window for all the tables in STableGroupInfo object.
@ -202,11 +203,11 @@ TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STable
* @param groupInfo tableId list.
* @return
*/
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo);
/**
* move to next block if exists

View File

@ -148,8 +148,8 @@
#define TK_SET 130
#define TK_KILL 131
#define TK_CONNECTION 132
#define TK_COLON 133
#define TK_STREAM 134
#define TK_STREAM 133
#define TK_COLON 134
#define TK_ABORT 135
#define TK_AFTER 136
#define TK_ATTACH 137

View File

@ -182,8 +182,6 @@ typedef struct SUserObj {
int8_t updateEnd[1];
int32_t refCount;
struct SAcctObj * pAcct;
SQqueryList * pQList; // query list
SStreamList * pSList; // stream list
} SUserObj;
typedef struct {

View File

@ -21,9 +21,30 @@ extern "C" {
#endif
#include "mnodeDef.h"
typedef struct {
char user[TSDB_USER_LEN + 1];
int8_t killed;
uint16_t port;
uint32_t ip;
uint32_t connId;
uint64_t stime;
uint64_t lastAccess;
uint32_t queryId;
uint32_t streamId;
int32_t numOfQueries;
int32_t numOfStreams;
SStreamDesc *pStreams;
SQueryDesc * pQueries;
} SConnObj;
int32_t mnodeInitProfile();
void mnodeCleanupProfile();
SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port);
SConnObj *mnodeAccquireConn(uint32_t connId, char *user, uint32_t ip, uint16_t port);
void mnodeReleaseConn(SConnObj *pConn);
int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg);
#ifdef __cplusplus
}
#endif

View File

@ -28,6 +28,7 @@ typedef int32_t (*SShowMetaFp)(STableMetaMsg *pMeta, SShowObj *pShow, void *pCon
typedef int32_t (*SShowRetrieveFp)(SShowObj *pShow, char *data, int32_t rows, void *pConn);
void mnodeAddShowMetaHandle(uint8_t showType, SShowMetaFp fp);
void mnodeAddShowRetrieveHandle(uint8_t showType, SShowRetrieveFp fp);
void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capacity, SShowObj *pShow);
#ifdef __cplusplus
}

View File

@ -33,14 +33,54 @@
#include "mnodeUser.h"
#include "mnodeTable.h"
#include "mnodeShow.h"
#include "mnodeProfile.h"
typedef struct {
const char *const name;
int (*init)();
void (*cleanup)();
} SMnodeComponent;
void *tsMnodeTmr;
static bool tsMgmtIsRunning = false;
static const SMnodeComponent tsMnodeComponents[] = {
{"profile", mnodeInitProfile, mnodeCleanupProfile},
{"accts", mnodeInitAccts, mnodeCleanupAccts},
{"users", mnodeInitUsers, mnodeCleanupUsers},
{"dnodes", mnodeInitDnodes, mnodeCleanupDnodes},
{"dbs", mnodeInitDbs, mnodeCleanupDbs},
{"vgroups", mnodeInitVgroups, mnodeCleanupVgroups},
{"tables", mnodeInitTables, mnodeCleanupTables},
{"mnodes", mnodeInitMnodes, mnodeCleanupMnodes},
{"sdb", sdbInit, sdbCleanUp},
{"balance", balanceInit, balanceCleanUp},
{"grant", grantInit, grantCleanUp},
{"show", mnodeInitShow, mnodeCleanUpShow}
};
static void mnodeInitTimer();
static void mnodeCleanupTimer();
static bool mnodeNeedStart() ;
static void mnodeCleanupComponents(int32_t stepId) {
for (int32_t i = stepId; i >= 0; i--) {
tsMnodeComponents[i].cleanup();
}
}
static int32_t mnodeInitComponents() {
int32_t code = 0;
for (int32_t i = 0; i < sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]); i++) {
if (tsMnodeComponents[i].init() != 0) {
mnodeCleanupComponents(i);
code = -1;
break;
}
}
return code;
}
int32_t mnodeStartSystem() {
if (tsMgmtIsRunning) {
mPrint("mnode module already started...");
@ -57,57 +97,7 @@ int32_t mnodeStartSystem() {
dnodeAllocateMnodeRqueue();
dnodeAllocateMnodePqueue();
if (mnodeInitAccts() < 0) {
mError("failed to init accts");
return -1;
}
if (mnodeInitUsers() < 0) {
mError("failed to init users");
return -1;
}
if (mnodeInitDnodes() < 0) {
mError("failed to init dnodes");
return -1;
}
if (mnodeInitDbs() < 0) {
mError("failed to init dbs");
return -1;
}
if (mnodeInitVgroups() < 0) {
mError("failed to init vgroups");
return -1;
}
if (mnodeInitTables() < 0) {
mError("failed to init tables");
return -1;
}
if (mnodeInitMnodes() < 0) {
mError("failed to init mnodes");
return -1;
}
if (sdbInit() < 0) {
mError("failed to init sdb");
return -1;
}
if (balanceInit() < 0) {
mError("failed to init balance")
}
if (grantInit() < 0) {
mError("failed to init grant");
return -1;
}
if (mnodeInitShow() < 0) {
mError("failed to init show");
if (mnodeInitComponents() != 0) {
return -1;
}
@ -115,7 +105,6 @@ int32_t mnodeStartSystem() {
tsMgmtIsRunning = true;
mPrint("mnode is initialized successfully");
return 0;
}
@ -133,17 +122,8 @@ void mnodeCleanupSystem() {
dnodeFreeMnodeRqueue();
dnodeFreeMnodePqueue();
mnodeCleanupTimer();
mnodeCleanUpShow();
grantCleanUp();
balanceCleanUp();
sdbCleanUp();
mnodeCleanupMnodes();
mnodeCleanupTables();
mnodeCleanupVgroups();
mnodeCleanupDbs();
mnodeCleanupDnodes();
mnodeCleanupUsers();
mnodeCleanupAccts();
mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1);
mPrint("mnode is cleaned up");
}

File diff suppressed because it is too large

View File

@ -227,21 +227,47 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
SCMHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont;
SRpcConnInfo connInfo;
rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo);
int32_t connId = htonl(pHBMsg->connId);
SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort);
if (pConn == NULL) {
pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort);
}
if (pConn == NULL) {
// do not close existing links, otherwise
// mError("failed to create connId, close connect");
// pHBRsp->killConnection = 1;
} else {
pHBRsp->connId = htonl(pConn->connId);
mnodeSaveQueryStreamList(pConn, pHBMsg);
if (pConn->killed != 0) {
pHBRsp->killConnection = 1;
}
if (pConn->streamId != 0) {
pHBRsp->streamId = htonl(pConn->streamId);
pConn->streamId = 0;
}
if (pConn->queryId != 0) {
pHBRsp->queryId = htonl(pConn->queryId);
pConn->queryId = 0;
}
}
pHBRsp->onlineDnodes = htonl(mnodeGetOnlinDnodesNum());
pHBRsp->totalDnodes = htonl(mnodeGetDnodesNum());
mnodeGetMnodeIpSetForShell(&pHBRsp->ipList);
/*
* TODO
* Dispose kill stream or kill query message
*/
pHBRsp->queryId = 0;
pHBRsp->streamId = 0;
pHBRsp->killConnection = 0;
pMsg->rpcRsp.rsp = pHBRsp;
pMsg->rpcRsp.len = sizeof(SCMHeartBeatRsp);
mnodeReleaseConn(pConn);
return TSDB_CODE_SUCCESS;
}
@ -281,6 +307,14 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
goto connect_over;
}
SConnObj *pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort);
if (pConn == NULL) {
code = terrno;
} else {
pConnectRsp->connId = htonl(pConn->connId);
mnodeReleaseConn(pConn);
}
sprintf(pConnectRsp->acctId, "%x", pAcct->acctId);
strcpy(pConnectRsp->serverVersion, version);
pConnectRsp->writeAuth = pUser->writeAuth;
@ -358,3 +392,11 @@ static void mnodeReleaseShowObj(void *pShow, bool forceRemove) {
mTrace("%p, show is released, force:%s", pShow, forceRemove ? "true" : "false");
taosCacheRelease(tsMnodeShowCache, &pShow, forceRemove);
}
void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capacity, SShowObj *pShow) {
if (rows < capacity) {
for (int32_t i = 0; i < numOfCols; ++i) {
memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows);
}
}
}
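The mnodeVacuumResult helper, previously a static function in the table module and now exported through mnodeShow.h (see the earlier hunk), compacts a column-major show buffer: each column is laid out with `capacity` slots, and when only `rows` of them are filled, each column is slid left so the result becomes contiguous. A self-contained model with two 1-byte columns; the layout mirrors SShowObj::offset[]/bytes[] but uses plain arrays and illustrative data:

#include <stdio.h>
#include <string.h>

int main(void) {
  enum { CAPACITY = 4, ROWS = 2, NUM_COLS = 2 };
  int  bytes[NUM_COLS]  = {1, 1};
  int  offset[NUM_COLS] = {0, 1};                         /* per-row byte offset of each column */
  char data[CAPACITY * 2] = {'a','b',0,0, '1','2',0,0};   /* column 0 then column 1, capacity 4 each */

  for (int i = 0; i < NUM_COLS; ++i) {
    memmove(data + offset[i] * ROWS, data + offset[i] * CAPACITY, bytes[i] * ROWS);
  }
  fwrite(data, 1, ROWS * (bytes[0] + bytes[1]), stdout);  /* prints "ab12" */
  printf("\n");
  return 0;
}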

View File

@ -61,8 +61,8 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *mnodeMsg);
static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg);
@ -568,8 +568,8 @@ int32_t mnodeInitTables() {
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_TABLE, mnodeRetrieveShowTables);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_METRIC, mnodeGetShowSuperTableMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_METRIC, mnodeRetrieveShowSuperTables);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_STREAMS, mnodeGetStreamMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_STREAMS, mnodeRetrieveStreams);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_STREAMTABLES, mnodeGetStreamTableMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_STREAMTABLES, mnodeRetrieveStreamTables);
return TSDB_CODE_SUCCESS;
}
@ -1284,7 +1284,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg);
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN)*i;
SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
if (pTable == NULL) {
mError("stable:%s, not exist while get stable vgroup info", stableName);
@ -1294,9 +1294,15 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
if (pTable->vgHash == NULL) {
mError("stable:%s, not vgroup exist while get stable vgroup info", stableName);
mnodeDecTableRef(pTable);
continue;
}
// even if this super table has no corresponding table, still return an entry
pRsp->numOfTables++;
SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
pVgroupInfo->numOfVgroups = 0;
msg += sizeof(SVgroupsInfo);
} else {
SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
@ -1330,6 +1336,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
pRsp->numOfTables++;
}
}
if (pRsp->numOfTables != numOfTable) {
rpcFreeCont(pRsp);
@ -2111,14 +2118,6 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
return 0;
}
static void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capacity, SShowObj *pShow) {
if (rows < capacity) {
for (int32_t i = 0; i < numOfCols; ++i) {
memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows);
}
}
}
static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
SDbObj *pDb = mnodeGetDb(pShow->db);
if (pDb == NULL) return 0;
@ -2262,7 +2261,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
return code;
}
static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
SDbObj *pDb = mnodeGetDb(pShow->db);
if (pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED;
@ -2308,7 +2307,7 @@ static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
return 0;
}
static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
SDbObj *pDb = mnodeGetDb(pShow->db);
if (pDb == NULL) return 0;

View File

@ -183,7 +183,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.slowquery(ts timestamp, username "
"binary(%d), created_time timestamp, time bigint, sql binary(%d))",
tsMonitorDbName, TSDB_TABLE_ID_LEN, TSDB_SHOW_SQL_LEN);
tsMonitorDbName, TSDB_TABLE_ID_LEN, TSDB_SLOW_QUERY_SQL_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_LOG) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.log(ts timestamp, level tinyint, "

View File

@ -176,6 +176,7 @@ typedef struct SQueryRuntimeEnv {
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
bool topBotQuery; // whether this is a top/bottom query (default false)
} SQueryRuntimeEnv;
typedef struct SQInfo {

View File

@ -31,7 +31,8 @@ void closeTimeWindow(SWindowResInfo* pWindowResInfo, int32_t slot);
void closeAllTimeWindow(SWindowResInfo* pWindowResInfo);
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order);
SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);
int32_t curTimeWindow(SWindowResInfo *pWindowResInfo);
#define curTimeWindow(_winres) ((_winres)->curIndex)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo);

View File

@ -28,7 +28,7 @@ extern "C" {
#include "tdataformat.h"
#include "talgo.h"
#define DEFAULT_PAGE_SIZE (1024L*56) // 16k larger than the SHistoInfo
#define DEFAULT_PAGE_SIZE (1024L*64) // 16k larger than the SHistoInfo
#define MAX_TMPFILE_PATH_LENGTH PATH_MAX
#define INITIAL_ALLOCATION_BUFFER_SIZE 64
@ -96,7 +96,7 @@ typedef struct SColumnOrderInfo {
typedef struct tOrderDescriptor {
SColumnModel * pColumnModel;
int32_t tsOrder; // timestamp order type if exists
SColumnOrderInfo orderIdx;
SColumnOrderInfo orderInfo;
} tOrderDescriptor;
typedef struct tExtMemBuffer {

View File

@ -85,7 +85,7 @@ SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId);
* @param id
* @return
*/
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id);
#define GET_RES_BUF_PAGE_BY_ID(buf, id) ((tFilePage*)((buf)->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE*(id)))
/**
* get the total buffer size in the format of disk file

View File

@ -647,9 +647,9 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
}
////////////////////////////////////////kill statement///////////////////////////////////////
cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);}
cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
cmd ::= KILL CONNECTION INTEGER(Y). {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
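With this grammar change, KILL no longer takes an ip:port pair; it takes the numeric connection ID, optionally followed by a query or stream ID. A hedged usage sketch through the C client (taos_query/taos_free_result are the public client API; the IDs below are illustrative and would in practice come from SHOW CONNECTIONS / SHOW QUERIES / SHOW STREAMS):

#include <taos.h>

static void killExamples(TAOS *taos) {
  taos_free_result(taos_query(taos, "KILL CONNECTION 5"));
  taos_free_result(taos_query(taos, "KILL QUERY 5:12"));    /* connId:queryId  */
  taos_free_result(taos_query(taos, "KILL STREAM 5:3"));    /* connId:streamId */
}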
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD

View File

@ -272,9 +272,18 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
bool stableQueryFunctChanged(int32_t funcId);
void resetResultInfo(SResultInfo *pResInfo);
void initResultInfo(SResultInfo *pResInfo);
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable);
static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) {
pResInfo->initialized = true; // flag: this struct has been initialized
pResInfo->complete = false;
pResInfo->hasResult = false;
pResInfo->numOfRes = 0;
memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
}
#ifdef __cplusplus
}
#endif

View File

@ -98,6 +98,7 @@ static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group);
static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult);
static void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult);
static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pResultInfo);
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow);
@ -441,7 +442,7 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult
pData = getNewDataBuf(pResultBuf, sid, &pageId);
} else {
pageId = getLastPageId(&list);
pData = getResultBufferPageById(pResultBuf, pageId);
pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, pageId);
if (pData->num >= numOfRowsPerPage) {
pData = getNewDataBuf(pResultBuf, sid, &pageId);
@ -485,9 +486,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
// set time window for current result
pWindowRes->window = *win;
setWindowResOutputBuf(pRuntimeEnv, pWindowRes);
initCtxOutputBuf(pRuntimeEnv);
setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
return TSDB_CODE_SUCCESS;
}
@ -651,7 +650,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat
pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
pCtx[k].ptsList = tsBuf;
pCtx[k].ptsList = &tsBuf[offset];
}
// not a whole block involved in query processing, statistics data can not be used
@ -688,10 +687,9 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
__block_search_fn_t searchFn) {
SQuery *pQuery = pRuntimeEnv->pQuery;
while (1) {
if ((pNextWin->ekey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pNextWin->skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
return -1;
// tumbling time window query, a special case of sliding time window query
if (pQuery->slidingTime == pQuery->intervalTime) {
// todo opt
}
getNextTimeWindow(pQuery, pNextWin);
@ -721,13 +719,19 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
* This time window does not cover any data, try next time window,
* this case may happen when the time window is too small
*/
if ((primaryKeys[startPos] > pNextWin->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(primaryKeys[startPos] < pNextWin->skey && !QUERY_IS_ASC_QUERY(pQuery))) {
continue;
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNextWin->ekey) {
TSKEY next = primaryKeys[startPos];
pNextWin->ekey += ((next - pNextWin->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->skey = pNextWin->ekey - pQuery->intervalTime + 1;
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNextWin->skey) {
TSKEY next = primaryKeys[startPos];
pNextWin->skey -= ((pNextWin->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->ekey = pNextWin->skey + pQuery->intervalTime - 1;
}
return startPos;
}
}
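Rather than giving up when a sliding window contains no data, the loop above now jumps the window forward (or backward on descending scans) by whole sliding steps so that it reaches the first qualifying timestamp. A small self-contained model of the ascending-scan arithmetic; the numbers are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void) {
  int64_t interval = 10, sliding = 5;
  int64_t skey = 0, ekey = 9;   /* current window [0, 9]             */
  int64_t next = 27;            /* first timestamp beyond the window */

  /* advance ekey by whole sliding steps until the window covers `next` */
  ekey += ((next - ekey + sliding - 1) / sliding) * sliding;
  skey  = ekey - interval + 1;

  printf("jumped to window [%lld, %lld]\n", (long long)skey, (long long)ekey);  /* [20, 29] */
  return 0;
}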
static TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) {
@ -1027,7 +1031,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
}
// in the supplementary scan, only the following functions need to be executed
if (IS_REVERSE_SCAN(pRuntimeEnv)) {// && (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) {
if (IS_REVERSE_SCAN(pRuntimeEnv)) {
return false;
}
@ -1183,7 +1187,6 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
STableQueryInfo* pTableQInfo = pQuery->current;
SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo;
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
@ -1598,8 +1601,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
/////////////////////////////////////////////////////////////////////////////////////////////
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, int64_t *realSkey,
int64_t *realEkey, STimeWindow *win) {
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) {
assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision);
@ -1611,8 +1613,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
*/
assert(keyLast - keyFirst < pQuery->intervalTime);
*realSkey = keyFirst;
*realEkey = keyLast;
realWin->skey = keyFirst;
realWin->ekey = keyLast;
win->ekey = INT64_MAX;
return;
@ -1620,17 +1622,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
win->ekey = win->skey + pQuery->intervalTime - 1;
if (win->skey < keyFirst) {
*realSkey = keyFirst;
} else {
*realSkey = win->skey;
}
if (win->ekey < keyLast) {
*realEkey = win->ekey;
} else {
*realEkey = keyLast;
}
realWin->skey = (win->skey < keyFirst)? keyFirst : win->skey;
realWin->ekey = (win->ekey < keyLast) ? win->ekey : keyLast;
}
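The realWin computed above is just the aligned window clamped to the range of available keys. A minimal standalone sketch of that clamping (hypothetical names, not part of the patch):

#include <stdint.h>

typedef int64_t TSKEY;
typedef struct { TSKEY skey, ekey; } SketchWindow;

/* Clamp the aligned window [win->skey, win->ekey] to the actual data range [keyFirst, keyLast]. */
static void clampToDataRange(SketchWindow *realWin, const SketchWindow *win, TSKEY keyFirst, TSKEY keyLast) {
  realWin->skey = (win->skey < keyFirst) ? keyFirst : win->skey;
  realWin->ekey = (win->ekey < keyLast)  ? win->ekey : keyLast;
}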
static void setScanLimitationByResultBuffer(SQuery *pQuery) {
@ -1847,31 +1840,21 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
return num;
}
static int32_t getRowParamForMultiRowsOutput(SQuery *pQuery, bool isSTableQuery) {
int32_t rowparam = 1;
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
if (isTopBottomQuery(pQuery) && (!isSTableQuery)) {
rowparam = pQuery->pSelectExpr[1].base.arg->argValue.i64;
}
return rowparam;
}
static int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool isSTableQuery) {
int32_t rowSize = pQuery->rowSize * getRowParamForMultiRowsOutput(pQuery, isSTableQuery);
static FORCE_INLINE int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool topBotQuery, bool isSTableQuery) {
int32_t rowSize = pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, topBotQuery, isSTableQuery);
return (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
}
char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult) {
assert(pResult != NULL && pRuntimeEnv != NULL);
SQuery * pQuery = pRuntimeEnv->pQuery;
tFilePage *page = getResultBufferPageById(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
SQuery *pQuery = pRuntimeEnv->pQuery;
tFilePage *page = GET_RES_BUF_PAGE_BY_ID(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
int32_t realRowId = pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);
int32_t numOfRows = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->stableQuery);
int32_t realRowId = pResult->pos.rowId * getRowParamForMultiRowsOutput(pQuery, pRuntimeEnv->stableQuery);
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * numOfRows +
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
}
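The two routines above define the result-page layout: a page holds numOfRowsPerPage logical rows, a top/bottom query on a normal table reserves extra slots per row, and a cell is addressed column-stripe first. A hedged, self-contained sketch of that addressing (page size and names are assumptions, not the engine's real constants):

#include <stdint.h>

enum { SKETCH_PAGE_PAYLOAD = 16384 };   /* assumed usable page payload */

static int32_t sketchRowsPerPage(int32_t rowSize, int32_t rowParam) {
  return SKETCH_PAGE_PAYLOAD / (rowSize * rowParam);   /* rowParam > 1 only for top/bottom on a normal table */
}

/* colOffset[i] is the per-row byte offset of column i; each column occupies a
 * contiguous stripe of numOfRowsPerPage slots inside the page. */
static char *sketchCellAddr(char *pageData, const int32_t *colOffset, const int32_t *colBytes,
                            int32_t col, int32_t rowId, int32_t rowParam, int32_t numOfRowsPerPage) {
  int32_t realRowId = rowId * rowParam;
  return pageData + colOffset[col] * numOfRowsPerPage + colBytes[col] * realRowId;
}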
@ -2034,6 +2017,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
}
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
pRuntimeEnv->summary.loadBlocks += 1;
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
}
@ -2143,6 +2127,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d",
GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey,
@ -2150,7 +2135,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
while (tsdbNextDataBlock(pQueryHandle)) {
pRuntimeEnv->summary.totalBlocks += 1;
summary->totalBlocks += 1;
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return 0;
}
@ -2159,19 +2144,16 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
// todo extract methods
if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
TSKEY skey1, ekey1;
STimeWindow w = TSWINDOW_INITIALIZER;
STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &skey1,
&ekey1, &w);
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w);
pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey;
} else {
// the start position of the first time window at the end boundary that extends beyond the queried last timestamp
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &skey1,
&ekey1, &w);
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w);
pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey;
@ -2187,9 +2169,12 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
// query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
pRuntimeEnv->summary.totalRows += blockInfo.rows;
summary->totalRows += blockInfo.rows;
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey);
@ -2531,7 +2516,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t total = 0;
for (int32_t i = 0; i < list.size; ++i) {
tFilePage *pData = getResultBufferPageById(pResultBuf, list.pData[i]);
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[i]);
total += pData->num;
}
@ -2539,7 +2524,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
int32_t offset = 0;
for (int32_t num = 0; num < list.size; ++num) {
tFilePage *pData = getResultBufferPageById(pResultBuf, list.pData[num]);
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[num]);
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
@ -2558,10 +2543,8 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
pQInfo->offset += 1;
}
int64_t getNumOfResultWindowRes(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindowRes) {
SQuery *pQuery = pRuntimeEnv->pQuery;
int64_t maxOutput = 0;
int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
// int64_t maxOutput = 0;
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
@ -2574,12 +2557,23 @@ int64_t getNumOfResultWindowRes(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pW
}
SResultInfo *pResultInfo = &pWindowRes->resultInfo[j];
if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) {
maxOutput = pResultInfo->numOfRes;
assert(pResultInfo != NULL);
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
}
// if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) {
// maxOutput = pResultInfo->numOfRes;
//
// if (maxOutput > 0) {
// break;
// }
// }
//
// assert(pResultInfo != NULL);
}
return maxOutput;
return 0;
}
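The rewrite above replaces the max-scan with an early return: every output column that produces per-row results holds the same count for a given window, so the first positive count is the answer. A minimal sketch of that assumption (hypothetical helper, not in the patch):

#include <stdint.h>

static int64_t sketchNumOfWindowRows(const int32_t *numOfRes, int32_t numOfOutput) {
  for (int32_t j = 0; j < numOfOutput; ++j) {
    if (numOfRes[j] > 0) {
      return numOfRes[j];      /* all data columns agree, so the first hit is enough */
    }
  }
  return 0;                    /* the window produced no rows */
}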
int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
@ -2612,6 +2606,8 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
assert(pQInfo->numOfGroupResultPages == 0);
return 0;
} else if (numOfTables == 1) { // no need to merge results since only one table in each group
}
SCompSupporter cs = {pTableList, posList, pQInfo};
@ -2636,7 +2632,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
TSKEY ts = GET_INT64_VAL(b);
assert(ts == pWindowRes->window.skey);
int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes);
int64_t num = getNumOfResultWindowRes(pQuery, pWindowRes);
if (num <= 0) {
cs.position[pos] += 1;
@ -2699,10 +2695,11 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
#endif
qTrace("QInfo:%p result merge completed, elapsed time:%" PRId64 " ms", GET_QINFO_ADDR(pQuery), endt - startt);
tfree(pTree);
qTrace("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt);
tfree(pTableList);
tfree(posList);
tfree(pTree);
pQInfo->offset = 0;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
@ -2928,8 +2925,13 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
pRuntimeEnv->pCtx[j].currentStage = 0;
SResultInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]);
if (pResInfo->initialized) {
continue;
}
aAggs[functionId].init(&pRuntimeEnv->pCtx[j]);
}
}
@ -3078,7 +3080,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
}
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
@ -3150,7 +3152,7 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
}
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
@ -3295,13 +3297,14 @@ void setExecutionContext(SQInfo *pQInfo, STableId* pTableId, int32_t groupIndex,
setAdditionalInfo(pQInfo, pTableId, pTableQueryInfo);
}
static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery;
// Note: pResult->pos[i]->num == 0, there is only a fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
@ -3319,6 +3322,38 @@ static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
}
}
void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery;
// Note: pResult->pos[i]->num == 0, there is only a fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->resultInfo = &pResult->resultInfo[i];
if (pCtx->resultInfo->complete) {
continue;
}
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
pCtx->currentStage = 0;
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
/*
* set the output buffer information and intermediate buffer
* not all queries require the interResultBuf, such as COUNT
*/
pCtx->resultInfo->superTableQ = pRuntimeEnv->stableQuery; // set super table query flag
if (!pCtx->resultInfo->initialized) {
aAggs[functionId].init(pCtx);
}
}
}
int32_t setAdditionalInfo(SQInfo *pQInfo, STableId* pTableId, STableQueryInfo *pTableQueryInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
assert(pTableQueryInfo->lastKey >= TSKEY_INITIAL_VAL);
@ -3374,13 +3409,12 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
* In an ascending query, key is the first qualified timestamp. However, in a descending order query, additional
* operations are involved.
*/
TSKEY skey1, ekey1;
STimeWindow w = {0};
STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo;
TSKEY sk = MIN(win.skey, win.ekey);
TSKEY ek = MAX(win.skey, win.ekey);
getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &skey1, &ekey1, &w);
getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &realWin, &w);
pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
@ -3738,7 +3772,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBloc
SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0);
// update the pQuery->limit.offset value, and pQuery->pos value
TSKEY *keys = (TSKEY *)pColInfoData->pData;
TSKEY *keys = (TSKEY *) pColInfoData->pData;
// update the offset value
pTableQueryInfo->lastKey = keys[pQuery->pos];
@ -3800,8 +3834,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
*/
assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
TSKEY skey1, ekey1;
STimeWindow w = TSWINDOW_INITIALIZER;
STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
STableQueryInfo *pTableQueryInfo = pQuery->current;
@ -3811,14 +3844,12 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
if (QUERY_IS_ASC_QUERY(pQuery)) {
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &skey1,
&ekey1, &w);
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w);
pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey;
}
} else {
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &skey1, &ekey1,
&w);
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w);
pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey;
@ -3939,11 +3970,11 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
}
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
} else if (isPointInterpoQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
}
}
@ -3999,7 +4030,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
return code;
}
pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, isSTableQuery);
pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->topBotQuery, isSTableQuery);
if (isSTableQuery) {
int32_t rows = getInitialPageNum(pQInfo);
@ -4057,6 +4088,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
pQuery->slidingTime, pQuery->fillType, pColInfo);
}
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
return TSDB_CODE_SUCCESS;
}
@ -4086,9 +4118,9 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
}
}
static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
SQuery* pQuery = pRuntimeEnv->pQuery;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
int64_t st = taosGetTimestampMs();
@ -4144,8 +4176,9 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
summary->totalRows += blockInfo.rows;
stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey);
qTrace("QInfo:%p check data block, uid:%"PRId64", tid:%d, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%" PRId64,
GET_QINFO_ADDR(pRuntimeEnv), blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey,
blockInfo.rows, pQuery->current->lastKey);
}
int64_t et = taosGetTimestampMs();
@ -4186,7 +4219,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
pRuntimeEnv->pQueryHandle = NULL;
}
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp);
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
taosArrayDestroy(tx);
taosArrayDestroy(g1);
@ -4252,9 +4285,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
}
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(pQInfo->tsdb, &cond, &gp);
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(pQInfo->tsdb, &cond, &gp, pQInfo);
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp);
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp, pQInfo);
}
initCtxOutputBuf(pRuntimeEnv);
@ -4448,7 +4481,7 @@ static void doSaveContext(SQInfo *pQInfo) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
}
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo);
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableIdGroupInfo, pQInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
@ -4515,7 +4548,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
pQuery->window.skey, pQuery->window.ekey, pQuery->order.order);
// do check all qualified data blocks
int64_t el = queryOnDataBlocks(pQInfo);
int64_t el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p master scan completed, elapsed time: %lldms, reverse scan start", pQInfo, el);
// query error occurred or query is killed, abort current execution
@ -4530,7 +4563,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
if (needReverseScan(pQuery)) {
doSaveContext(pQInfo);
el = queryOnDataBlocks(pQInfo);
el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p reversed scan completed, elapsed time: %lldms", pQInfo, el);
doRestoreContext(pQInfo);
@ -4817,7 +4850,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
/* check if query is killed or not */
if (isQueryKilled(pQInfo)) {
qTrace("QInfo:%p query is killed", pQInfo);
} else {// todo set the table uid and tid in log
} else {
qTrace("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
}
@ -4909,9 +4942,12 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx
return false;
} else if (numOfTotal == 0) {
for(int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
if (pExprMsg[i]->functionId != TSDB_FUNC_TAGPRJ) {
return false;
if ((pExprMsg[i]->functionId == TSDB_FUNC_TAGPRJ) ||
(pExprMsg[i]->functionId == TSDB_FUNC_TID_TAG && pExprMsg[i]->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) {
continue;
}
return false;
}
}
@ -6002,35 +6038,37 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
num = taosArrayGetSize(pa);
assert(num == pQInfo->groupInfo.numOfTables);
// int16_t type, bytes;
int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id
assert(pQuery->numOfOutput == 1);
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
int32_t rsize = pExprInfo->bytes;
for(int32_t i = 0; i < num; ++i) {
SGroupItem* item = taosArrayGet(pa, i);
SGroupItem *item = taosArrayGet(pa, i);
char* output = pQuery->sdata[0]->data + i * rsize;
char *output = pQuery->sdata[0]->data + i * rsize;
varDataSetLen(output, rsize - VARSTR_HEADER_SIZE);
output = varDataVal(output);
*(int64_t*) output = item->id.uid; // memory align problem, todo serialize
*(int64_t *)output = item->id.uid; // memory align problem, todo serialize
output += sizeof(item->id.uid);
*(int32_t*) output = item->id.tid;
*(int32_t *)output = item->id.tid;
output += sizeof(item->id.tid);
*(int32_t*) output = pQInfo->vgId;
*(int32_t *)output = pQInfo->vgId;
output += sizeof(pQInfo->vgId);
int16_t bytes = pExprInfo->bytes;
int16_t type = pExprInfo->type;
char* val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes);
if (pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
char *data = tsdbGetTableName(pQInfo->tsdb, &item->id);
memcpy(output, data, varDataTLen(data));
} else {
char *val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes);
// todo refactor
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
@ -6047,8 +6085,11 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
}
}
}
}
pQInfo->tableIndex = pQInfo->groupInfo.numOfTables;
qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, num);
} else { // return only the tags|table name etc.
for(int32_t i = 0; i < num; ++i) {
SExprInfo* pExprInfo = pQuery->pSelectExpr;
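For the TSDB_FUNC_TID_TAG branch above, each output row is a var-length record: a length header, then uid, tid and vgId, followed by the tag value or the table name. A hedged packing sketch (field order and sizes are inferred from the copies shown in the diff; the helper name is hypothetical):

#include <stdint.h>
#include <string.h>

static char *sketchPackTidTag(char *output, uint64_t uid, int32_t tid, int32_t vgId) {
  memcpy(output, &uid,  sizeof(uid));  output += sizeof(uid);
  memcpy(output, &tid,  sizeof(tid));  output += sizeof(tid);
  memcpy(output, &vgId, sizeof(vgId)); output += sizeof(vgId);
  return output;   /* the caller appends the tag value or table name here */
}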

View File

@ -206,11 +206,6 @@ bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot) {
return (getWindowResult(pWindowResInfo, slot)->status.closed == true);
}
int32_t curTimeWindow(SWindowResInfo *pWindowResInfo) {
assert(pWindowResInfo->curIndex >= 0 && pWindowResInfo->curIndex < pWindowResInfo->size);
return pWindowResInfo->curIndex;
}
void closeTimeWindow(SWindowResInfo *pWindowResInfo, int32_t slot) {
getWindowResult(pWindowResInfo, slot)->status.closed = true;
}

View File

@ -356,17 +356,15 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t
static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
int32_t first = *(int32_t *)f1;
int32_t second = *(int32_t *)f2;
int32_t first = *(int32_t *) f1;
int32_t second = *(int32_t *) f2;
if (first == second) {
return 0;
}
return (first < second) ? -1 : 1;
};
case TSDB_DATA_TYPE_DOUBLE: {
//double first = *(double *)f1;
double first = GET_DOUBLE_VAL(f1);
//double second = *(double *)f2;
double second = GET_DOUBLE_VAL(f2);
if (first == second) {
return 0;
@ -374,8 +372,6 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
return (first < second) ? -1 : 1;
};
case TSDB_DATA_TYPE_FLOAT: {
//float first = *(float *)f1;
//float second = *(float *)f2;
float first = GET_FLOAT_VAL(f1);
float second = GET_FLOAT_VAL(f2);
if (first == second) {
@ -439,9 +435,9 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols;
int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i];
int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
@ -471,9 +467,9 @@ int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols;
int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i];
int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
@ -563,13 +559,13 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
int32_t midIdx = ((end - start) >> 1) + start;
#if defined(_DEBUG_VIEW)
int32_t f = pDescriptor->orderIdx.pData[0];
int32_t f = pDescriptor->orderInfo.pData[0];
char *midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f);
char *startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f);
char *endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f);
int32_t colIdx = pDescriptor->orderIdx.pData[0];
int32_t colIdx = pDescriptor->orderInfo.pData[0];
tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx);
#endif
@ -596,7 +592,7 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
}
static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t numOfRows, char *d, int32_t len) {
int32_t colIdx = pDescriptor->orderIdx.pData[0];
int32_t colIdx = pDescriptor->orderInfo.pData[0];
for (int32_t i = 0; i < len; ++i) {
char *startx = COLMODEL_GET_VAL(d, pDescriptor->pColumnModel, numOfRows, i, colIdx);
@ -1062,9 +1058,9 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
desc->pColumnModel = pModel;
desc->tsOrder = tsOrderType;
desc->orderIdx.numOfCols = numOfOrderCols;
desc->orderInfo.numOfCols = numOfOrderCols;
for (int32_t i = 0; i < numOfOrderCols; ++i) {
desc->orderIdx.pData[i] = orderColIdx[i];
desc->orderInfo.pData[i] = orderColIdx[i];
}
return desc;

View File

@ -50,12 +50,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si
return TSDB_CODE_SUCCESS;
}
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id) {
assert(id < pResultBuf->numOfPages && id >= 0);
return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE * id);
}
int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
@ -169,7 +163,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
*pageId = (pResultBuf->allocateId++);
registerPageId(pResultBuf, groupId, *pageId);
tFilePage* page = getResultBufferPageById(pResultBuf, *pageId);
tFilePage* page = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pageId);
// clear memory for the new page
memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE);

View File

@ -126,17 +126,17 @@ typedef union {
#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo
#define ParseARG_STORE yypParser->pInfo = pInfo
#define YYFALLBACK 1
#define YYNSTATE 247
#define YYNSTATE 241
#define YYNRULE 220
#define YYNTOKEN 205
#define YY_MAX_SHIFT 246
#define YY_MIN_SHIFTREDUCE 403
#define YY_MAX_SHIFTREDUCE 622
#define YY_ERROR_ACTION 623
#define YY_ACCEPT_ACTION 624
#define YY_NO_ACTION 625
#define YY_MIN_REDUCE 626
#define YY_MAX_REDUCE 845
#define YY_MAX_SHIFT 240
#define YY_MIN_SHIFTREDUCE 397
#define YY_MAX_SHIFTREDUCE 616
#define YY_ERROR_ACTION 617
#define YY_ACCEPT_ACTION 618
#define YY_NO_ACTION 619
#define YY_MIN_REDUCE 620
#define YY_MAX_REDUCE 839
/************* End control #defines *******************************************/
/* Define the yytestcase() macro to be a no-op if is not already defined
@ -202,63 +202,63 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
#define YY_ACTTAB_COUNT (547)
#define YY_ACTTAB_COUNT (541)
static const YYACTIONTYPE yy_action[] = {
/* 0 */ 724, 444, 723, 11, 722, 134, 624, 246, 725, 445,
/* 10 */ 727, 726, 764, 41, 43, 21, 35, 36, 153, 244,
/* 20 */ 135, 29, 135, 444, 203, 39, 37, 40, 38, 158,
/* 30 */ 833, 445, 832, 34, 33, 139, 135, 32, 31, 30,
/* 40 */ 41, 43, 753, 35, 36, 157, 833, 166, 29, 739,
/* 50 */ 103, 203, 39, 37, 40, 38, 188, 21, 103, 99,
/* 60 */ 34, 33, 761, 155, 32, 31, 30, 404, 405, 406,
/* 70 */ 407, 408, 409, 410, 411, 412, 413, 414, 415, 245,
/* 80 */ 444, 742, 41, 43, 103, 35, 36, 103, 445, 168,
/* 90 */ 29, 738, 21, 203, 39, 37, 40, 38, 32, 31,
/* 100 */ 30, 56, 34, 33, 753, 787, 32, 31, 30, 43,
/* 110 */ 191, 35, 36, 788, 829, 198, 29, 21, 154, 203,
/* 120 */ 39, 37, 40, 38, 167, 578, 739, 8, 34, 33,
/* 130 */ 61, 113, 32, 31, 30, 665, 35, 36, 126, 59,
/* 140 */ 200, 29, 58, 17, 203, 39, 37, 40, 38, 221,
/* 150 */ 26, 739, 169, 34, 33, 220, 219, 32, 31, 30,
/* 160 */ 16, 239, 214, 238, 213, 212, 211, 237, 210, 236,
/* 170 */ 235, 209, 720, 828, 709, 710, 711, 712, 713, 714,
/* 180 */ 715, 716, 717, 718, 719, 162, 591, 234, 76, 582,
/* 190 */ 165, 585, 240, 588, 234, 162, 591, 98, 827, 582,
/* 200 */ 225, 585, 60, 588, 26, 162, 591, 12, 742, 582,
/* 210 */ 742, 585, 674, 588, 27, 126, 21, 159, 160, 34,
/* 220 */ 33, 202, 842, 32, 31, 30, 148, 159, 160, 740,
/* 230 */ 536, 539, 88, 87, 142, 18, 666, 159, 160, 126,
/* 240 */ 147, 559, 560, 39, 37, 40, 38, 50, 226, 550,
/* 250 */ 739, 34, 33, 46, 507, 32, 31, 30, 523, 531,
/* 260 */ 17, 520, 151, 521, 51, 522, 190, 26, 16, 239,
/* 270 */ 152, 238, 243, 242, 95, 237, 551, 236, 235, 177,
/* 280 */ 14, 42, 223, 222, 580, 741, 185, 187, 182, 170,
/* 290 */ 171, 42, 590, 584, 150, 587, 74, 78, 83, 86,
/* 300 */ 77, 42, 590, 161, 608, 592, 80, 589, 13, 13,
/* 310 */ 140, 583, 590, 586, 513, 47, 141, 589, 46, 798,
/* 320 */ 581, 116, 117, 68, 64, 67, 143, 589, 130, 128,
/* 330 */ 91, 90, 89, 512, 48, 207, 527, 22, 528, 22,
/* 340 */ 144, 3, 73, 72, 10, 9, 145, 525, 146, 526,
/* 350 */ 85, 84, 137, 797, 133, 138, 136, 163, 794, 524,
/* 360 */ 793, 164, 763, 733, 224, 100, 755, 780, 779, 114,
/* 370 */ 26, 115, 112, 676, 208, 131, 24, 217, 673, 218,
/* 380 */ 841, 70, 840, 838, 118, 694, 25, 93, 23, 132,
/* 390 */ 663, 79, 189, 546, 661, 192, 81, 82, 659, 658,
/* 400 */ 172, 127, 656, 196, 655, 654, 653, 652, 644, 129,
/* 410 */ 650, 648, 646, 52, 752, 767, 49, 44, 768, 781,
/* 420 */ 201, 199, 197, 195, 193, 28, 216, 75, 227, 228,
/* 430 */ 229, 230, 205, 232, 231, 53, 233, 241, 622, 149,
/* 440 */ 173, 62, 65, 174, 176, 175, 621, 178, 179, 180,
/* 450 */ 181, 657, 121, 120, 695, 125, 119, 122, 123, 92,
/* 460 */ 124, 651, 1, 106, 104, 737, 94, 105, 620, 109,
/* 470 */ 107, 108, 110, 111, 2, 184, 613, 183, 186, 190,
/* 480 */ 533, 55, 547, 156, 101, 57, 552, 194, 102, 5,
/* 490 */ 6, 63, 484, 593, 4, 19, 20, 15, 204, 7,
/* 500 */ 206, 481, 479, 478, 477, 475, 448, 215, 66, 45,
/* 510 */ 22, 509, 508, 69, 506, 54, 469, 467, 459, 465,
/* 520 */ 461, 463, 457, 455, 71, 483, 482, 480, 476, 474,
/* 530 */ 46, 446, 419, 417, 626, 625, 625, 625, 625, 625,
/* 540 */ 96, 625, 625, 625, 625, 625, 97,
/* 0 */ 718, 438, 717, 11, 716, 134, 618, 240, 719, 439,
/* 10 */ 721, 720, 758, 41, 43, 21, 35, 36, 153, 238,
/* 20 */ 135, 29, 135, 438, 197, 39, 37, 40, 38, 158,
/* 30 */ 827, 439, 826, 34, 33, 139, 135, 32, 31, 30,
/* 40 */ 41, 43, 747, 35, 36, 157, 827, 166, 29, 733,
/* 50 */ 103, 197, 39, 37, 40, 38, 182, 21, 103, 99,
/* 60 */ 34, 33, 755, 155, 32, 31, 30, 398, 399, 400,
/* 70 */ 401, 402, 403, 404, 405, 406, 407, 408, 409, 239,
/* 80 */ 438, 736, 41, 43, 103, 35, 36, 103, 439, 168,
/* 90 */ 29, 732, 21, 197, 39, 37, 40, 38, 32, 31,
/* 100 */ 30, 56, 34, 33, 747, 781, 32, 31, 30, 43,
/* 110 */ 185, 35, 36, 782, 823, 192, 29, 21, 154, 197,
/* 120 */ 39, 37, 40, 38, 167, 572, 733, 8, 34, 33,
/* 130 */ 61, 113, 32, 31, 30, 659, 35, 36, 126, 59,
/* 140 */ 194, 29, 58, 17, 197, 39, 37, 40, 38, 215,
/* 150 */ 26, 733, 169, 34, 33, 214, 213, 32, 31, 30,
/* 160 */ 16, 233, 208, 232, 207, 206, 205, 231, 204, 230,
/* 170 */ 229, 203, 714, 219, 703, 704, 705, 706, 707, 708,
/* 180 */ 709, 710, 711, 712, 713, 162, 585, 50, 60, 576,
/* 190 */ 175, 579, 165, 582, 234, 162, 585, 179, 178, 576,
/* 200 */ 27, 579, 734, 582, 51, 162, 585, 12, 98, 576,
/* 210 */ 736, 579, 736, 582, 228, 26, 21, 159, 160, 34,
/* 220 */ 33, 196, 836, 32, 31, 30, 148, 159, 160, 76,
/* 230 */ 822, 533, 88, 87, 142, 228, 668, 159, 160, 126,
/* 240 */ 147, 553, 554, 39, 37, 40, 38, 821, 220, 544,
/* 250 */ 733, 34, 33, 46, 501, 32, 31, 30, 517, 525,
/* 260 */ 17, 514, 151, 515, 152, 516, 184, 26, 16, 233,
/* 270 */ 140, 232, 237, 236, 95, 231, 660, 230, 229, 126,
/* 280 */ 530, 42, 217, 216, 578, 18, 581, 181, 161, 170,
/* 290 */ 171, 42, 584, 577, 150, 580, 74, 78, 83, 86,
/* 300 */ 77, 42, 584, 574, 545, 602, 80, 583, 14, 13,
/* 310 */ 141, 586, 584, 143, 507, 13, 47, 583, 46, 73,
/* 320 */ 72, 116, 117, 68, 64, 67, 3, 583, 130, 128,
/* 330 */ 91, 90, 89, 506, 201, 48, 144, 22, 22, 575,
/* 340 */ 521, 519, 522, 520, 10, 9, 85, 84, 145, 146,
/* 350 */ 137, 133, 138, 735, 136, 792, 791, 163, 788, 518,
/* 360 */ 787, 164, 757, 727, 218, 100, 749, 774, 773, 114,
/* 370 */ 26, 115, 112, 670, 202, 131, 24, 211, 667, 212,
/* 380 */ 835, 70, 834, 832, 118, 688, 25, 183, 23, 132,
/* 390 */ 657, 79, 93, 540, 655, 186, 81, 82, 653, 652,
/* 400 */ 172, 190, 127, 746, 650, 649, 648, 647, 646, 638,
/* 410 */ 129, 644, 642, 52, 640, 44, 49, 195, 761, 762,
/* 420 */ 775, 191, 193, 189, 187, 28, 210, 75, 221, 222,
/* 430 */ 223, 224, 225, 199, 226, 227, 235, 53, 616, 174,
/* 440 */ 615, 149, 62, 173, 65, 176, 177, 614, 180, 651,
/* 450 */ 607, 184, 92, 527, 645, 541, 120, 689, 121, 122,
/* 460 */ 119, 123, 125, 124, 94, 104, 1, 731, 105, 111,
/* 470 */ 108, 106, 107, 109, 110, 2, 55, 57, 101, 156,
/* 480 */ 188, 5, 546, 102, 19, 6, 587, 20, 4, 15,
/* 490 */ 63, 7, 198, 478, 200, 475, 473, 472, 471, 469,
/* 500 */ 442, 209, 66, 45, 69, 71, 22, 503, 502, 500,
/* 510 */ 54, 463, 461, 453, 459, 455, 457, 451, 449, 477,
/* 520 */ 476, 474, 470, 468, 46, 440, 96, 413, 411, 620,
/* 530 */ 619, 619, 619, 619, 619, 619, 619, 619, 619, 619,
/* 540 */ 97,
};
static const YYCODETYPE yy_lookahead[] = {
/* 0 */ 225, 1, 227, 258, 229, 258, 206, 207, 233, 9,
@ -278,44 +278,44 @@ static const YYCODETYPE yy_lookahead[] = {
/* 140 */ 262, 21, 264, 97, 24, 25, 26, 27, 28, 241,
/* 150 */ 104, 243, 126, 33, 34, 129, 130, 37, 38, 39,
/* 160 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
/* 170 */ 95, 96, 225, 258, 227, 228, 229, 230, 231, 232,
/* 180 */ 233, 234, 235, 236, 237, 1, 2, 78, 72, 5,
/* 190 */ 226, 7, 226, 9, 78, 1, 2, 97, 258, 5,
/* 200 */ 209, 7, 245, 9, 104, 1, 2, 44, 244, 5,
/* 210 */ 244, 7, 213, 9, 257, 216, 209, 33, 34, 33,
/* 220 */ 34, 37, 244, 37, 38, 39, 63, 33, 34, 238,
/* 230 */ 102, 37, 69, 70, 71, 107, 213, 33, 34, 216,
/* 240 */ 77, 114, 115, 25, 26, 27, 28, 102, 241, 98,
/* 170 */ 95, 96, 225, 209, 227, 228, 229, 230, 231, 232,
/* 180 */ 233, 234, 235, 236, 237, 1, 2, 102, 245, 5,
/* 190 */ 125, 7, 226, 9, 226, 1, 2, 132, 133, 5,
/* 200 */ 257, 7, 238, 9, 119, 1, 2, 44, 97, 5,
/* 210 */ 244, 7, 244, 9, 78, 104, 209, 33, 34, 33,
/* 220 */ 34, 37, 244, 37, 38, 39, 63, 33, 34, 72,
/* 230 */ 258, 37, 69, 70, 71, 78, 213, 33, 34, 216,
/* 240 */ 77, 114, 115, 25, 26, 27, 28, 258, 241, 98,
/* 250 */ 243, 33, 34, 102, 5, 37, 38, 39, 2, 98,
/* 260 */ 97, 5, 258, 7, 119, 9, 105, 104, 85, 86,
/* 270 */ 258, 88, 60, 61, 62, 92, 98, 94, 95, 125,
/* 280 */ 102, 97, 33, 34, 1, 244, 132, 124, 134, 33,
/* 260 */ 97, 5, 258, 7, 258, 9, 105, 104, 85, 86,
/* 270 */ 258, 88, 60, 61, 62, 92, 213, 94, 95, 216,
/* 280 */ 102, 97, 33, 34, 5, 107, 7, 124, 59, 33,
/* 290 */ 34, 97, 108, 5, 131, 7, 64, 65, 66, 67,
/* 300 */ 68, 97, 108, 59, 98, 98, 74, 123, 102, 102,
/* 310 */ 258, 5, 108, 7, 98, 102, 258, 123, 102, 239,
/* 320 */ 37, 64, 65, 66, 67, 68, 258, 123, 64, 65,
/* 330 */ 66, 67, 68, 98, 121, 98, 5, 102, 7, 102,
/* 340 */ 258, 97, 127, 128, 127, 128, 258, 5, 258, 7,
/* 350 */ 72, 73, 258, 239, 258, 258, 258, 239, 239, 103,
/* 300 */ 68, 97, 108, 1, 98, 98, 74, 123, 102, 102,
/* 310 */ 258, 98, 108, 258, 98, 102, 102, 123, 102, 127,
/* 320 */ 128, 64, 65, 66, 67, 68, 97, 123, 64, 65,
/* 330 */ 66, 67, 68, 98, 98, 121, 258, 102, 102, 37,
/* 340 */ 5, 5, 7, 7, 127, 128, 72, 73, 258, 258,
/* 350 */ 258, 258, 258, 244, 258, 239, 239, 239, 239, 103,
/* 360 */ 239, 239, 209, 240, 239, 209, 242, 265, 265, 209,
/* 370 */ 104, 209, 246, 209, 209, 209, 209, 209, 209, 209,
/* 380 */ 209, 209, 209, 209, 209, 209, 209, 59, 209, 209,
/* 390 */ 209, 209, 242, 108, 209, 261, 209, 209, 209, 209,
/* 400 */ 209, 209, 209, 261, 209, 209, 209, 209, 209, 209,
/* 410 */ 209, 209, 209, 118, 255, 210, 120, 117, 210, 210,
/* 420 */ 112, 116, 111, 110, 109, 122, 75, 84, 83, 49,
/* 430 */ 80, 82, 210, 81, 53, 210, 79, 75, 5, 210,
/* 440 */ 133, 214, 214, 5, 58, 133, 5, 133, 5, 133,
/* 450 */ 58, 210, 218, 222, 224, 217, 223, 221, 219, 211,
/* 460 */ 220, 210, 215, 252, 254, 242, 211, 253, 5, 249,
/* 470 */ 251, 250, 248, 247, 212, 58, 87, 133, 125, 105,
/* 480 */ 98, 106, 98, 1, 97, 102, 98, 97, 97, 113,
/* 490 */ 113, 72, 9, 98, 97, 102, 102, 97, 99, 97,
/* 500 */ 99, 5, 5, 5, 5, 5, 76, 15, 72, 16,
/* 510 */ 102, 5, 5, 128, 98, 97, 5, 5, 5, 5,
/* 520 */ 5, 5, 5, 5, 128, 5, 5, 5, 5, 5,
/* 530 */ 102, 76, 59, 58, 0, 269, 269, 269, 269, 269,
/* 540 */ 21, 269, 269, 269, 269, 269, 21, 269, 269, 269,
/* 380 */ 209, 209, 209, 209, 209, 209, 209, 242, 209, 209,
/* 390 */ 209, 209, 59, 108, 209, 261, 209, 209, 209, 209,
/* 400 */ 209, 261, 209, 255, 209, 209, 209, 209, 209, 209,
/* 410 */ 209, 209, 209, 118, 209, 117, 120, 112, 210, 210,
/* 420 */ 210, 111, 116, 110, 109, 122, 75, 84, 83, 49,
/* 430 */ 80, 82, 53, 210, 81, 79, 75, 210, 5, 5,
/* 440 */ 5, 210, 214, 134, 214, 134, 5, 5, 125, 210,
/* 450 */ 87, 105, 211, 98, 210, 98, 222, 224, 218, 221,
/* 460 */ 223, 219, 217, 220, 211, 254, 215, 242, 253, 247,
/* 470 */ 250, 252, 251, 249, 248, 212, 106, 102, 97, 1,
/* 480 */ 97, 113, 98, 97, 102, 113, 98, 102, 97, 97,
/* 490 */ 72, 97, 99, 9, 99, 5, 5, 5, 5, 5,
/* 500 */ 76, 15, 72, 16, 128, 128, 102, 5, 5, 98,
/* 510 */ 97, 5, 5, 5, 5, 5, 5, 5, 5, 5,
/* 520 */ 5, 5, 5, 5, 102, 76, 21, 59, 58, 0,
/* 530 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 540 */ 21, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 550 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 560 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 570 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
@ -335,84 +335,83 @@ static const YYCODETYPE yy_lookahead[] = {
/* 710 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 720 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 730 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 740 */ 269, 269, 269, 269, 269, 269, 269, 269, 269, 269,
/* 750 */ 269, 269,
/* 740 */ 269, 269, 269, 269, 269, 269,
};
#define YY_SHIFT_COUNT (246)
#define YY_SHIFT_COUNT (240)
#define YY_SHIFT_MIN (0)
#define YY_SHIFT_MAX (534)
#define YY_SHIFT_MAX (529)
static const unsigned short int yy_shift_ofst[] = {
/* 0 */ 163, 75, 183, 184, 204, 79, 79, 79, 79, 79,
/* 10 */ 79, 0, 22, 204, 256, 256, 256, 46, 79, 79,
/* 20 */ 79, 79, 79, 116, 109, 109, 547, 194, 204, 204,
/* 20 */ 79, 79, 79, 157, 136, 136, 541, 194, 204, 204,
/* 30 */ 204, 204, 204, 204, 204, 204, 204, 204, 204, 204,
/* 40 */ 204, 204, 204, 204, 204, 256, 256, 249, 249, 249,
/* 50 */ 249, 249, 249, 30, 249, 100, 79, 79, 127, 127,
/* 60 */ 128, 79, 79, 79, 79, 79, 79, 79, 79, 79,
/* 50 */ 249, 249, 249, 30, 249, 111, 79, 79, 127, 127,
/* 60 */ 178, 79, 79, 79, 79, 79, 79, 79, 79, 79,
/* 70 */ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
/* 80 */ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
/* 90 */ 79, 79, 79, 79, 79, 79, 79, 79, 266, 328,
/* 100 */ 328, 285, 285, 328, 295, 296, 300, 308, 305, 311,
/* 110 */ 313, 315, 303, 266, 328, 328, 351, 351, 328, 343,
/* 120 */ 345, 380, 350, 349, 381, 352, 357, 328, 362, 328,
/* 130 */ 362, 547, 547, 27, 69, 69, 69, 95, 120, 218,
/* 90 */ 79, 79, 79, 79, 79, 79, 79, 79, 266, 333,
/* 100 */ 333, 285, 285, 333, 295, 296, 298, 305, 306, 310,
/* 110 */ 313, 315, 303, 266, 333, 333, 351, 351, 333, 343,
/* 120 */ 345, 380, 350, 349, 379, 353, 356, 333, 361, 333,
/* 130 */ 361, 541, 541, 27, 69, 69, 69, 95, 120, 218,
/* 140 */ 218, 218, 232, 186, 186, 186, 186, 257, 264, 26,
/* 150 */ 154, 61, 61, 212, 161, 151, 178, 206, 207, 288,
/* 160 */ 306, 283, 244, 213, 145, 216, 235, 237, 215, 217,
/* 170 */ 331, 342, 278, 433, 307, 438, 312, 386, 441, 314,
/* 180 */ 443, 316, 392, 463, 344, 417, 389, 353, 374, 382,
/* 190 */ 375, 383, 384, 387, 482, 390, 388, 391, 393, 376,
/* 200 */ 394, 377, 395, 397, 400, 399, 402, 401, 419, 483,
/* 210 */ 496, 497, 498, 499, 500, 430, 492, 436, 493, 385,
/* 220 */ 396, 408, 506, 507, 416, 418, 408, 511, 512, 513,
/* 230 */ 514, 515, 516, 517, 518, 520, 521, 522, 523, 524,
/* 240 */ 428, 455, 519, 525, 473, 475, 534,
/* 150 */ 65, 61, 61, 212, 161, 151, 206, 207, 213, 279,
/* 160 */ 288, 302, 229, 214, 85, 216, 235, 236, 192, 217,
/* 170 */ 335, 336, 274, 433, 309, 434, 435, 311, 441, 442,
/* 180 */ 363, 323, 346, 355, 370, 375, 357, 381, 478, 383,
/* 190 */ 384, 386, 382, 368, 385, 372, 388, 391, 392, 393,
/* 200 */ 394, 395, 418, 484, 490, 491, 492, 493, 494, 424,
/* 210 */ 486, 430, 487, 376, 377, 404, 502, 503, 411, 413,
/* 220 */ 404, 506, 507, 508, 509, 510, 511, 512, 513, 514,
/* 230 */ 515, 516, 517, 518, 422, 449, 505, 519, 468, 470,
/* 240 */ 529,
};
#define YY_REDUCE_COUNT (132)
#define YY_REDUCE_MIN (-255)
#define YY_REDUCE_MAX (262)
#define YY_REDUCE_MAX (263)
static const short yy_reduce_ofst[] = {
/* 0 */ -200, -53, -225, -238, -222, -151, -122, -194, -117, -92,
/* 10 */ 7, -197, -190, -236, -163, -36, -34, -138, -150, -159,
/* 20 */ -125, -9, -152, -78, -1, 23, -43, -255, -253, -223,
/* 30 */ -144, -85, -60, 4, 12, 52, 58, 68, 82, 88,
/* 40 */ 90, 94, 96, 97, 98, -22, 41, 80, 114, 118,
/* 10 */ 7, -197, -190, -236, -163, -34, -32, -138, -150, -159,
/* 20 */ -125, -36, -152, -78, 23, 63, -57, -255, -253, -223,
/* 30 */ -144, -28, -11, 4, 6, 12, 52, 55, 78, 90,
/* 40 */ 91, 92, 93, 94, 96, -22, 109, 116, 117, 118,
/* 50 */ 119, 121, 122, 123, 125, 124, 153, 156, 102, 103,
/* 60 */ 126, 160, 162, 164, 165, 166, 167, 168, 169, 170,
/* 70 */ 171, 172, 173, 174, 175, 176, 177, 179, 180, 181,
/* 80 */ 182, 185, 187, 188, 189, 190, 191, 192, 193, 195,
/* 90 */ 196, 197, 198, 199, 200, 201, 202, 203, 150, 205,
/* 100 */ 208, 134, 142, 209, 159, 210, 214, 211, 219, 221,
/* 110 */ 220, 224, 226, 223, 222, 225, 227, 228, 229, 230,
/* 120 */ 233, 231, 234, 236, 239, 240, 238, 241, 248, 251,
/* 130 */ 255, 247, 262,
/* 80 */ 182, 185, 187, 188, 189, 190, 191, 193, 195, 196,
/* 90 */ 197, 198, 199, 200, 201, 202, 203, 205, 145, 208,
/* 100 */ 209, 134, 140, 210, 148, 211, 215, 219, 221, 220,
/* 110 */ 224, 226, 222, 225, 223, 227, 228, 230, 231, 233,
/* 120 */ 237, 234, 240, 238, 242, 243, 245, 239, 241, 244,
/* 130 */ 253, 251, 263,
};
static const YYACTIONTYPE yy_default[] = {
/* 0 */ 623, 675, 664, 835, 835, 623, 623, 623, 623, 623,
/* 10 */ 623, 765, 641, 835, 623, 623, 623, 623, 623, 623,
/* 20 */ 623, 623, 623, 677, 677, 677, 760, 623, 623, 623,
/* 30 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 40 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 50 */ 623, 623, 623, 623, 623, 623, 623, 623, 784, 784,
/* 60 */ 758, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 70 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 662,
/* 80 */ 623, 660, 623, 623, 623, 623, 623, 623, 623, 623,
/* 90 */ 623, 623, 623, 623, 623, 649, 623, 623, 623, 643,
/* 100 */ 643, 623, 623, 643, 791, 795, 789, 777, 785, 776,
/* 110 */ 772, 771, 799, 623, 643, 643, 672, 672, 643, 693,
/* 120 */ 691, 689, 681, 687, 683, 685, 679, 643, 670, 643,
/* 130 */ 670, 708, 721, 623, 800, 834, 790, 818, 817, 830,
/* 140 */ 824, 823, 623, 822, 821, 820, 819, 623, 623, 623,
/* 150 */ 623, 826, 825, 623, 623, 623, 623, 623, 623, 623,
/* 160 */ 623, 623, 802, 796, 792, 623, 623, 623, 623, 623,
/* 170 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 180 */ 623, 623, 623, 623, 623, 623, 623, 623, 757, 623,
/* 190 */ 623, 766, 623, 623, 623, 623, 623, 623, 786, 623,
/* 200 */ 778, 623, 623, 623, 623, 623, 623, 734, 623, 623,
/* 210 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 220 */ 623, 839, 623, 623, 623, 728, 837, 623, 623, 623,
/* 230 */ 623, 623, 623, 623, 623, 623, 623, 623, 623, 623,
/* 240 */ 696, 623, 647, 645, 623, 639, 623,
/* 0 */ 617, 669, 658, 829, 829, 617, 617, 617, 617, 617,
/* 10 */ 617, 759, 635, 829, 617, 617, 617, 617, 617, 617,
/* 20 */ 617, 617, 617, 671, 671, 671, 754, 617, 617, 617,
/* 30 */ 617, 617, 617, 617, 617, 617, 617, 617, 617, 617,
/* 40 */ 617, 617, 617, 617, 617, 617, 617, 617, 617, 617,
/* 50 */ 617, 617, 617, 617, 617, 617, 617, 617, 778, 778,
/* 60 */ 752, 617, 617, 617, 617, 617, 617, 617, 617, 617,
/* 70 */ 617, 617, 617, 617, 617, 617, 617, 617, 617, 656,
/* 80 */ 617, 654, 617, 617, 617, 617, 617, 617, 617, 617,
/* 90 */ 617, 617, 617, 617, 617, 643, 617, 617, 617, 637,
/* 100 */ 637, 617, 617, 637, 785, 789, 783, 771, 779, 770,
/* 110 */ 766, 765, 793, 617, 637, 637, 666, 666, 637, 687,
/* 120 */ 685, 683, 675, 681, 677, 679, 673, 637, 664, 637,
/* 130 */ 664, 702, 715, 617, 794, 828, 784, 812, 811, 824,
/* 140 */ 818, 817, 617, 816, 815, 814, 813, 617, 617, 617,
/* 150 */ 617, 820, 819, 617, 617, 617, 617, 617, 617, 617,
/* 160 */ 617, 617, 796, 790, 786, 617, 617, 617, 617, 617,
/* 170 */ 617, 617, 617, 617, 617, 617, 617, 617, 617, 617,
/* 180 */ 617, 617, 751, 617, 617, 760, 617, 617, 617, 617,
/* 190 */ 617, 617, 780, 617, 772, 617, 617, 617, 617, 617,
/* 200 */ 617, 728, 617, 617, 617, 617, 617, 617, 617, 617,
/* 210 */ 617, 617, 617, 617, 617, 833, 617, 617, 617, 722,
/* 220 */ 831, 617, 617, 617, 617, 617, 617, 617, 617, 617,
/* 230 */ 617, 617, 617, 617, 690, 617, 641, 639, 617, 633,
/* 240 */ 617,
};
/********** End of lemon-generated parsing tables *****************************/
@ -565,8 +564,8 @@ static const YYCODETYPE yyFallback[] = {
0, /* SET => nothing */
0, /* KILL => nothing */
0, /* CONNECTION => nothing */
0, /* COLON => nothing */
0, /* STREAM => nothing */
0, /* COLON => nothing */
1, /* ABORT => ID */
1, /* AFTER => ID */
1, /* ATTACH => ID */
@ -856,8 +855,8 @@ static const char *const yyTokenName[] = {
/* 130 */ "SET",
/* 131 */ "KILL",
/* 132 */ "CONNECTION",
/* 133 */ "COLON",
/* 134 */ "STREAM",
/* 133 */ "STREAM",
/* 134 */ "COLON",
/* 135 */ "ABORT",
/* 136 */ "AFTER",
/* 137 */ "ATTACH",
@ -1216,9 +1215,9 @@ static const char *const yyRuleName[] = {
/* 214 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
/* 215 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
/* 216 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
/* 217 */ "cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER",
/* 218 */ "cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER",
/* 219 */ "cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER",
/* 217 */ "cmd ::= KILL CONNECTION INTEGER",
/* 218 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
/* 219 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
};
#endif /* NDEBUG */
@ -1893,9 +1892,9 @@ static const struct {
{ 207, -7 }, /* (214) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
{ 207, -8 }, /* (215) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
{ 207, -9 }, /* (216) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
{ 207, -5 }, /* (217) cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */
{ 207, -7 }, /* (218) cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */
{ 207, -7 }, /* (219) cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */
{ 207, -3 }, /* (217) cmd ::= KILL CONNECTION INTEGER */
{ 207, -5 }, /* (218) cmd ::= KILL STREAM INTEGER COLON INTEGER */
{ 207, -5 }, /* (219) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@ -2734,14 +2733,14 @@ static void yy_reduce(
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
case 217: /* cmd ::= KILL CONNECTION IPTOKEN COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[-2].minor.yy0);}
case 217: /* cmd ::= KILL CONNECTION INTEGER */
{setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);}
break;
case 218: /* cmd ::= KILL STREAM IPTOKEN COLON INTEGER COLON INTEGER */
{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-4].minor.yy0);}
case 218: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);}
break;
case 219: /* cmd ::= KILL QUERY IPTOKEN COLON INTEGER COLON INTEGER */
{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-4].minor.yy0);}
case 219: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);}
break;
default:
break;
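With the rewritten rules above, KILL no longer takes an ip:port prefix; the target is the connection id alone, or a connId:streamId / connId:queryId pair whose tokens the reduce actions splice back into one id string. A hedged helper that splits such an id (illustrative only, not part of the grammar or the parser):

#include <stdio.h>
#include <inttypes.h>

/* Split a "connId:subId" token as produced by the KILL STREAM / KILL QUERY rules above. */
static int sketchSplitKillId(const char *token, uint32_t *connId, uint32_t *subId) {
  return (sscanf(token, "%" SCNu32 ":%" SCNu32, connId, subId) == 2) ? 0 : -1;
}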

View File

@ -127,7 +127,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
hash = rpcHashConn(pCache, fqdn, port, connType);
pNode = (SConnHash *)taosMemPoolMalloc(pCache->connHashMemPool);
strcpy(pNode->fqdn, fqdn);
tstrncpy(pNode->fqdn, fqdn, sizeof(pNode->fqdn));
pNode->port = port;
pNode->connType = connType;
pNode->data = data;
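The strcpy-to-tstrncpy changes in this and the following files adopt a bounded copy keyed to sizeof(destination). A hedged sketch of the idiom, assuming tstrncpy behaves like strncpy but always NUL-terminates the destination:

#include <string.h>

/* Assumed semantics of tstrncpy: copy at most size-1 bytes and always terminate. */
static void sketchBoundedCopy(char *dst, const char *src, size_t size) {
  strncpy(dst, src, size - 1);
  dst[size - 1] = '\0';
}

/* typical call site: sketchBoundedCopy(pNode->fqdn, fqdn, sizeof(pNode->fqdn)); */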

View File

@ -60,7 +60,7 @@ typedef struct {
void *idPool; // handle to ID pool
void *tmrCtrl; // handle to timer
void *hash; // handle returned by hash utility
SHashObj *hash; // handle returned by hash utility
void *tcphandle;// returned handle from TCP initialization
void *udphandle;// returned handle from UDP initialization
void *pCache; // connection cache
@ -211,7 +211,7 @@ void *rpcOpen(const SRpcInit *pInit) {
pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo));
if (pRpc == NULL) return NULL;
if(pInit->label) strcpy(pRpc->label, pInit->label);
if(pInit->label) tstrncpy(pRpc->label, pInit->label, sizeof(pRpc->label));
pRpc->connType = pInit->connType;
pRpc->idleTime = pInit->idleTime;
pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS:pInit->numOfThreads;
@ -228,7 +228,7 @@ void *rpcOpen(const SRpcInit *pInit) {
size_t size = sizeof(SRpcConn) * pRpc->sessions;
pRpc->connList = (SRpcConn *)calloc(1, size);
if (pRpc->connList == NULL) {
tError("%s failed to allocate memory for taos connections, size:%d", pRpc->label, size);
tError("%s failed to allocate memory for taos connections, size:%ld", pRpc->label, size);
rpcClose(pRpc);
return NULL;
}
@ -459,7 +459,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
pInfo->clientPort = pConn->peerPort;
// pInfo->serverIp = pConn->destIp;
strcpy(pInfo->user, pConn->user);
strncpy(pInfo->user, pConn->user, sizeof(pInfo->user));
return 0;
}
@ -503,10 +503,10 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort,
pConn = rpcAllocateClientConn(pRpc);
if (pConn) {
strcpy(pConn->peerFqdn, peerFqdn);
tstrncpy(pConn->peerFqdn, peerFqdn, sizeof(pConn->peerFqdn));
pConn->peerIp = peerIp;
pConn->peerPort = peerPort;
strcpy(pConn->user, pRpc->user);
tstrncpy(pConn->user, pRpc->user, sizeof(pConn->user));
pConn->connType = connType;
if (taosOpenConn[connType]) {
@ -804,7 +804,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
pConn = rpcGetConnObj(pRpc, sid, pRecv);
if (pConn == NULL) {
tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, pHead->ahandle, tstrerror(terrno));
tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, (void *)pHead->ahandle, tstrerror(terrno));
return NULL;
} else {
if (rpcIsReq(pHead->msgType)) {

View File

@ -73,7 +73,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1);
pServerObj->ip = ip;
pServerObj->port = port;
strcpy(pServerObj->label, label);
tstrncpy(pServerObj->label, label, sizeof(pServerObj->label));
pServerObj->numOfThreads = numOfThreads;
pServerObj->pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), numOfThreads);
@ -87,7 +87,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pThreadObj = pServerObj->pThreadObj;
for (int i = 0; i < numOfThreads; ++i) {
pThreadObj->processData = fp;
strcpy(pThreadObj->label, label);
tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->shandle = shandle;
code = pthread_mutex_init(&(pThreadObj->mutex), NULL);
@ -247,7 +247,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
pThreadObj = (SThreadObj *)malloc(sizeof(SThreadObj));
memset(pThreadObj, 0, sizeof(SThreadObj));
strcpy(pThreadObj->label, label);
tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->ip = ip;
pThreadObj->shandle = shandle;

View File

@ -72,7 +72,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pSet->port = port;
pSet->shandle = shandle;
pSet->fp = fp;
strcpy(pSet->label, label);
tstrncpy(pSet->label, label, sizeof(pSet->label));
uint16_t ownPort;
for (int i = 0; i < threads; ++i) {
@ -99,7 +99,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pConn->localPort = (uint16_t)ntohs(sin.sin_port);
}
strcpy(pConn->label, label);
tstrncpy(pConn->label, label, sizeof(pConn->label));
pConn->shandle = shandle;
pConn->processData = fp;
pConn->index = i;

View File

@ -76,6 +76,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0;
int appThreads = 1;
char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime;
int64_t startTime, endTime;
pthread_attr_t thattr;
@ -97,7 +98,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael";
rpcInit.secret = "mypassword";
rpcInit.secret = secret;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT;
@ -106,7 +107,7 @@ int main(int argc, char *argv[]) {
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
ipSet.port[0] = atoi(argv[++i]);
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn));
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
rpcInit.numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {

View File

@ -77,6 +77,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0;
int appThreads = 1;
char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime;
int64_t startTime, endTime;
pthread_attr_t thattr;
@ -98,7 +99,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael";
rpcInit.secret = "mypassword";
rpcInit.secret = secret;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT;

View File

@ -977,7 +977,8 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
// tdResetDataCols(pHelper->pDataCols[1]);
while (true) {
if (iter1 >= pHelper->pDataCols[0]->numOfRows && iter2 >= rows3) break;
tdMergeTwoDataCols(pHelper->pDataCols[1], pHelper->pDataCols[0], &iter1, pDataCols, &iter2, pHelper->config.maxRowsPerFileBlock * 4 / 5);
tdMergeTwoDataCols(pHelper->pDataCols[1], pHelper->pDataCols[0], &iter1, pHelper->pDataCols[0]->numOfRows,
pDataCols, &iter2, rowsWritten, pHelper->config.maxRowsPerFileBlock * 4 / 5);
ASSERT(pHelper->pDataCols[1]->numOfRows > 0);
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pHelper->pDataCols[1],
pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0)
@ -989,54 +990,6 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
}
round++;
blkIdx++;
// TODO: the blkIdx here is not correct
// if (iter1 >= pHelper->pDataCols[0]->numOfRows && iter2 >= rows3) {
// if (pHelper->pDataCols[1]->numOfRows > 0) {
// if (tsdbWriteBlockToFile(pHelper, &pHelper->files.dataF, pHelper->pDataCols[1],
// pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0)
// goto _err;
// // TODO: the blkIdx here is not correct
// tsdbAddSubBlock(pHelper, &compBlock, blkIdx, pHelper->pDataCols[1]->numOfRows);
// }
// }
// TSKEY key1 = iter1 >= pHelper->pDataCols[0]->numOfRows
// ? INT64_MAX
// : ((int64_t *)(pHelper->pDataCols[0]->cols[0].pData))[iter1];
// TSKEY key2 = iter2 >= rowsWritten ? INT64_MAX : ((int64_t *)(pDataCols->cols[0].pData))[iter2];
// if (key1 < key2) {
// for (int i = 0; i < pDataCols->numOfCols; i++) {
// SDataCol *pDataCol = pHelper->pDataCols[1]->cols + i;
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfRows),
// ((char *)pHelper->pDataCols[0]->cols[i].pData + TYPE_BYTES[pDataCol->type] * iter1),
// TYPE_BYTES[pDataCol->type]);
// }
// pHelper->pDataCols[1]->numOfRows++;
// iter1++;
// } else if (key1 == key2) {
// // TODO: think about duplicate key cases
// ASSERT(false);
// } else {
// for (int i = 0; i < pDataCols->numOfCols; i++) {
// SDataCol *pDataCol = pHelper->pDataCols[1]->cols + i;
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfRows),
// ((char *)pDataCols->cols[i].pData +
// TYPE_BYTES[pDataCol->type] * iter2),
// TYPE_BYTES[pDataCol->type]);
// }
// pHelper->pDataCols[1]->numOfRows++;
// iter2++;
// }
// if (pHelper->pDataCols[0]->numOfRows >= pHelper->config.maxRowsPerFileBlock * 4 / 5) {
// if (tsdbWriteBlockToFile(pHelper, &pHelper->files.dataF, pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0) goto _err;
// // TODO: blkIdx here is not correct, fix it
// tsdbInsertSuperBlock(pHelper, &compBlock, blkIdx);
// tdResetDataCols(pHelper->pDataCols[1]);
// }
}
}
}

View File

@ -17,6 +17,7 @@
#include "tulog.h"
#include "talgo.h"
#include "tutil.h"
#include "ttime.h"
#include "tcompare.h"
#include "exception.h"
@ -71,6 +72,8 @@ typedef struct STableCheckInfo {
int32_t compSize;
int32_t numOfBlocks; // number of qualified data blocks not the original blocks
SDataCols* pDataCols;
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator
@ -79,8 +82,6 @@ typedef struct STableCheckInfo {
typedef struct STableBlockInfo {
SCompBlock* compBlock;
STableCheckInfo* pTableCheckInfo;
// int32_t blockIndex;
// int32_t groupIdx; /* number of group is less than the total number of tables */
} STableBlockInfo;
typedef struct SBlockOrderSupporter {
@ -120,7 +121,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
@ -134,7 +135,7 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
pCompBlockLoadInfo->fileId = -1;
}
TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList) {
TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
// todo 1. filter not exist table
// todo 2. add the reference count for each table that is involved in query
@ -147,6 +148,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
@ -201,8 +203,8 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
return (TsdbQueryHandleT) pQueryHandle;
}
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList);
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_LAST;
pQueryHandle->order = TSDB_ORDER_DESC;
@ -227,8 +229,8 @@ SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle) {
return res;
}
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
// pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded
@ -303,6 +305,83 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
return true;
}
SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
SDataRow rmem = NULL, rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node != NULL) {
rmem = SL_GET_NODE_DATA(node);
}
}
if (pCheckInfo->iiter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = SL_GET_NODE_DATA(node);
}
}
if (rmem != NULL && rimem != NULL) {
if (dataRowKey(rmem) < dataRowKey(rimem)) {
pCheckInfo->chosen = 0;
return rmem;
} else if (dataRowKey(rmem) == dataRowKey(rimem)) {
// data ts are duplicated, ignore the data in mem
tSkipListIterNext(pCheckInfo->iter);
pCheckInfo->chosen = 1;
return rimem;
} else {
pCheckInfo->chosen = 1;
return rimem;
}
}
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
}
if (rimem != NULL) {
pCheckInfo->chosen = 1;
return rimem;
}
return NULL;
}
bool moveToNextRow(STableCheckInfo* pCheckInfo) {
bool hasNext = false;
if (pCheckInfo->chosen == 0) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
} else {
if (pCheckInfo->chosen == 1) {
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
}
return hasNext;
}
static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
size_t size = taosArrayGetSize(pHandle->pTableCheckInfo);
assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1);
@ -313,30 +392,12 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
STable* pTable = pCheckInfo->pTableObj;
assert(pTable != NULL);
// no data in cache, abort
if (pTable->mem == NULL && pTable->imem == NULL) {
initTableMemIterator(pHandle, pCheckInfo);
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
return false;
}
if (pCheckInfo->iter == NULL && pTable->mem) {
pCheckInfo->iter = tSkipListCreateIterFromVal(pTable->mem->pData, (const char*) &pCheckInfo->lastKey,
TSDB_DATA_TYPE_TIMESTAMP, pHandle->order);
if (pCheckInfo->iter == NULL) {
return false;
}
if (!tSkipListIterNext(pCheckInfo->iter)) { // buffer is empty
return false;
}
}
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node == NULL) {
return false;
}
SDataRow row = SL_GET_NODE_DATA(node);
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
@ -349,7 +410,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
STimeWindow* win = &pHandle->cur.win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
@ -382,7 +443,7 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio
return fid;
}
static int32_t binarySearchForBlockImpl(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
int32_t firstSlot = 0;
int32_t lastSlot = numOfBlocks - 1;
@ -448,7 +509,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
TSKEY e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
// discard the unqualified data block based on the query time window
int32_t start = binarySearchForBlockImpl(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
int32_t end = start;
if (s > pCompInfo->blocks[start].keyLast) {
@ -523,6 +584,8 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
int64_t st = taosGetTimestampUs();
if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
@ -540,9 +603,15 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
blockLoaded = true;
}
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
taosArrayDestroy(sa);
tfree(data);
int64_t et = taosGetTimestampUs() - st;
tsdbTrace("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
return blockLoaded;
}
@ -600,7 +669,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
// do not load file block into buffer
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, binfo.window.skey - step,
cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step,
pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle);
pQueryHandle->realNumOfRows = cur->rows;
@ -1192,15 +1261,26 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
pBlockInfo->compBlock = &pBlock[k];
pBlockInfo->pTableCheckInfo = pTableCheck;
// pBlockInfo->groupIdx = pTableCheckInfo[j]->groupIdx; // set the group index
// pBlockInfo->blockIndex = pTableCheckInfo[j]->start + k; // set the block index in original table
cnt++;
}
numOfQualTables++;
}
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables", pQueryHandle, cnt, numOfQualTables);
assert(numOfBlocks == cnt);
// since there is only one table qualified, blocks are not sorted
if (numOfQualTables == 1) {
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
tsdbTrace("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
return TSDB_CODE_SUCCESS;
}
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
sup.numOfTables = numOfQualTables;
@ -1257,8 +1337,8 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
break;
}
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d", pQueryHandle, numOfBlocks,
numOfTables, pQueryHandle->pFileGroup->fileId);
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks,
numOfTables, pQueryHandle->pFileGroup->fileId, pQueryHandle->qinfo);
assert(numOfBlocks >= 0);
if (numOfBlocks == 0) {
@ -1565,19 +1645,22 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL};
}
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle) {
int numOfRows = 0;
int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
*skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj);
int32_t numOfTableCols = schemaNCols(pSchema);
do {
SSkipListNode* node = tSkipListIterGet(pIter);
if (node == NULL) {
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
break;
}
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row);
if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
@ -1590,19 +1673,19 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
}
if (*skey == INT64_MIN) {
*skey = dataRowKey(row);
*skey = key;
}
*ekey = dataRowKey(row);
int32_t offset = -1;
*ekey = key;
char* pData = NULL;
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pTable);
int32_t numOfTableCols = schemaNCols(pSchema);
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t i = 0, j = 0;
while(i < numOfCols && j < numOfTableCols) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (pSchema->columns[j].colId < pColInfo->info.colId) {
j++;
continue;
}
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
@ -1610,29 +1693,49 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
}
for(int32_t j = 0; j < numOfTableCols; ++j) {
if (pColInfo->info.colId == pSchema->columns[j].colId) {
offset = pSchema->columns[j].offset;
break;
}
}
assert(offset != -1); // todo handle error
void *value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset);
if (pSchema->columns[j].colId == pColInfo->info.colId) {
void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
memcpy(pData, value, varDataTLen(value));
} else {
memcpy(pData, value, pColInfo->info.bytes);
}
j++;
i++;
} else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
i++;
}
}
while (i < numOfCols) { // the remain columns are all null data
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
}
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
i++;
}
if (++numOfRows >= maxRowsToRead) {
tSkipListIterNext(pIter);
moveToNextRow(pCheckInfo);
break;
}
} while(tSkipListIterNext(pIter));
} while(moveToNextRow(pCheckInfo));
assert(numOfRows <= maxRowsToRead);
@ -1646,6 +1749,10 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
}
}
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbTrace("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d", pQueryHandle,
elapsedTime, numOfRows, numOfCols);
return numOfRows;
}
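The new getSDataRowInTableMem()/moveToNextRow() pair above merges the mem and imem skip lists: the row with the smaller timestamp is returned, a duplicate timestamp is resolved by skipping the mem copy and keeping the imem one, and pCheckInfo->chosen records which iterator moveToNextRow() should advance. A hedged sketch of that merge pattern, with peek and advance folded into one call and purely illustrative types (not the engine's structures):
#include <stdint.h>
#include <stddef.h>
/* Illustrative types only; the engine uses SDataRow and skip-list iterators. */
typedef struct { int64_t key; } Row;
typedef struct { const Row *rows; size_t num; size_t pos; } Iter;
static const Row *peek(const Iter *it) {
  return (it->pos < it->num) ? &it->rows[it->pos] : NULL;
}
/* Return the next row in ascending key order from the two streams and advance
 * the stream it came from; on a duplicate key the row buffered in 'mem' is
 * skipped and the 'imem' row wins, mirroring the tie-break above. */
static const Row *nextMerged(Iter *mem, Iter *imem) {
  const Row *a = peek(mem);
  const Row *b = peek(imem);
  if (a == NULL && b == NULL) return NULL;
  if (b == NULL || (a != NULL && a->key < b->key)) {
    mem->pos++;                     /* mem has the strictly smaller key */
    return a;
  }
  if (a != NULL && a->key == b->key) {
    mem->pos++;                     /* drop the duplicate kept in mem */
  }
  imem->pos++;
  return b;
}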

View File

@ -111,6 +111,15 @@ void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, siz
*/
void *taosCacheAcquireByName(SCacheObj *pCacheObj, const char *key);
/**
* update the expire time of data in cache
* @param pCacheObj cache object
* @param key key
* @param expireTime new expire time of data
* @return
*/
void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, uint64_t expireTime);
/**
* Add one reference count for the exist data, and assign this data for a new owner.
* The new owner needs to invoke the taosCacheRelease when it does not need this data anymore.

View File

@ -153,6 +153,8 @@ bool taosMbsToUcs4(char *mbs, size_t mbs_len, char *ucs4, int32_t ucs4_max_len,
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);
void taosRandStr(char* str, int32_t size);
int32_t taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs);
bool taosValidateEncodec(const char *encodec);

View File

@ -92,12 +92,14 @@ void* taosArrayGet(const SArray* pArray, size_t index) {
}
void* taosArrayGetP(const SArray* pArray, size_t index) {
void* ret = taosArrayGet(pArray, index);
if (ret == NULL) {
assert(index < pArray->size);
void* d = TARRAY_GET_ELEM(pArray, index);
if (d == NULL) {
return NULL;
}
return *(void**)ret;
return *(void**)d;
}
size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; }

View File

@ -488,6 +488,35 @@ void *taosCacheAcquireByName(SCacheObj *pCacheObj, const char *key) {
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, uint64_t expireTime) {
if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) {
return NULL;
}
uint32_t keyLen = (uint32_t)strlen(key);
__cache_rd_lock(pCacheObj);
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
if (ptNode != NULL) {
T_REF_INC(*ptNode);
(*ptNode)->expiredTime = expireTime;
}
__cache_unlock(pCacheObj);
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uTrace("key:%s expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uTrace("key:%s not in cache, retrieved failed", key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
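A minimal, hedged usage sketch for the new taosCacheUpdateExpireTimeByName() above; the key string and the wrapper function are illustrative, while the signature, the extra reference taken on a hit, and the NULL return on a miss come from the code shown here:
#include <stdint.h>
#include <stdio.h>
/* SCacheObj and the prototype come from tcache.h as declared above. */
void touchCachedConn(SCacheObj *pCacheObj, uint64_t newExpireTime) {
  /* returns the cached data on a hit (with one extra reference taken),
   * or NULL when the key is not in the cache */
  void *p = taosCacheUpdateExpireTimeByName(pCacheObj, "conn:1", newExpireTime);
  if (p == NULL) {
    printf("conn:1 is not cached, nothing to refresh\n");
    return;
  }
  /* the extra reference must later be dropped through the cache's release API;
   * that call is omitted here because its signature is not part of this diff */
}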
void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
if (pCacheObj == NULL || data == NULL) return NULL;

View File

@ -19,6 +19,7 @@
#include "tutil.h"
int taosGetFqdn(char *fqdn) {
int code = 0;
char hostname[1024];
hostname[1023] = '\0';
gethostname(hostname, 1023);
@ -27,13 +28,15 @@ int taosGetFqdn(char *fqdn) {
h = gethostbyname(hostname);
if (h != NULL) {
strcpy(fqdn, h->h_name);
return 0;
} else {
uError("failed to get host name");
return -1;
uError("failed to get host name(%s)", strerror(errno));
code = -1;
}
free(h);
// to do: free the resources
// free(h);
return code;
}
uint32_t taosGetIpFromFqdn(const char *fqdn) {
@ -47,7 +50,7 @@ uint32_t ip2uint(const char *const ip_addr) {
char ip_addr_cpy[20];
char ip[5];
strcpy(ip_addr_cpy, ip_addr);
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
char *s_start, *s_end;
s_start = ip_addr_cpy;
@ -206,7 +209,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
int reuse, nocheck;
int bufSize = 8192000;
uTrace("open udp socket:%s:%hu", ip, port);
uTrace("open udp socket:0x%x:%hu", ip, port);
memset((char *)&localAddr, 0, sizeof(localAddr));
localAddr.sin_family = AF_INET;
@ -257,7 +260,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
/* bind socket to local address */
if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) {
uError("failed to bind udp socket: %d (%s), %s:%hu", errno, strerror(errno), ip, port);
uError("failed to bind udp socket: %d (%s), 0x%x:%hu", errno, strerror(errno), ip, port);
taosCloseSocket(sockFd);
return -1;
}
@ -363,7 +366,7 @@ int taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
int sockFd;
int reuse;
uTrace("open tcp server socket:%s:%hu", ip, port);
uTrace("open tcp server socket:0x%x:%hu", ip, port);
bzero((char *)&serverAdd, sizeof(serverAdd));
serverAdd.sin_family = AF_INET;

View File

@ -27,8 +27,6 @@
#include "tulog.h"
#include "taoserror.h"
int32_t tmpFileSerialNum = 0;
int32_t strdequote(char *z) {
if (z == NULL) {
return 0;
@ -433,12 +431,24 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) {
#else
char *tmpDir = "/tmp/";
#endif
int64_t ts = taosGetTimestampUs();
strcpy(tmpPath, tmpDir);
strcat(tmpPath, tdengineTmpFileNamePrefix);
strcat(tmpPath, fileNamePrefix);
strcat(tmpPath, "-%d-%"PRIu64"-%u-%"PRIu64);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1), ts);
strcat(tmpPath, "-%d-%s");
char rand[8] = {0};
taosRandStr(rand, tListLen(rand) - 1);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
}
void taosRandStr(char* str, int32_t size) {
const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_.";
int32_t len = 39;
for(int32_t i = 0; i < size; ++i) {
str[i] = set[rand()%len];
}
}
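A hedged usage sketch for the reworked getTmpfilePath() above; the "join" prefix is an arbitrary example, and only the prototypes, the PATH_MAX-sized destination, and the pid-plus-random-suffix shape are taken from the code shown here:
#include <stdint.h>
#include <limits.h>   /* PATH_MAX */
#include <stdio.h>
/* Prototypes as declared in the hunks above. */
void getTmpfilePath(const char *fileNamePrefix, char *dstPath);
void taosRandStr(char *str, int32_t size);
/* The destination must hold PATH_MAX bytes because getTmpfilePath() snprintf's
 * into it with PATH_MAX. Note taosRandStr() fills exactly 'size' characters and
 * does not terminate the buffer itself; the caller above zero-initializes
 * rand[8] and asks for 7 characters. */
void example(void) {
  char path[PATH_MAX] = {0};
  getTmpfilePath("join", path);   /* ends in "-<pid>-<random chars>" now */
  printf("temp file path: %s\n", path);
}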
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) {

View File

@ -68,10 +68,18 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
pWal->num = 0;
pWal->level = pCfg->walLevel;
pWal->keep = pCfg->keep;
strcpy(pWal->path, path);
tstrncpy(pWal->path, path, sizeof(pWal->path));
pthread_mutex_init(&pWal->mutex, NULL);
if (access(path, F_OK) != 0) mkdir(path, 0755);
if (access(path, F_OK) != 0) {
if (mkdir(path, 0755) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to create directory(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex);
free(pWal);
pWal = NULL;
}
}
if (pCfg->keep == 1) return pWal;
@ -80,7 +88,7 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
if (pWal->fd <0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to open", path);
wError("wal:%s, failed to open(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex);
free(pWal);
pWal = NULL;
@ -119,7 +127,8 @@ void walClose(void *handle) {
int walRenew(void *handle) {
if (handle == NULL) return 0;
SWal *pWal = handle;
int code = 0;
terrno = 0;
pthread_mutex_lock(&pWal->mutex);
@ -135,8 +144,8 @@ int walRenew(void *handle) {
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%d, failed to open(%s)", pWal->name, strerror(errno));
code = -1;
wError("wal:%s, failed to open(%s)", pWal->name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
} else {
wTrace("wal:%s, it is created", pWal->name);
@ -156,14 +165,15 @@ int walRenew(void *handle) {
pthread_mutex_unlock(&pWal->mutex);
return code;
return terrno;
}
int walWrite(void *handle, SWalHead *pHead) {
SWal *pWal = handle;
int code = 0;
if (pWal == NULL) return -1;
terrno = 0;
// no wal
if (pWal->level == TAOS_WAL_NOLOG) return 0;
if (pHead->version <= pWal->version) return 0;
@ -174,12 +184,12 @@ int walWrite(void *handle, SWalHead *pHead) {
if(write(pWal->fd, pHead, contLen) != contLen) {
wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
} else {
pWal->version = pHead->version;
}
return code;
return terrno;
}
void walFsync(void *handle) {
@ -196,11 +206,11 @@ void walFsync(void *handle) {
int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) {
SWal *pWal = handle;
int code = 0;
struct dirent *ent;
int count = 0;
uint32_t maxId = 0, minId = -1, index =0;
terrno = 0;
int plen = strlen(walPrefix);
char opath[TSDB_FILENAME_LEN+5];
@ -224,30 +234,30 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
closedir(dir);
if (count == 0) {
if (pWal->keep) code = walRenew(pWal);
return code;
if (pWal->keep) terrno = walRenew(pWal);
return terrno;
}
if ( count != (maxId-minId+1) ) {
wError("wal:%s, messed up, count:%d max:%d min:%d", opath, count, maxId, minId);
code = -1;
terrno = TAOS_SYSTEM_ERROR(TSDB_CODE_APP_ERROR);
} else {
wTrace("wal:%s, %d files will be restored", opath, count);
for (index = minId; index<=maxId; ++index) {
sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index);
code = walRestoreWalFile(pWal, pVnode, writeFp);
if (code < 0) break;
terrno = walRestoreWalFile(pWal, pVnode, writeFp);
if (terrno < 0) break;
}
}
if (code == 0) {
if (terrno == 0) {
if (pWal->keep == 0) {
code = walRemoveWalFiles(opath);
if (code == 0) {
terrno = walRemoveWalFiles(opath);
if (terrno == 0) {
if (remove(opath) < 0) {
wError("wal:%s, failed to remove directory(%s)", opath, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
} else {
@ -258,12 +268,12 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
}
return code;
return terrno;
}
int walGetWalFile(void *handle, char *name, uint32_t *index) {
@ -292,40 +302,47 @@ int walGetWalFile(void *handle, char *name, uint32_t *index) {
}
static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
int code = 0;
char *name = pWal->name;
terrno = 0;
char *buffer = malloc(1024000); // size for one record
if (buffer == NULL) return -1;
if (buffer == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
SWalHead *pHead = (SWalHead *)buffer;
int fd = open(name, O_RDONLY);
if (fd < 0) {
wError("wal:%s, failed to open for restore(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
free(buffer);
return -1;
return terrno;
}
wTrace("wal:%s, start to restore", name);
while (1) {
int ret = read(fd, pHead, sizeof(SWalHead));
if ( ret == 0) { code = 0; break;}
if ( ret == 0) break;
if (ret != sizeof(SWalHead)) {
wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
wWarn("wal:%s, cksum is messed up, skip the rest of file", name);
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
ret = read(fd, pHead->cont, pHead->len);
if ( ret != pHead->len) {
wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
@ -336,11 +353,10 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
close(fd);
free(buffer);
return code;
return terrno;
}
int walHandleExistingFiles(const char *path) {
int code = 0;
char oname[TSDB_FILENAME_LEN * 3];
char nname[TSDB_FILENAME_LEN * 3];
char opath[TSDB_FILENAME_LEN];
@ -350,6 +366,7 @@ int walHandleExistingFiles(const char *path) {
struct dirent *ent;
DIR *dir = opendir(path);
int plen = strlen(walPrefix);
terrno = 0;
if (access(opath, F_OK) == 0) {
// old directory is there, it means restore process is not finished
@ -360,13 +377,19 @@ int walHandleExistingFiles(const char *path) {
int count = 0;
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
if (access(opath, F_OK) != 0) mkdir(opath, 0755);
sprintf(oname, "%s/%s", path, ent->d_name);
sprintf(nname, "%s/old/%s", path, ent->d_name);
if (access(opath, F_OK) != 0) {
if (mkdir(opath, 0755) != 0) {
wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
}
if (rename(oname, nname) < 0) {
wError("wal:%s, failed to move to new:%s", oname, nname);
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
@ -378,14 +401,14 @@ int walHandleExistingFiles(const char *path) {
}
closedir(dir);
return code;
return terrno;
}
static int walRemoveWalFiles(const char *path) {
int plen = strlen(walPrefix);
char name[TSDB_FILENAME_LEN * 3];
int code = 0;
terrno = 0;
if (access(path, F_OK) != 0) return 0;
struct dirent *ent;
@ -396,13 +419,13 @@ static int walRemoveWalFiles(const char *path) {
sprintf(name, "%s/%s", path, ent->d_name);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
code = -1; break;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
}
closedir(dir);
return code;
return terrno;
}
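The wal.c changes above drop the local code variable in favour of the library-global terrno, set with TAOS_SYSTEM_ERROR(errno) at each failure site; a hedged sketch of that pattern, where open() merely stands in for whichever call fails:
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
/* terrno and TAOS_SYSTEM_ERROR are the library's own global and macro. */
static int openLikeWal(const char *name) {
  terrno = 0;                            /* success unless something fails */
  int fd = open(name, O_RDONLY);
  if (fd < 0) {
    terrno = TAOS_SYSTEM_ERROR(errno);   /* preserve the underlying errno */
    return terrno;
  }
  close(fd);
  return terrno;                         /* still 0 on success */
}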

View File

@ -87,7 +87,7 @@ class TDTestCase:
# <> for timestamp type
tdSql.query("select * from db.st where ts <> '2020-05-13 10:00:00.002'")
#tdSql.checkRows(4)
# tdSql.checkRows(4)
# <> for numeric type
tdSql.query("select * from db.st where tagtype <> 2")

View File

@ -42,14 +42,17 @@ class TDTestCase:
('2020-05-13 10:00:00.005', 3, 'third')""")
# query with filter condition A OR condition B
tdSql.query("select * from db.st where ts > '2020-05-13 10:00:00.002' AND tagtype < 2")
tdSql.query(
"select * from db.st where ts > '2020-05-13 10:00:00.002' AND tagtype < 2")
tdSql.checkRows(1)
# query with filter condition A OR condition B, error expected
tdSql.error("select * from db.st where ts > '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error(
"select * from db.st where ts > '2020-05-13 10:00:00.002' OR tagtype < 2")
# illegal condition
tdSql.error("select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error(
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
def stop(self):

View File

@ -41,7 +41,6 @@ class TDTestCase:
('2020-05-13 10:00:00.002', 3, 'third') dev_002 VALUES('2020-05-13 10:00:00.003', 1, 'first'), ('2020-05-13 10:00:00.004', 2, 'second'),
('2020-05-13 10:00:00.005', 3, 'third')""")
# query first .. as ..
tdSql.error("select first(*) as one from st")

View File

@ -28,9 +28,12 @@ class TDTestCase:
print("==============step1")
tdSql.execute("create table stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 binary(10), t3 nchar(10))")
tdSql.execute("insert into tb1 using stb1 tags(1,'tb1', '表1') values ('2020-04-18 15:00:00.000', 1, 0.1), ('2020-04-18 15:00:01.000', 2, 0.1)")
tdSql.execute("insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
tdSql.execute(
"create table stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 binary(10), t3 nchar(10))")
tdSql.execute(
"insert into tb1 using stb1 tags(1,'tb1', '表1') values ('2020-04-18 15:00:00.000', 1, 0.1), ('2020-04-18 15:00:01.000', 2, 0.1)")
tdSql.execute(
"insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# inner join --- bug
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")

View File

@ -111,7 +111,6 @@ class Test (threading.Thread):
last_tb)
written = written + 1
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
@ -152,7 +151,6 @@ class Test (threading.Thread):
last_tb = ""
written = 0
def query_data_from_stable(self):
tdLog.info("query_data_from_stable")
global last_stb
@ -164,7 +162,6 @@ class Test (threading.Thread):
tdLog.info("will query data from super table")
tdSql.execute('select * from %s' % last_stb)
def reset_query_cache(self):
tdLog.info("reset_query_cache")
global last_tb
@ -232,7 +229,7 @@ class Test (threading.Thread):
self.threadLock.acquire()
tdLog.notice("first thread")
randDataOp = random.randint(1, 3)
dataOp.get(randDataOp , lambda: "ERROR")()
dataOp.get(randDataOp, lambda: "ERROR")()
self.threadLock.release()
elif (self.threadId == 2):

View File

@ -111,7 +111,6 @@ class Test (threading.Thread):
last_tb)
written = written + 1
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
@ -154,7 +153,6 @@ class Test (threading.Thread):
last_tb = ""
written = 0
def query_data_from_stable(self):
tdLog.info("query_data_from_stable")
global last_stb
@ -166,7 +164,6 @@ class Test (threading.Thread):
tdLog.info("will query data from super table")
tdSql.execute('select * from %s' % last_stb)
def reset_query_cache(self):
tdLog.info("reset_query_cache")
global last_tb
@ -230,7 +227,7 @@ class Test (threading.Thread):
self.threadLock.acquire()
tdLog.notice("first thread")
randDataOp = random.randint(1, 3)
dataOp.get(randDataOp , lambda: "ERROR")()
dataOp.get(randDataOp, lambda: "ERROR")()
self.threadLock.release()
elif (self.threadId == 2):

View File

@ -112,7 +112,6 @@ class Test:
tdSql.execute('drop table %s' % self.last_stb)
self.last_stb = ""
def query_data_from_stable(self):
tdLog.info("query_data_from_stable")
if (self.last_stb == ""):
@ -122,20 +121,21 @@ class Test:
tdLog.info("will query data from super table")
tdSql.execute('select * from %s' % self.last_stb)
def restart_database(self):
tdLog.info("restart_databae")
tdDnodes.stop(1)
tdDnodes.start(1)
tdLog.sleep(5)
def force_restart_database(self):
tdLog.info("force_restart_database")
tdDnodes.forcestop(1)
tdDnodes.start(1)
tdLog.sleep(5)
tdSql.prepare()
self.last_tb = ""
self.last_stb = ""
self.written = 0
def drop_table(self):
tdLog.info("drop_table")
@ -159,6 +159,9 @@ class Test:
tdDnodes.start(1)
tdLog.sleep(5)
tdSql.prepare()
self.last_tb = ""
self.last_stb = ""
self.written = 0
def delete_datafiles(self):
tdLog.info("delete_datafiles")
@ -173,6 +176,9 @@ class Test:
tdDnodes.start(1)
tdLog.sleep(10)
tdSql.prepare()
self.last_tb = ""
self.last_stb = ""
self.written = 0
class TDTestCase:

View File

@ -1,42 +1,42 @@
#!/bin/bash
# insert
python3 ./test.py -g -f insert/basic.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/int.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/float.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bigint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bool.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/double.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/smallint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/tinyint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/binary.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/date.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/nchar.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/multi.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/basic.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/int.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/float.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bigint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bool.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/double.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/smallint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/tinyint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/binary.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/date.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/nchar.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/multi.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# table
python3 ./test.py -g -f table/column_name.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/column_num.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/db_table.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_name.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_num.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/db_table.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# import
python3 ./test.py -g -f import_merge/importDataLastSub.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f import_merge/importDataLastSub.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py $1 -f tag_lite/filter.py
PYTHONMALLOC=malloc python3 ./test.py $1 -s && sleep 1

View File

@ -2,11 +2,11 @@
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 is the master vnode)
# step 3: create db and table, insert data, and flush it to disk (control it so that only one file is generated, e.g. 1841)
# step 4: insert old data (now-15d) and new data (now+15d), controlling the row count so the rows stay in cache and are not flushed to disk
# step 4: insert old data (now-20d) and new data (now-40d), controlling the row count so the rows stay in cache and are not flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-16d, now+16d
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting for sync to end
# expect: in dnode2, the files 1840 and 1842 will be removed
# expect: in dnode2, the files 1837 and 1839 will be removed
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
@ -14,10 +14,10 @@ system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
@ -39,11 +39,6 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
@ -64,16 +59,16 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
#sql create dnode $hostname3
sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 1 cache 1
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
@ -82,7 +77,7 @@ sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
#$tsStart = 1420041600000
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2M data in order to falling disc, generate one file
$i = 0
@ -102,19 +97,24 @@ while $i < $tblNum
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
print rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
if $data00 == 0 then
return -1
endi
$totalRows = $data00
print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
sql insert into $tb values ( now - 15d , -15 )
sql insert into $tb values ( now + 15d , 15 )
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode2, so date rows falling disc, generate two new files in dnode2
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline:
@ -153,48 +153,14 @@ if $data00 != $totalRows then
endi
print ============== step5: insert two data rows: now-16d, now+16d,
sql insert into $tb values ( now - 16d , -16 )
sql insert into $tb values ( now + 16d , 16 )
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
return 1
print ============== step5: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step6: in dnode2, the files 1840 and 1842 will be removed
# how check in script ???
print ============== step6: please check there should be 3 file in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file sim/dnode3/data/vnode/vnode2/tsdb/data/

View File

@ -0,0 +1,52 @@
# Test case description: dnode1 is mnode only, dnode2/dnode3 are vnode only
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 is the master vnode)
# step 3: create db and table, insert data, and flush it to disk (control it so that only one file is generated, e.g. 1841)
# step 4: insert old data (now-20d) and new data (now-40d), controlling the row count so the rows stay in cache and are not flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting for sync to end
# expect: in dnode2, the files 1837 and 1839 will be removed
sql connect
sleep 3000
print ============== step7: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep 1000
# check using select
$db = db
$stb = stb
sql use $db
sql select count(*) from $stb
print data00 $data00, should equal to dn2_mn1_cache_file_sync.sim output
#if $data00 != $totalRows then
# return -1
#endi
print ============== step8: please check there should be 1 file in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file sim/dnode3/data/vnode/vnode2/tsdb/data/

View File

@ -23,28 +23,39 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 200
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 200
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode1 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode2 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode4 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode5 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode5 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
@ -63,9 +74,9 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster, then create database, create table , and insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 1000
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
$rowNum = 100
$tblNum = 16
@ -151,7 +162,7 @@ endi
# return -1
#endi
sleep 30000
sleep 15000
wait_drop:
sql show dnodes
@ -174,22 +185,36 @@ endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
print ============== step5: start dnode5 and add into cluster , drop database
sql drop database $db
sleep 1000
system sh/exec.sh -n dnode5 -s start
sql create dnode $hostname5
print ============== step4-1: restart dnode3, adn add into cluster
system rm -rf ../../sim/dnode3
sleep 3000
wait_dnode5:
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 3000
wait_dnode3_ready:
sql show dnodes
print rows: $rows
if $rows != 4 then
sleep 3000
goto wait_dnode5
goto wait_dnode3_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
@ -198,6 +223,7 @@ print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
$dnode5Status = $data4_5
@ -207,6 +233,9 @@ endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
@ -214,10 +243,58 @@ if $dnode5Status != ready then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: start dnode5 and add into cluster , drop database
sql drop database $db
sleep 1000
system sh/exec.sh -n dnode5 -s start
sql create dnode $hostname5
sleep 3000
wait_dnode5:
sql show dnodes
if $rows != 5 then
sleep 3000
goto wait_dnode5
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode4Status = $data4_4
$dnode5Status = $data4_5
$dnode6Status = $data4_6
if $dnode1Status != ready then
return -1
endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
if $dnode5Status != ready then
return -1
endi
if $dnode6Status != ready then
return -1
endi
print ============== step6: create database and table until not free vnodes
$rowNum = 100
$tblNum = 24
$tblNum = 32
$totalRows = 0
$tsStart = 1420041600000
@ -259,8 +336,9 @@ if $data00 != $totalRows then
return -1
endi
print ============== step7: drop dnode3, and system should prompt cannot drop dnodes
sql_error drop dnode $hostname3
print ============== step8: add one new table, and system should prompt 'need more dnode'
print ============== step7: drop dnode $hostname5, system should prompt "DB error: no enough dnodes"
sql_error drop dnode $hostname5
print error: $error
print ============== step8: create table tb_more using $stb tags( 1000 ), system should prompt 'DB error: no enough dnodes'
sql_error create table tb_more using $stb tags( 1000 )
print error: $error

View File

@ -89,11 +89,8 @@ if $data00 != $totalRows then
return -1
endi
print ============== step2-1: start dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop
print ============== step2-1: stop dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
sql show dnodes
@ -151,10 +148,6 @@ if $data00 != $totalRows then
endi
print ============== step3: start dnode3 and add into cluster , then alter replica from 1 to 2, and waiting sync
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3

View File

@ -0,0 +1,202 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
$totalTableNum = 100
$sleepTimer = 3000
$db = db
sql create database $db replica 3 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 100
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1420041600000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step3: stop dnode2, and remove its vnodeX subdirector
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode2_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline_0
endi
system rm -rf ../../../sim/dnode2/data/vnode/*
sleep 1000
print ============== step4: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep $sleepTimer
wait_dnode2_reready:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode2_reready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_reready
endi
print ============== step5: stop dnode3/dnode4, and check rows
system sh/exec.sh -n dnode3 -s stop
system sh/exec.sh -n dnode4 -s stop
sleep $sleepTimer
wait_dnode34_offline:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode34_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode34_offline
endi
if $dnode3Status != offline then
sleep 2000
goto wait_dnode34_offline
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode34_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
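# with dnode3 and dnode4 offline, insert a few more rows and check that the count still matches totalRows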
sql insert into $tb values ( now , 20000 ) ( now + 1a, 20001 ) ( now + 2a, 20002 )
$totalRows = $totalRows + 3
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -0,0 +1,194 @@
# Test case description: dnode1 is mnode only, dnode2/dnode3 are vnode only
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 is the master vnode)
# step 3: create db and table, insert data, and flush it to a disk file (control the data so that only one file is generated, e.g. 1841)
# step 4: insert old data (now-15d) and new data (now+15d), controlling the number of rows so they stay in cache and are not flushed to disk
# step 5: stop dnode2, so the cached data rows are flushed to disk, generating two new files 1840 and 1842 in dnode2
# step 6: insert two data rows: now-16d, now+16d
# step 7: restart dnode2, wait for sync to finish
# expect: in dnode2, the files 1840 and 1842 will be removed
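# to observe the expected cleanup manually, one can list dnode2's vnode data directory before and after step 7,
# e.g. (path layout as used by other scripts in this suite; exact vnode/file names may differ):
#   ls -R ../../../sim/dnode2/data/vnode/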
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3 and add them into the cluster, then create a database with replica 2, create tables, and insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2M of data in order to force a flush to disk and generate one file
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
# $ts = $tsStart + $x
sql insert into $tb values ( now + 0s , $x ) ( now + 1s , $x ) ( now + 2s , $x ) ( now + 3s , $x ) ( now + 4s , $x ) ( now + 5s , $x ) ( now + 6s , $x ) ( now + 7s , $x ) ( now + 8s , $x ) ( now + 9s , $x )
$x = $x + 10
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
if $data00 == 0 then
return -1
endi
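# the inserts above use now-based timestamps, so rows from different loop passes may collide and overwrite each other; take the actual count as totalRows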
$totalRows = $data00
print ============== step3: insert old data (now-15d) and new data (now+15d), control data rows so they stay in cache and are not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode2, so cached data rows are flushed to disk, generating two new files in dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: insert two data rows: now-16d, now+16d
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
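# these rows are written while dnode2 is offline; after the restart below they must be synced back to dnode2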
print ============== step6: restart dnode2, wait for sync to finish
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi

View File

@ -2,18 +2,42 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -22,21 +46,23 @@ system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: replica is 1, and start 1 dnode
print ============== step1: replica is 1, and start 1 dnode, then create tables and insert data
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
$db = replica_db1
sql create database $db replica 1 maxTables 4
$totalTableNum = 12
$db = db
sql create database $db replica 1 maxTables $totalTableNum
sql use $db
# create table, insert data
$stb = repl_stb
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 10
$tblNum = 12
$tblNum = $totalTableNum
$totalRows = $rowNum * $tblNum
$ts0 = 1420041600000
$ts = $ts0
@ -55,46 +81,136 @@ while $i < $tblNum
$x = $x + 1
endw
$i = $i + 1
print $tb inserted rows: $x
endw
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step2: add 1 new dnode, expect balanced
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000
# expect after balancing, 2 vnodes in dnode1, 1 vnode in dnode2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
wait_dnode2_ready:
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
if $data2_1 != 2 then
goto show2
endi
if $data2_2 != 1 then
goto show2
endi
print ============== step4: stop dnode1, and wait dnode2 master
system sh/exec.sh -n dnode1 -s stop
$x = 0
loop_wait:
$x = $x + 1
if $rows != 2 then
sleep 2000
if $x == 10 then
print ERROR: after dnode1 stop, dnode2 didn't become a master!
return -1
endi
sql show mnodes
$dnodeRole = $data2_1
print dnodeRole ==> $dnodeRole
if $dnodeRole != master then
goto loop_wait
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
print ============== step3: stop dnode1/dnode2, modify cfg numOfMnodes to 2, and restart dnode1/dnode2
system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop
sleep 3000
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
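# after restarting with numOfMnodes 2, dnode2 is expected to join as the second mnode (verified via show mnodes in step5)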
print ============== step4: wait for dnodes ready
wait_dnode_ready:
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode_ready
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: stop dnode1
system sh/exec.sh -n dnode1 -s stop
sleep 3000
wait_dnode2_master:
sql show mnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_master
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $data2_1 != offline then
sleep 2000
goto wait_dnode2_master
endi
if $data2_2 != master then
sleep 2000
goto wait_dnode2_master
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi