Merge branch 'develop' into feature/subscribe
commit 194e91ef55
@@ -64,7 +64,7 @@ matrix:
 for memError in `cat uniq-mem-error-out.txt | awk '{print $4}'`
 do
 if [ -n "$memError" ]; then
-if [ "$memError" -gt 5 ]; then
+if [ "$memError" -gt 12 ]; then
 echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
 More than our threshold! ## ${NC}"
 travis_terminate $memError
@@ -76,7 +76,7 @@ matrix:
 for defiMemError in `cat uniq-definitely-lost-out.txt | awk '{print $7}'`
 do
 if [ -n "$defiMemError" ]; then
-if [ "$defiMemError" -gt 3 ]; then
+if [ "$defiMemError" -gt 13 ]; then
 echo -e "${RED} ## Memory errors number valgrind reports \
 Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
 travis_terminate $defiMemError
@@ -116,7 +116,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
 * create local reducer to launch the second-stage reduce process at client site
 */
 void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
-SColumnModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes);
+SColumnModel *finalModel, SSqlObj* pSql);

 void tscDestroyLocalReducer(SSqlObj *pSql);

@@ -183,7 +183,7 @@ void tscSqlExprInfoDestroy(SArray* pExprInfo);

 SColumn* tscColumnClone(const SColumn* src);
 SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
-void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex);
+SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
 void tscColumnListDestroy(SArray* pColList);

 SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
@@ -2904,7 +2904,11 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {

 param[1][2] /= param[1][1];

-sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]);
+int32_t maxOutputSize = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE - VARSTR_HEADER_SIZE;
+size_t n = snprintf(varDataVal(pCtx->aOutputBuf), maxOutputSize, "{slop:%.6lf, intercept:%.6lf}",
+param[0][2], param[1][2]);

+varDataSetLen(pCtx->aOutputBuf, n);
 doFinalizer(pCtx);
 }

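The change above switches the leastsquares output from a plain sprintf into a bounded snprintf targeting a variable-length string buffer: the payload is written past the header, and the header then records the payload length. The sketch below only illustrates that header-plus-payload pattern with simplified stand-in types; it is not the project's actual varstr implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-in for a var-type output buffer: length header + payload */
    typedef struct {
      uint16_t len;       /* filled in last, like the varDataSetLen() step  */
      char     val[64];   /* written first, like the varDataVal() step      */
    } VarStrBuf;

    static void writePair(VarStrBuf *out, double slope, double intercept) {
      int n = snprintf(out->val, sizeof(out->val), "{slop:%.6lf, intercept:%.6lf}", slope, intercept);
      out->len = (uint16_t)n;   /* header now records the payload length */
    }

    int main(void) {
      VarStrBuf b;
      writePair(&b, 1.5, -0.25);
      printf("%.*s\n", (int)b.len, b.val);
      return 0;
    }
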
@@ -1297,10 +1297,6 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c
 pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ);
 }

-void addRequiredTagColumn(STableMetaInfo* pTableMetaInfo, SColumnIndex* index) {
-
-}
-
 static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
 SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);

@@ -3796,6 +3792,8 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,

 tSQLExprDestroy(p1);
 tExprTreeDestroy(&p, NULL);
+
+taosArrayDestroy(colList);
 }

 pCondExpr->pTagCond = NULL;

@@ -55,7 +55,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
 }
 }

-static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
+static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
 /*
 * the fields and offset attributes in pCmd and pModel may be different due to
 * merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@@ -96,13 +96,13 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
 if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
 pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
 pCtx->param[2].i64Key = pQueryInfo->order.order;
 pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
 pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
 }

 SResultInfo *pResInfo = &pReducer->pResInfo[i];
 pResInfo->bufLen = pExpr->interBytes;
-pResInfo->interResultBuf = calloc(1, (size_t)pResInfo->bufLen);
+pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen);

 pCtx->resultInfo = &pReducer->pResInfo[i];
 pCtx->resultInfo->superTableQ = true;
@@ -132,16 +132,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
 }
 }

-/*
- * todo release allocated memory process with async process
- */
 void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
-SColumnModel *finalmodel, SSqlCmd *pCmd, SSqlRes *pRes) {
-// offset of cmd in SSqlObj structure
-char *pSqlObjAddr = (char *)pCmd - offsetof(SSqlObj, cmd);
+SColumnModel *finalmodel, SSqlObj* pSql) {
+SSqlCmd* pCmd = &pSql->cmd;
+SSqlRes* pRes = &pSql->res;

 if (pMemBuffer == NULL) {
-tscError("%p pMemBuffer", pMemBuffer);
+tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+
+tscError("%p pMemBuffer is NULL", pMemBuffer);
 pRes->code = TSDB_CODE_APP_ERROR;
 return;
 }
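The signature change above (passing SSqlObj* instead of a SSqlCmd*/SSqlRes* pair) removes the need to recover the owning object from a member pointer via offsetof. A minimal sketch of the two styles, using hypothetical types rather than the real SSqlObj:

    #include <stddef.h>

    typedef struct { int cmd; int res; } Owner;   /* stand-in for SSqlObj */

    /* old style: derive the owner's address from a pointer to one of its members */
    static Owner *ownerFromCmd(int *pCmd) {
      return (Owner *)((char *)pCmd - offsetof(Owner, cmd));
    }

    /* new style: pass the owner and read both members directly */
    static void useOwner(Owner *pObj) {
      int *pCmd = &pObj->cmd;
      int *pRes = &pObj->res;
      (void)pCmd; (void)pRes;
    }
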
@@ -149,7 +148,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 if (pDesc->pColumnModel == NULL) {
 tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);

-tscError("%p no local buffer or intermediate result format model", pSqlObjAddr);
+tscError("%p no local buffer or intermediate result format model", pSql);
 pRes->code = TSDB_CODE_APP_ERROR;
 return;
 }
@@ -158,7 +157,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 for (int32_t i = 0; i < numOfBuffer; ++i) {
 int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength;
 if (len == 0) {
-tscTrace("%p no data retrieved from orderOfVnode:%d", pSqlObjAddr, i + 1);
+tscTrace("%p no data retrieved from orderOfVnode:%d", pSql, i + 1);
 continue;
 }

@@ -167,13 +166,13 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd

 if (numOfFlush == 0 || numOfBuffer == 0) {
 tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
-tscTrace("%p retrieved no data", pSqlObjAddr);
+tscTrace("%p retrieved no data", pSql);

 return;
 }

 if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
-tscError("%p Invalid value of buffer capacity %d and page size %d ", pSqlObjAddr, pDesc->pColumnModel->capacity,
+tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
 pMemBuffer[0]->pageSize);

 tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
@@ -181,10 +180,11 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 return;
 }

-size_t nReducerSize = sizeof(SLocalReducer) + sizeof(void *) * numOfFlush;
-SLocalReducer *pReducer = (SLocalReducer *)calloc(1, nReducerSize);
+size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
+
+SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
 if (pReducer == NULL) {
-tscError("%p failed to create merge structure", pSqlObjAddr);
+tscError("%p failed to create local merge structure, out of memory", pSql);

 tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
 pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
@@ -199,48 +199,52 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 pReducer->numOfVnode = numOfBuffer;

 pReducer->pDesc = pDesc;
-tscTrace("%p the number of merged leaves is: %d", pSqlObjAddr, pReducer->numOfBuffer);
+tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);

 int32_t idx = 0;
 for (int32_t i = 0; i < numOfBuffer; ++i) {
 int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength;

 for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
-SLocalDataSource *pDS = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
+SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
-if (pDS == NULL) {
+if (ds == NULL) {
-tscError("%p failed to create merge structure", pSqlObjAddr);
+tscError("%p failed to create merge structure", pSql);
 pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
 return;
 }
-pReducer->pLocalDataSrc[idx] = pDS;
+
+pReducer->pLocalDataSrc[idx] = ds;

-pDS->pMemBuffer = pMemBuffer[i];
+ds->pMemBuffer = pMemBuffer[i];
-pDS->flushoutIdx = j;
+ds->flushoutIdx = j;
-pDS->filePage.numOfElems = 0;
+ds->filePage.numOfElems = 0;
-pDS->pageId = 0;
+ds->pageId = 0;
-pDS->rowIdx = 0;
+ds->rowIdx = 0;

-tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1);
+tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSql, i + 1, idx + 1);
-tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0);
+tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0);
 #ifdef _DEBUG_VIEW
-printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems);
+printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.numOfElems);
 SSrcColumnInfo colInfo[256] = {0};
 SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

 tscGetSrcColumnInfo(colInfo, pQueryInfo);

-tColModelDisplayEx(pDesc->pColumnModel, pDS->filePage.data, pDS->filePage.numOfElems,
+tColModelDisplayEx(pDesc->pColumnModel, ds->filePage.data, ds->filePage.numOfElems,
 pMemBuffer[0]->numOfElemsPerPage, colInfo);
 #endif
-if (pDS->filePage.numOfElems == 0) { // no data in this flush
-tscTrace("%p flush data is empty, ignore %d flush record", pSqlObjAddr, idx);
-tfree(pDS);
+if (ds->filePage.numOfElems == 0) { // no data in this flush, the index does not increase
+tscTrace("%p flush data is empty, ignore %d flush record", pSql, idx);
+tfree(ds);
 continue;
 }

 idx += 1;
 }
 }
-assert(idx >= pReducer->numOfBuffer);
+// no data actually, no need to merge result.
 if (idx == 0) {
 return;
 }
@@ -262,9 +266,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd

 // the input data format follows the old format, but output in a new format.
 // so, all the input must be parsed as old format
-size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
-pReducer->pCtx = (SQLFunctionCtx *)calloc(size, sizeof(SQLFunctionCtx));
+pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx));
 pReducer->rowSize = pMemBuffer[0]->nElemSize;

 tscRestoreSQLFuncForSTableQuery(pQueryInfo);
@@ -313,7 +315,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 pReducer->pResInfo = calloc(size, sizeof(SResultInfo));

 tscCreateResPointerInfo(pRes, pQueryInfo);
-tscInitSqlContext(pCmd, pRes, pReducer, pDesc);
+tscInitSqlContext(pCmd, pReducer, pDesc);

 // we change the capacity of schema to denote that there is only one row in temp buffer
 pReducer->pDesc->pColumnModel->capacity = 1;
@@ -428,8 +430,7 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
 tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows);

 if (pPage->numOfElems == pModel->capacity) {
-int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType);
-if (ret != 0) {
+if (tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType) != TSDB_CODE_SUCCESS) {
 return -1;
 }
 } else {
@@ -550,8 +550,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
 }

 /*
-* for meter query, simply return the size <= 1k
-* for metric query, estimate size according to meter tags
+* for table query, simply return the size <= 1k
 */
 static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
 const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5;
@@ -562,15 +561,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
 size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
 int32_t exprSize = sizeof(SSqlFuncMsg) * numOfExprs;

-//STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
-// table query without tags values
-//if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
-return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + 4096;
-//}
-
-//int32_t size = 4096;
-//return size;
+return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + 4096;
 }

 static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
@@ -1507,8 +1507,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
 SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
 tscClearInterpInfo(pPQueryInfo);

-tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel,
-&pPObj->cmd, &pPObj->res);
+tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, pPObj);
 tscTrace("%p build loser tree completed", pPObj);

 pPObj->res.precision = pSql->res.precision;
@@ -1209,18 +1209,18 @@ void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
 }
 }

-void tscColumnListDestroy(SArray* pColumnBaseInfo) {
-if (pColumnBaseInfo == NULL) {
+void tscColumnListDestroy(SArray* pColumnList) {
+if (pColumnList == NULL) {
 return;
 }

-size_t num = taosArrayGetSize(pColumnBaseInfo);
+size_t num = taosArrayGetSize(pColumnList);
 for (int32_t i = 0; i < num; ++i) {
-SColumn* pCol = taosArrayGetP(pColumnBaseInfo, i);
+SColumn* pCol = taosArrayGetP(pColumnList, i);
 tscColumnDestroy(pCol);
 }

-taosArrayDestroy(pColumnBaseInfo);
+taosArrayDestroy(pColumnList);
 }

 /*
@@ -1573,7 +1573,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {

 assert(pQueryInfo->exprList == NULL);
 pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
 pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
 }

 int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
@@ -1667,8 +1667,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
 assert(pTableMetaInfo != NULL);

 if (name != NULL) {
-assert(strlen(name) <= TSDB_TABLE_ID_LEN);
-strcpy(pTableMetaInfo->name, name);
+strncpy(pTableMetaInfo->name, name, TSDB_TABLE_ID_LEN);
 }

 pTableMetaInfo->pTableMeta = pTableMeta;
@@ -1679,10 +1678,9 @@
 memcpy(pTableMetaInfo->vgroupList, vgroupList, size);
 }

-if (pTagCols == NULL) {
-pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
-} else {
-pTableMetaInfo->tagColList = taosArrayClone(pTagCols);
+pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
+if (pTagCols != NULL) {
+tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
 }

 pQueryInfo->numOfTables += 1;
@@ -1702,6 +1700,12 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache)
 tfree(pTableMetaInfo->vgroupList);

 if (pTableMetaInfo->tagColList != NULL) {
+size_t numOfTags = taosArrayGetSize(pTableMetaInfo->tagColList);
+for(int32_t i = 0; i < numOfTags; ++i) { // todo do NOT use the allocated object
+SColumn* pCol = taosArrayGetP(pTableMetaInfo->tagColList, i);
+tfree(pCol);
+}
+
 taosArrayDestroy(pTableMetaInfo->tagColList);
 pTableMetaInfo->tagColList = NULL;
 }
@@ -1789,7 +1793,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
 tscFreeSqlObj(pNew);
 return NULL;
 }

 tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, (int16_t)tableIndex);

 // set the correct query type
@@ -1848,7 +1852,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void

 if (pPrevSql == NULL) {
 STableMeta* pTableMeta = taosCacheAcquireByName(tscCacheHandle, name);
+// todo handle error
+assert(pTableMeta != NULL);
 pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
 } else { // transfer the ownership of pTableMeta to the newly create sql object.
 STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
@@ -23,4 +23,5 @@ void extractTableName(const char *tableId, char *name);

 char* extractDBName(const char *tableId, char *name);

+
 #endif // TDENGINE_NAME_H
@@ -32,6 +32,9 @@ extern "C" {
 #define TSKEY int64_t
 #endif

+#define TSWINDOW_INITIALIZER {INT64_MIN, INT64_MAX};
+#define TSKEY_INITIAL_VAL INT64_MIN
+
 // ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
 typedef int32_t VarDataOffsetT;
 typedef int16_t VarDataLenT;
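The two defines added above are used later in this diff to replace 0 as the "not set yet" marker for timestamps and time windows: 0 is a legitimate epoch value, so INT64_MIN serves as the sentinel instead. A small illustration with simplified types:

    #include <stdint.h>

    typedef int64_t TSKEY;
    #define TSKEY_INITIAL_VAL INT64_MIN   /* mirrors the define added above */

    static int windowInitialized(TSKEY prevSKey) {
      /* comparing against 0 would misfire for keys at 1970-01-01 00:00:00 */
      return prevSKey != TSKEY_INITIAL_VAL;
    }
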
@@ -341,8 +344,6 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_MAX_DBS 100
 #define TSDB_MAX_VGROUPS 1000
 #define TSDB_MAX_SUPER_TABLES 100
-#define TSDB_MAX_NORMAL_TABLES 1000
-#define TSDB_MAX_CHILD_TABLES 100000

 #define TSDB_PORT_DNODESHELL 0
 #define TSDB_PORT_DNODEDNODE 5
@@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_OPTION, 0, 26, "invalid option")
 TAOS_DEFINE_ERROR(TSDB_CODE_NOT_CONFIGURED, 0, 27, "not configured")
 TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 28, "node offline")
 TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 29, "network unavailable")
+TAOS_DEFINE_ERROR(TSDB_CODE_AUTH_REQUIRED, 0, 30, "auth required")

 // db
 TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 100, "db not selected")
@@ -627,7 +627,6 @@ typedef struct {
 typedef struct STableMetaMsg {
 int32_t contLen;
 char tableId[TSDB_TABLE_ID_LEN + 1]; // table id
-char stableId[TSDB_TABLE_ID_LEN + 1]; // stable name if it is created according to super table
 uint8_t numOfTags;
 uint8_t precision;
 uint8_t tableType;
@@ -34,12 +34,14 @@ extern "C" {

 #define TSDB_INVALID_SUPER_TABLE_ID -1

+#define TSDB_STATUS_COMMIT_START 1
+#define TSDB_STATUS_COMMIT_OVER 2
+
 // --------- TSDB APPLICATION HANDLE DEFINITION
 typedef struct {
-// WAL handle
 void *appH;
 void *cqH;
-int (*walCallBack)(void *);
+int (*notifyStatus)(void *, int status);
 int (*eventCallBack)(void *);
 } STsdbAppH;

@@ -381,7 +381,7 @@ static void mgmtRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *p
 if (pStable->vgHash == NULL) return;

 SVgObj *pVgroup = mgmtGetVgroup(pCtable->vgId);
-if (pVgroup != NULL) {
+if (pVgroup == NULL) {
 taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId));
 }
 mgmtDecVgroupRef(pVgroup);
@@ -1203,8 +1203,10 @@ void mgmtDropAllSuperTables(SDbObj *pDropDb) {

 static int32_t mgmtSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pTable) {
 int32_t numOfCols = pTable->numOfColumns + pTable->numOfTags;
+assert(numOfCols <= TSDB_MAX_COLUMNS);
+
 for (int32_t i = 0; i < numOfCols; ++i) {
-strncpy(pSchema->name, pTable->schema[i].name, TSDB_TABLE_ID_LEN);
+strncpy(pSchema->name, pTable->schema[i].name, TSDB_COL_NAME_LEN);
 pSchema->type = pTable->schema[i].type;
 pSchema->bytes = htons(pTable->schema[i].bytes);
 pSchema->colId = htons(pTable->schema[i].colId);
@@ -1675,7 +1677,6 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
 pMeta->numOfTags = (int8_t)pTable->superTable->numOfTags;
 pMeta->numOfColumns = htons((int16_t)pTable->superTable->numOfColumns);
 pMeta->contLen = sizeof(STableMetaMsg) + mgmtSetSchemaFromSuperTable(pMeta->schema, pTable->superTable);
-strncpy(pMeta->stableId, pTable->superTable->info.tableId, tListLen(pMeta->stableId));
 } else {
 pMeta->sversion = htons(pTable->sversion);
 pMeta->numOfTags = 0;
@@ -120,12 +120,6 @@ typedef struct tExtMemBuffer {
 EXT_BUFFER_FLUSH_MODEL flushModel;
 } tExtMemBuffer;

-//typedef struct tTagSchema {
-// struct SSchema *pSchema;
-// int32_t numOfCols;
-// int32_t colOffset[];
-//} tTagSchema;
-
 /**
 *
 * @param inMemSize
@@ -506,7 +506,7 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
 w.ekey = pQuery->window.ekey;
 }

-assert(ts >= w.skey && ts <= w.ekey && w.skey != 0);
+assert(ts >= w.skey && ts <= w.ekey);

 return w;
 }
@@ -623,7 +623,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
 setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL);
 } else { // set the current index to be the last unclosed window
 int32_t i = 0;
-int64_t skey = 0;
+int64_t skey = TSKEY_INITIAL_VAL;

 for (i = 0; i < pWindowResInfo->size; ++i) {
 SWindowResult *pResult = &pWindowResInfo->pResult[i];
@@ -641,7 +641,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
 }

 // all windows are closed, set the last one to be the skey
-if (skey == 0) {
+if (skey == TSKEY_INITIAL_VAL) {
 assert(i == pWindowResInfo->size);
 pWindowResInfo->curIndex = pWindowResInfo->size - 1;
 } else {
@@ -659,7 +659,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
 qTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size, n);
 }

-assert(pWindowResInfo->prevSKey != 0);
+assert(pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL);
 }

 static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn,
@@ -3079,6 +3079,9 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
 int32_t functId = pQuery->pSelectExpr[j].base.functionId;

 SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[j];
+if (pCtx->resultInfo == NULL) {
+continue; // resultInfo is NULL, means no data checked in previous scan
+}

 if (((functId == TSDB_FUNC_FIRST || functId == TSDB_FUNC_FIRST_DST) && order == TSDB_ORDER_ASC) ||
 ((functId == TSDB_FUNC_LAST || functId == TSDB_FUNC_LAST_DST) && order == TSDB_ORDER_DESC)) {
@@ -3593,7 +3596,6 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
 if (pTableQueryInfo->queryRangeSet) {
 pTableQueryInfo->lastKey = key;
 } else {
-// pQuery->window.skey = key;
 pTableQueryInfo->win.skey = key;
 STimeWindow win = {.skey = key, .ekey = pQuery->window.ekey};

@@ -3616,18 +3618,16 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
 getAlignQueryTimeWindow(pQuery, win.skey, win.skey, win.ekey, &skey1, &ekey1, &w);
 pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp

-if (pWindowResInfo->prevSKey == 0) {
+if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
-if (QUERY_IS_ASC_QUERY(pQuery)) {
+if (!QUERY_IS_ASC_QUERY(pQuery)) {
-pWindowResInfo->prevSKey = w.skey;
-} else {
 assert(win.ekey == pQuery->window.skey);
-pWindowResInfo->prevSKey = w.skey;
 }

+pWindowResInfo->prevSKey = w.skey;
 }

 pTableQueryInfo->queryRangeSet = 1;
 pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
-pTableQueryInfo->win.skey = pTableQueryInfo->win.skey;
 }
 }

@@ -4071,10 +4071,11 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) {
 * pQuery->limit.offset times. Since hole exists, pQuery->intervalTime*pQuery->limit.offset value is
 * not valid. otherwise, we only forward pQuery->limit.offset number of points
 */
-assert(pRuntimeEnv->windowResInfo.prevSKey == 0);
+assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);

 TSKEY skey1, ekey1;
-STimeWindow w = {0};
+STimeWindow w = TSWINDOW_INITIALIZER;

 SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
 STableQueryInfo *pTableQueryInfo = pQuery->current;

@@ -4739,7 +4740,7 @@ static void doRestoreContext(SQInfo *pQInfo) {
 SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);

 if (pRuntimeEnv->pTSBuf != NULL) {
-pRuntimeEnv->pTSBuf->cur.order = pRuntimeEnv->pTSBuf->cur.order ^ 1;
+SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order);
 }

 switchCtxOrder(pRuntimeEnv);
@@ -612,7 +612,12 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
 pConn->ownId = htonl(pConn->sid);
 pConn->linkUid = pHead->linkUid;
 if (pRpc->afp) {
-terrno = (*pRpc->afp)(pConn->user, &pConn->spi, &pConn->encrypt, pConn->secret, pConn->ckey);
+if (pConn->user[0] == 0) {
+terrno = TSDB_CODE_AUTH_REQUIRED;
+} else {
+terrno = (*pRpc->afp)(pConn->user, &pConn->spi, &pConn->encrypt, pConn->secret, pConn->ckey);
+}
+
 if (terrno != 0) {
 tWarn("%s %p, user not there or server not ready", pRpc->label, pConn);
 taosFreeId(pRpc->idPool, sid); // sid shall be released
@@ -930,6 +935,12 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
 rpcMsg.handle = pContext->ahandle;
 pConn->pContext = NULL;

+if (pHead->code == TSDB_CODE_AUTH_REQUIRED) {
+pConn->secured = 0;
+rpcSendReqToServer(pRpc, pContext);
+return;
+}
+
 // for UDP, port may be changed by server, the port in ipSet shall be used for cache
 rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->ipSet.port[pContext->ipSet.inUse], pConn->connType);

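Taken together, the two rpc hunks above add a server-side "auth required" error for unauthenticated users and a client-side retry that clears the secured flag and resends the request. A schematic of that client-side retry, with placeholder names instead of the real SRpcConn internals:

    enum { CODE_OK = 0, CODE_AUTH_REQUIRED = 30 };   /* illustrative codes only */

    typedef struct { int secured; } Conn;

    static void onResponse(Conn *conn, int code, void (*resend)(Conn *)) {
      if (code == CODE_AUTH_REQUIRED) {
        conn->secured = 0;   /* force re-authentication on the next send */
        resend(conn);        /* replay the original request */
        return;
      }
      /* normal completion path would continue here */
    }
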
@@ -284,6 +284,7 @@ int32_t tsdbCloseRepo(TsdbRepoT *repo) {
 pRepo->tsdbCache->curBlock = NULL;
 tsdbUnLockRepo(repo);

+if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);
 tsdbCommitData((void *)repo);

 tsdbCloseFileH(pRepo->tsdbFileH);
@@ -330,7 +331,7 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {
 int32_t tsdbTriggerCommit(TsdbRepoT *repo) {
 STsdbRepo *pRepo = (STsdbRepo *)repo;

-if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
+if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);

 tsdbLockRepo(repo);
 if (pRepo->commit) {
@@ -942,7 +943,6 @@ static void tsdbFreeMemTable(SMemTable *pMemTable) {

 // Commit to file
 static void *tsdbCommitData(void *arg) {
-printf("Starting to commit....\n");
 STsdbRepo * pRepo = (STsdbRepo *)arg;
 STsdbMeta * pMeta = pRepo->tsdbMeta;
 STsdbCache *pCache = pRepo->tsdbCache;
@@ -951,6 +951,8 @@ static void *tsdbCommitData(void *arg) {
 SRWHelper whelper = {0};
 if (pCache->imem == NULL) return NULL;

+tsdbPrint("vgId: %d, starting to commit....", pRepo->config.tsdbId);
+
 // Create the iterator to read from cache
 SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables);
 if (iters == NULL) {
@@ -974,6 +976,7 @@ static void *tsdbCommitData(void *arg) {

 // Do retention actions
 tsdbFitRetention(pRepo);
+if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER);

 _exit:
 tdFreeDataCols(pDataCols);
@@ -1176,4 +1179,4 @@ uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *
 magic = *size;

 return magic;
 }
@@ -1347,7 +1347,7 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
 int32_t type = 0;
 int32_t bytes = 0;

-if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor extract method , to queryExecutor to generate tags values
 f1 = (char*) pTable1->name;
 f2 = (char*) pTable2->name;
 type = TSDB_DATA_TYPE_BINARY;
@@ -1355,7 +1355,8 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
 } else {
 STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
 bytes = pCol->bytes;
+type = pCol->type;

 f1 = tdGetRowDataOfCol(pTable1->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
 f2 = tdGetRowDataOfCol(pTable2->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
 }
@@ -94,7 +94,7 @@ size_t taosHashGetSize(const SHashObj *pHashObj);
 * @param size
 * @return
 */
-int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *data, size_t size);
+int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size);

 /**
 * return the payload data with the specified key
@@ -104,7 +104,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *da
 * @param keyLen
 * @return
 */
-void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen);
+void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen);

 /**
 * remove item with the specified key
@@ -112,7 +112,7 @@ void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen);
 * @param key
 * @param keyLen
 */
-void taosHashRemove(SHashObj *pHashObj, const char *key, size_t keyLen);
+void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen);

 /**
 * clean up hash table
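Widening the key parameter from const char * to const void * lets callers pass binary keys without a cast, matching usages elsewhere in this commit such as taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId)). Below is a small usage sketch against the new prototypes; the hash-function argument is left to the caller and the surrounding function is hypothetical:

    #include <stdint.h>
    #include "hash.h"   /* taosHashInit / taosHashPut / taosHashGet / taosHashRemove as declared above */

    static void vgHashExample(_hash_fn_t fn) {
      SHashObj *vgHash = taosHashInit(64, fn, false);

      int32_t vgId = 17;
      int32_t ref  = 1;

      /* a binary key can now be passed directly, no (char *) cast needed */
      taosHashPut(vgHash, &vgId, sizeof(vgId), &ref, sizeof(ref));

      int32_t *p = taosHashGet(vgHash, &vgId, sizeof(vgId));
      if (p != NULL) {
        taosHashRemove(vgHash, &vgId, sizeof(vgId));
      }
    }
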
@@ -17,7 +17,6 @@

 #include "hash.h"
 #include "tulog.h"
-#include "ttime.h"
 #include "tutil.h"

 static FORCE_INLINE void __wr_lock(void *lock) {
@@ -90,154 +89,65 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {

 /**
 * inplace update node in hash table
 * @param pHashObj hash table object
-* @param pNode data node
+* @param pNode hash data node
 */
-static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
+static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode);
-if (pNode->prev1) {
-pNode->prev1->next = pNode;
-}
-
-if (pNode->next) {
-(pNode->next)->prev = pNode;
-}
-}

 /**
-* get SHashNode from hashlist, nodes from trash are not included.
+* Get SHashNode from hashlist, nodes from trash are not included.
 * @param pHashObj Cache objection
 * @param key key for hash
 * @param keyLen key length
+* @param hashVal hash value by hash function
 * @return
 */
-static SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const char *key, uint32_t keyLen, uint32_t *hashVal) {
+static SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal);
-uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
-
-int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
-SHashEntry *pEntry = pHashObj->hashList[slot];
-
-SHashNode *pNode = pEntry->next;
-while (pNode) {
-if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
-break;
-}
-
-pNode = pNode->next;
-}
-
-if (pNode) {
-assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
-}
-
-// return the calculated hash value, to avoid calculating it again in other functions
-if (hashVal != NULL) {
-*hashVal = hash;
-}
-
-return pNode;
-}

 /**
-* resize the hash list if the threshold is reached
+* Resize the hash list if the threshold is reached
 *
 * @param pHashObj
 */
-static void taosHashTableResize(SHashObj *pHashObj) {
+static void taosHashTableResize(SHashObj *pHashObj);
-if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
-return;
-}
-
-// double the original capacity
-SHashNode *pNode = NULL;
-SHashNode *pNext = NULL;
-
-int32_t newSize = pHashObj->capacity << 1u;
-if (newSize > HASH_MAX_CAPACITY) {
-// uTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached",
-// pHashObj->capacity, HASH_MAX_CAPACITY);
-return;
-}
-
-// int64_t st = taosGetTimestampUs();
-
-SHashEntry **pNewEntry = realloc(pHashObj->hashList, sizeof(SHashEntry *) * newSize);
-if (pNewEntry == NULL) {
-// uTrace("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity);
-return;
-}
-
-pHashObj->hashList = pNewEntry;
-for (int32_t i = pHashObj->capacity; i < newSize; ++i) {
-pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
-}
-
-pHashObj->capacity = newSize;
-
-for (int32_t i = 0; i < pHashObj->capacity; ++i) {
-SHashEntry *pEntry = pHashObj->hashList[i];
-
-pNode = pEntry->next;
-if (pNode != NULL) {
-assert(pNode->prev1 == pEntry && pEntry->num > 0);
-}
-
-while (pNode) {
-int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
-if (j == i) { // this key resides in the same slot, no need to relocate it
-pNode = pNode->next;
-} else {
-pNext = pNode->next;
-
-// remove from current slot
-assert(pNode->prev1 != NULL);
-
-if (pNode->prev1 == pEntry) { // first node of the overflow linked list
-pEntry->next = pNode->next;
-} else {
-pNode->prev->next = pNode->next;
-}
-
-pEntry->num--;
-assert(pEntry->num >= 0);
-
-if (pNode->next != NULL) {
-(pNode->next)->prev = pNode->prev;
-}
-
-// added into new slot
-pNode->next = NULL;
-pNode->prev1 = NULL;
-
-SHashEntry *pNewIndexEntry = pHashObj->hashList[j];
-
-if (pNewIndexEntry->next != NULL) {
-assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
-
-pNewIndexEntry->next->prev = pNode;
-}
-
-pNode->next = pNewIndexEntry->next;
-pNode->prev1 = pNewIndexEntry;
-
-pNewIndexEntry->next = pNode;
-pNewIndexEntry->num++;
-
-// continue
-pNode = pNext;
-}
-}
-}
-
-// int64_t et = taosGetTimestampUs();
-// uTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity,
-// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
-}

 /**
-* @param capacity maximum slots available for hash elements
-* @param fn hash function
+* @param key key of object for hash, usually a null-terminated string
+* @param keyLen length of key
+* @param pData actually data. Requires a consecutive memory block, no pointer is allowed in pData.
+* Pointer copy causes memory access error.
+* @param dsize size of data
+* @return SHashNode
+*/
+static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal);
+
+/**
+* Update the hash node
+*
+* @param pNode hash node
+* @param key key for generate hash value
+* @param keyLen key length
+* @param pData actual data
+* @param dsize size of actual data
+* @return hash node
+*/
+static SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize);
+
+/**
+* insert the hash node at the front of the linked list
+*
+* @param pHashObj
+* @param pNode
+*/
+static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode);
+
+/**
+* Get the next element in hash table for iterator
+* @param pIter
 * @return
 */
+static SHashNode *getNextHashNode(SHashMutableIterator *pIter);
+
 SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) {
 if (capacity == 0 || fn == NULL) {
 return NULL;
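Most of the hunk above is mechanical: the static helpers keep their doc comments but become forward declarations, and their bodies move below the public functions (the tail of this diff re-adds them). A generic sketch of that prototype-first file layout, unrelated to the real hash code:

    /* declarations first, so public entry points can be defined before the helper bodies */
    static int helperTwice(int x);

    int publicEntry(int x) {
      return helperTwice(x) + 1;
    }

    /* helper bodies follow later in the file */
    static int helperTwice(int x) {
      return x * 2;
    }
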
@@ -285,79 +195,6 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) {
 return pHashObj;
 }

-/**
-* @param key key of object for hash, usually a null-terminated string
-* @param keyLen length of key
-* @param pData actually data. required a consecutive memory block, no pointer is allowed
-* in pData. Pointer copy causes memory access error.
-* @param size size of block
-* @return SHashNode
-*/
-static SHashNode *doCreateHashNode(const char *key, size_t keyLen, const char *pData, size_t dataSize,
-uint32_t hashVal) {
-size_t totalSize = dataSize + sizeof(SHashNode) + keyLen + 1; // one extra byte for null
-
-SHashNode *pNewNode = calloc(1, totalSize);
-if (pNewNode == NULL) {
-uError("failed to allocate memory, reason:%s", strerror(errno));
-return NULL;
-}
-
-memcpy(pNewNode->data, pData, dataSize);
-
-pNewNode->key = pNewNode->data + dataSize;
-memcpy(pNewNode->key, key, keyLen);
-pNewNode->keyLen = keyLen;
-
-pNewNode->hashVal = hashVal;
-
-return pNewNode;
-}
-
-static SHashNode *doUpdateHashNode(SHashNode *pNode, const char *key, size_t keyLen, const char *pData,
-size_t dataSize) {
-size_t size = dataSize + sizeof(SHashNode) + keyLen;
-
-SHashNode *pNewNode = (SHashNode *)realloc(pNode, size);
-if (pNewNode == NULL) {
-return NULL;
-}
-
-memcpy(pNewNode->data, pData, dataSize);
-
-pNewNode->key = pNewNode->data + dataSize;
-
-assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
-
-memcpy(pNewNode->key, key, keyLen);
-return pNewNode;
-}
-
-/**
-* insert the hash node at the front of the linked list
-*
-* @param pHashObj
-* @param pNode
-*/
-static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) {
-assert(pNode != NULL);
-
-int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
-SHashEntry *pEntry = pHashObj->hashList[index];
-
-pNode->next = pEntry->next;
-
-if (pEntry->next) {
-pEntry->next->prev = pNode;
-}
-
-pEntry->next = pNode;
-pNode->prev1 = pEntry;
-
-pEntry->num++;
-pHashObj->size++;
-}
-
 size_t taosHashGetSize(const SHashObj *pHashObj) {
 if (pHashObj == NULL) {
 return 0;
@@ -366,12 +203,7 @@ size_t taosHashGetSize(const SHashObj *pHashObj) {
 return pHashObj->size;
 }

-/**
-* add data node into hash table
-* @param pHashObj hash object
-* @param pNode hash node
-*/
-int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *data, size_t size) {
+int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) {
 __wr_lock(pHashObj->lock);

 uint32_t hashVal = 0;
@ -402,7 +234,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *da
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen) {
|
void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
|
||||||
__rd_lock(pHashObj->lock);
|
__rd_lock(pHashObj->lock);
|
||||||
|
|
||||||
uint32_t hashVal = 0;
|
uint32_t hashVal = 0;
|
||||||
|
@ -419,12 +251,7 @@ void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
|
||||||
* remove node in hash list
|
|
||||||
* @param pHashObj
|
|
||||||
* @param pNode
|
|
||||||
*/
|
|
||||||
void taosHashRemove(SHashObj *pHashObj, const char *key, size_t keyLen) {
|
|
||||||
__wr_lock(pHashObj->lock);
|
__wr_lock(pHashObj->lock);
|
||||||
|
|
||||||
uint32_t val = 0;
|
uint32_t val = 0;
|
||||||
|
@ -518,23 +345,6 @@ SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) {
|
||||||
return pIter;
|
return pIter;
|
||||||
}
|
}
|
||||||
|
|
||||||
static SHashNode *getNextHashNode(SHashMutableIterator *pIter) {
|
|
||||||
assert(pIter != NULL);
|
|
||||||
|
|
||||||
pIter->entryIndex++;
|
|
||||||
while (pIter->entryIndex < pIter->pHashObj->capacity) {
|
|
||||||
SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
|
|
||||||
if (pEntry->next == NULL) {
|
|
||||||
pIter->entryIndex++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
return pEntry->next;
|
|
||||||
}
|
|
||||||
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool taosHashIterNext(SHashMutableIterator *pIter) {
|
bool taosHashIterNext(SHashMutableIterator *pIter) {
|
||||||
if (pIter == NULL) {
|
if (pIter == NULL) {
|
||||||
return false;
|
return false;
|
||||||
|
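(Editorial note, not part of the commit: the hunks above widen the key parameter of taosHashPut, taosHashGet and taosHashRemove from const char * to const void *, so keys no longer need to be null-terminated strings. A minimal usage sketch under that assumption follows; the function exampleLookup and the STableMetaLite type are made up for illustration, and only the taosHash* calls come from this diff.)

#include <stdint.h>
#include <string.h>

typedef struct { int64_t uid; int32_t vgId; } STableMetaLite;   /* hypothetical value type */

static void exampleLookup(SHashObj *pHash) {
  int32_t        key = 42;                         /* binary key, not a string */
  STableMetaLite meta = {.uid = 1001, .vgId = 3};

  /* the value bytes are copied into the hash node, so a stack object is fine */
  taosHashPut(pHash, &key, sizeof(key), &meta, sizeof(meta));

  STableMetaLite *p = (STableMetaLite *)taosHashGet(pHash, &key, sizeof(key));
  if (p != NULL) {
    /* use p->uid / p->vgId ... */
  }

  taosHashRemove(pHash, &key, sizeof(key));
}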
@@ -617,3 +427,205 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) {
 
   return num;
 }
+
+void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
+  if (pNode->prev1) {
+    pNode->prev1->next = pNode;
+  }
+
+  if (pNode->next) {
+    (pNode->next)->prev = pNode;
+  }
+}
+
+SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) {
+  uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
+
+  int32_t     slot = HASH_INDEX(hash, pHashObj->capacity);
+  SHashEntry *pEntry = pHashObj->hashList[slot];
+
+  SHashNode *pNode = pEntry->next;
+  while (pNode) {
+    if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
+      break;
+    }
+
+    pNode = pNode->next;
+  }
+
+  if (pNode) {
+    assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
+  }
+
+  // return the calculated hash value, to avoid calculating it again in other functions
+  if (hashVal != NULL) {
+    *hashVal = hash;
+  }
+
+  return pNode;
+}
+
+void taosHashTableResize(SHashObj *pHashObj) {
+  if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
+    return;
+  }
+
+  // double the original capacity
+  SHashNode *pNode = NULL;
+  SHashNode *pNext = NULL;
+
+  int32_t newSize = pHashObj->capacity << 1u;
+  if (newSize > HASH_MAX_CAPACITY) {
+    //    uTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached",
+    //           pHashObj->capacity, HASH_MAX_CAPACITY);
+    return;
+  }
+
+  //  int64_t st = taosGetTimestampUs();
+
+  SHashEntry **pNewEntry = realloc(pHashObj->hashList, sizeof(SHashEntry *) * newSize);
+  if (pNewEntry == NULL) {
+    //    uTrace("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity);
+    return;
+  }
+
+  pHashObj->hashList = pNewEntry;
+  for (int32_t i = pHashObj->capacity; i < newSize; ++i) {
+    pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
+  }
+
+  pHashObj->capacity = newSize;
+
+  for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+    SHashEntry *pEntry = pHashObj->hashList[i];
+
+    pNode = pEntry->next;
+    if (pNode != NULL) {
+      assert(pNode->prev1 == pEntry && pEntry->num > 0);
+    }
+
+    while (pNode) {
+      int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
+      if (j == i) {  // this key resides in the same slot, no need to relocate it
+        pNode = pNode->next;
+      } else {
+        pNext = pNode->next;
+
+        // remove from current slot
+        assert(pNode->prev1 != NULL);
+
+        if (pNode->prev1 == pEntry) {  // first node of the overflow linked list
+          pEntry->next = pNode->next;
+        } else {
+          pNode->prev->next = pNode->next;
+        }
+
+        pEntry->num--;
+        assert(pEntry->num >= 0);
+
+        if (pNode->next != NULL) {
+          (pNode->next)->prev = pNode->prev;
+        }
+
+        // added into new slot
+        pNode->next = NULL;
+        pNode->prev1 = NULL;
+
+        SHashEntry *pNewIndexEntry = pHashObj->hashList[j];
+
+        if (pNewIndexEntry->next != NULL) {
+          assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
+
+          pNewIndexEntry->next->prev = pNode;
+        }
+
+        pNode->next = pNewIndexEntry->next;
+        pNode->prev1 = pNewIndexEntry;
+
+        pNewIndexEntry->next = pNode;
+        pNewIndexEntry->num++;
+
+        // continue
+        pNode = pNext;
+      }
+    }
+  }
+
+  //  int64_t et = taosGetTimestampUs();
+  //  uTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity,
+  //         ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
+}
+
+SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
+  size_t totalSize = dsize + sizeof(SHashNode) + keyLen + 1;  // one extra byte for null
+
+  SHashNode *pNewNode = calloc(1, totalSize);
+  if (pNewNode == NULL) {
+    uError("failed to allocate memory, reason:%s", strerror(errno));
+    return NULL;
+  }
+
+  memcpy(pNewNode->data, pData, dsize);
+
+  pNewNode->key = pNewNode->data + dsize;
+  memcpy(pNewNode->key, key, keyLen);
+  pNewNode->keyLen = keyLen;
+
+  pNewNode->hashVal = hashVal;
+
+  return pNewNode;
+}
+
+SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize) {
+  size_t size = dsize + sizeof(SHashNode) + keyLen;
+
+  SHashNode *pNewNode = (SHashNode *)realloc(pNode, size);
+  if (pNewNode == NULL) {
+    return NULL;
+  }
+
+  memcpy(pNewNode->data, pData, dsize);
+
+  pNewNode->key = pNewNode->data + dsize;
+
+  assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
+
+  memcpy(pNewNode->key, key, keyLen);
+  return pNewNode;
+}
+
+void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) {
+  assert(pNode != NULL);
+
+  int32_t     index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
+  SHashEntry *pEntry = pHashObj->hashList[index];
+
+  pNode->next = pEntry->next;
+
+  if (pEntry->next) {
+    pEntry->next->prev = pNode;
+  }
+
+  pEntry->next = pNode;
+  pNode->prev1 = pEntry;
+
+  pEntry->num++;
+  pHashObj->size++;
+}
+
+SHashNode *getNextHashNode(SHashMutableIterator *pIter) {
+  assert(pIter != NULL);
+
+  pIter->entryIndex++;
+  while (pIter->entryIndex < pIter->pHashObj->capacity) {
+    SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
+    if (pEntry->next == NULL) {
+      pIter->entryIndex++;
+      continue;
+    }
+
+    return pEntry->next;
+  }
+
+  return NULL;
+}
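(Editorial note, not part of the commit: taosHashTableResize above only grows the table when size reaches capacity * HASH_DEFAULT_LOAD_FACTOR and doubling stays within HASH_MAX_CAPACITY, and it then rehashes only the nodes whose slot actually changes. The sketch below restates that decision with placeholder constants; the real macro values and the HASH_INDEX definition live in the hash header and may differ.)

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_LOAD_FACTOR  0.75         /* assumed value, for illustration only */
#define SKETCH_MAX_CAPACITY (1 << 20)    /* assumed cap, for illustration only */

static bool sketchShouldResize(int32_t size, int32_t capacity) {
  /* grow only when the table is loaded past the factor and doubling is still allowed */
  return size >= capacity * SKETCH_LOAD_FACTOR && (capacity << 1) <= SKETCH_MAX_CAPACITY;
}

static int32_t sketchSlotOf(uint32_t hashVal, int32_t capacity) {
  /* capacities double on every resize, so with power-of-two capacities a mask is
   * equivalent to hashVal % capacity; HASH_INDEX is assumed to behave this way */
  return (int32_t)(hashVal & (uint32_t)(capacity - 1));
}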
@@ -38,6 +38,7 @@ typedef struct {
   int     status;
   int8_t  role;
   int64_t version;
+  int64_t savedVersion;
   void   *wqueue;
   void   *rqueue;
   void   *wal;
@@ -33,12 +33,11 @@ static int32_t tsOpennedVnodes;
 static void    *tsDnodeVnodesHash;
 static void     vnodeCleanUp(SVnodeObj *pVnode);
 static void     vnodeBuildVloadMsg(char *pNode, void * param);
-static int      vnodeWalCallback(void *arg);
 static int32_t  vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
 static int32_t  vnodeReadCfg(SVnodeObj *pVnode);
 static int32_t  vnodeSaveVersion(SVnodeObj *pVnode);
 static bool     vnodeReadVersion(SVnodeObj *pVnode);
-static int      vnodeWalCallback(void *arg);
+static int      vnodeProcessTsdbStatus(void *arg, int status);
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
 static int      vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
 static void     vnodeNotifyRole(void *ahandle, int8_t role);
@@ -206,7 +205,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
 
   STsdbAppH appH = {0};
   appH.appH = (void *)pVnode;
-  appH.walCallBack = vnodeWalCallback;
+  appH.notifyStatus = vnodeProcessTsdbStatus;
   appH.cqH = pVnode->cq;
 
   sprintf(temp, "%s/tsdb", rootDir);
@@ -290,7 +289,7 @@ void vnodeRelease(void *pVnodeRaw) {
   free(pVnode);
 
   int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1);
-  vTrace("vgId:%d, vnode is released, vnodes:%d", pVnode, vgId, count);
+  vTrace("vgId:%d, vnode is released, vnodes:%d", vgId, count);
 
   if (count <= 0) {
     taosCleanUpIntHash(tsDnodeVnodesHash);
@@ -374,14 +373,22 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
   walClose(pVnode->wal);
   pVnode->wal = NULL;
 
-  vnodeSaveVersion(pVnode);
   vnodeRelease(pVnode);
 }
 
 // TODO: this is a simple implement
-static int vnodeWalCallback(void *arg) {
+static int vnodeProcessTsdbStatus(void *arg, int status) {
   SVnodeObj *pVnode = arg;
-  return walRenew(pVnode->wal);
+
+  if (status == TSDB_STATUS_COMMIT_START) {
+    pVnode->savedVersion = pVnode->version;
+    return walRenew(pVnode->wal);
+  }
+
+  if (status == TSDB_STATUS_COMMIT_OVER)
+    return vnodeSaveVersion(pVnode);
+
+  return 0;
 }
 
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size) {
@@ -414,7 +421,7 @@ static void vnodeNotifyFileSynced(void *ahandle) {
   tsdbCloseRepo(pVnode->tsdb);
   STsdbAppH appH = {0};
   appH.appH = (void *)pVnode;
-  appH.walCallBack = vnodeWalCallback;
+  appH.notifyStatus = vnodeProcessTsdbStatus;
   appH.cqH = pVnode->cq;
   pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);
 }
@@ -685,14 +692,14 @@ static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
   char * content = calloc(1, maxLen + 1);
 
   len += snprintf(content + len, maxLen - len, "{\n");
-  len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->version);
+  len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->savedVersion);
   len += snprintf(content + len, maxLen - len, "}\n");
 
   fwrite(content, 1, len, fp);
   fclose(fp);
   free(content);
 
-  vPrint("vgId:%d, save vnode version:%" PRId64 " successed", pVnode->vgId, pVnode->version);
+  vPrint("vgId:%d, save vnode version:%" PRId64 " succeed", pVnode->vgId, pVnode->savedVersion);
 
   return 0;
 }
@@ -734,7 +741,7 @@ static bool vnodeReadVersion(SVnodeObj *pVnode) {
 
   ret = true;
 
-  vPrint("vgId:%d, read vnode version successed, version:%%" PRId64, pVnode->vgId, pVnode->version);
+  vPrint("vgId:%d, read vnode version succeed, version:%" PRId64, pVnode->vgId, pVnode->version);
 
 PARSE_OVER:
   free(content);
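(Editorial note, not part of the commit: the vnode now snapshots version into savedVersion when the tsdb reports TSDB_STATUS_COMMIT_START and persists it once TSDB_STATUS_COMMIT_OVER arrives. The sketch below shows how a commit path is expected to drive the new notifyStatus hook; the function sketchCommitFlow and its control flow are illustrative assumptions, and only appH/notifyStatus and the two status constants come from this diff.)

static int sketchCommitFlow(STsdbAppH *pAppH) {
  /* before flushing in-memory data: the vnode snapshots its version and rolls the WAL */
  if (pAppH->notifyStatus != NULL) {
    pAppH->notifyStatus(pAppH->appH, TSDB_STATUS_COMMIT_START);
  }

  /* ... write data files to disk ... */

  /* after the commit is durable: the vnode writes savedVersion to its version file */
  if (pAppH->notifyStatus != NULL) {
    return pAppH->notifyStatus(pAppH->appH, TSDB_STATUS_COMMIT_OVER);
  }

  return 0;
}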
@@ -27,7 +27,7 @@ class TDTestCase:
     def run(self):
         self.ntables = 1
         self.startTime = 1520000010000
-        self.maxwrows = 200
+        self.maxrows = 200
         self.rowsPerTable = 20
 
         tdDnodes.stop(1)
@@ -33,13 +33,3 @@ python3 ./test.py $1 -f import_merge/importCacheFileT.py
 python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f import_merge/importDataLastSub.py
 python3 ./test.py $1 -s && sleep 1
-python3 ./test.py $1 -f import_merge/importHead.py
-python3 ./test.py $1 -s && sleep 1
-python3 ./test.py $1 -f import_merge/importLastT.py
-python3 ./test.py $1 -s && sleep 1
-python3 ./test.py $1 -f import_merge/importSpan.py
-python3 ./test.py $1 -s && sleep 1
-python3 ./test.py $1 -f import_merge/importTail.py
-python3 ./test.py $1 -s && sleep 1
-python3 ./test.py $1 -f import_merge/importTRestart.py
-python3 ./test.py $1 -s && sleep 1
@@ -9,7 +9,6 @@ run general/import/basic.sim
 run general/import/commit.sim
 run general/insert/query_file_memory.sim
 run general/parser/binary_escapeCharacter.sim
-run general/parser/columnValue_bigint.sim
 run general/parser/select_from_cache_disk.sim
 run general/table/autocreate.sim
 run general/table/column_name.sim
@@ -18,5 +17,5 @@ run general/table/vgroup.sim
 run general/user/basic1.sim
 run general/user/pass_alter.sim
 run general/vector/single.sim
-run general/connection/connection.sim
+#run general/connection/connection.sim
 run general/user/authority.sim
@@ -48,41 +48,41 @@ $tb = $tbPrefix . $i
 
 sql select leastsquares(tbcol, 1, 1) from $tb
 print ===> $data00
-if $data00 != @(1.000000, 1.000000)@ then
+if $data00 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 
 print =============== step3
 sql select leastsquares(tbcol, 1, 1) from $tb where ts < now + 4m
 print ===> $data00
-if $data00 != @(1.000000, 1.000000)@ then
+if $data00 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 
 print =============== step4
 sql select leastsquares(tbcol, 1, 1) as b from $tb
 print ===> $data00
-if $data00 != @(1.000000, 1.000000)@ then
+if $data00 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 
 print =============== step5
 sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1m)
 print ===> $data01
-if $data01 != @(1.000000, 1.000000)@ then
+if $data01 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 
 sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1d)
 print ===> $data01
-if $data01 != @(1.000000, 1.000000)@ then
+if $data01 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 
 print =============== step6
 sql select leastsquares(tbcol, 1, 1) as b from $tb where ts < now + 4m interval(1m)
 print ===> $data01
-if $data01 != @(1.000000, 1.000000)@ then
+if $data01 != @{slop:1.000000, intercept:1.000000}@ then
   return -1
 endi
 print ===> $rows
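(Editorial note, not part of the commit: the checks above track the new output format of leastsquares(), which reports the fitted line as {slop:<slope>, intercept:<intercept>}, "slop" being the key the server emits. For reference only, the standard least-squares fit over points (x_i, y_i) is textbook material, not taken from this commit:

\[ \hat{b} = \frac{n\sum_i x_i y_i - \sum_i x_i \sum_i y_i}{n\sum_i x_i^2 - \left(\sum_i x_i\right)^2}, \qquad \hat{a} = \frac{\sum_i y_i - \hat{b}\sum_i x_i}{n} \]

and the data in this test is evidently constructed so that both values come out as 1.000000.)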
@@ -157,25 +157,25 @@
 
 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6020/grafana/query
 print 16-> $system_content
-if $system_content != @[{"refId":"A","target":"1","datapoints":[[5,"-"]]},{"refId":"A","target":"2","datapoints":[[4,"-"]]},{"refId":"A","target":"3","datapoints":[[3,"-"]]},{"refId":"B","target":"a","datapoints":[[5,"-"]]},{"refId":"B","target":"b","datapoints":[[8,"-"]]},{"refId":"B","target":"c","datapoints":[[9,"-"]]}]@ then
+if $system_content != @[{"refId":"A","target":"{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"{b:c}","datapoints":[[9,"-"]]}]@ then
   return -1
 endi
 
 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6020/grafana/query
 print 17-> $system_content
-if $system_content != @[{"refId":"A","target":"count1","datapoints":[[5,"-"]]},{"refId":"A","target":"count2","datapoints":[[4,"-"]]},{"refId":"A","target":"count3","datapoints":[[3,"-"]]},{"refId":"B","target":"sum-a","datapoints":[[5,"-"]]},{"refId":"B","target":"sum-b","datapoints":[[8,"-"]]},{"refId":"B","target":"sum-c","datapoints":[[9,"-"]]}]@ then
+if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"count{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"count{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"sum-{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"sum-{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"sum-{b:c}","datapoints":[[9,"-"]]}]@ then
   return -1
 endi
 
 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:6020/grafana/query
 print 18-> $system_content
-if $system_content != @[{"refId":"A","target":"count1","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000],[1,1514208540000]]},{"refId":"A","target":"count2","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000]]},{"refId":"A","target":"count3","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000]]}]@ then
+if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000],[1,1514208540000]]},{"refId":"A","target":"count{a:2,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000]]},{"refId":"A","target":"count{a:3,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000]]}]@ then
   return -1
 endi
 
 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:6020/grafana/query
 print 19-> $system_content
-if $system_content != @[{"refId":"A","target":"3","datapoints":[[15.299999714,"-"]]},{"refId":"B","target":"15.299999714","datapoints":[[3,"-"]]}]@ then
+if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.299999714,"-"]]},{"refId":"B","target":"{sum(v2):15.299999714}","datapoints":[[3,"-"]]}]@ then
   return -1
 endi
 
@@ -238,3 +238,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -39,4 +39,4 @@ sql alter table tb1 set tag len = 379
 # test end
 sql drop database $db
 
-
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -1,6 +1,4 @@
 system sh/stop_dnodes.sh
-
-
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
@@ -222,13 +220,13 @@ sql show tables
 if $rows != 3 then
   return -1
 endi
-if $data00 != tb3 then
+if $data00 != tb1 then
   return -1
 endi
 if $data10 != tb2 then
   return -1
 endi
-if $data20 != tb1 then
+if $data20 != tb3 then
   return -1
 endi
 
@@ -301,6 +299,9 @@ sql_error create table txu using tu tags(0) values(now, 1);
 
 #[TBASE-675]
 sql insert into tu values(1565971200000, 1) (1565971200000,2) (1565971200001, 3)(1565971200001, 4)
+sql select * from tu
 if $rows != 2 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -1,8 +1,7 @@
 system sh/stop_dnodes.sh
-
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c commitTime -v 30
+system sh/cfg.sh -n dnode1 -c ctime -v 30
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
@@ -22,7 +21,7 @@ $stb = $stbPrefix . $i
 
 sql drop database $db -x step1
 step1:
-sql create database $db maxrows 200 cache 2048 maxTables 4
+sql create database $db maxrows 200 cache 2 maxTables 4
 print ====== create tables
 sql use $db
 
@@ -81,3 +80,5 @@ while $x < 100
   $x = $x + 1
   print loop $x
 endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -95,9 +95,4 @@ if $data41 != @udp005@ then
 endi
 
 
-
-
-
-
-
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -42,4 +42,3 @@ sql select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st
 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:6020/restful/sql
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-
@@ -90,3 +90,5 @@ endi
 #### illegal operations
 sql_error select max(c2*2) from $tb
 sql_error select max(c1-c2) from $tb
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -22,15 +22,5 @@ run general/parser/columnValue_bigint.sim
 run general/parser/columnValue_float.sim
 run general/parser/columnValue_double.sim
 
-
-
-
-
-
-
-
-
-
-
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -107,3 +107,5 @@ while $loop <= $loops
   endi
   $loop = $loop + 1
 endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -188,3 +188,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -258,3 +258,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -188,3 +188,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -127,5 +127,4 @@ if $rows != 7 then
   return -1
 endi
 
-
-
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -849,3 +849,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -430,3 +430,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -83,3 +83,4 @@ sleep 3000
 
 run general/parser/first_last_query.sim
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -406,4 +406,6 @@ endi
 
 if $data97 != @group_tb0@ then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c commitTime -v 30
+system sh/cfg.sh -n dnode1 -c ctime -v 30
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
@@ -53,3 +53,4 @@ if $data00 != $res then
   return -1
 endi
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c commitTime -v 30
+system sh/cfg.sh -n dnode1 -c ctime -v 30
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
@@ -52,3 +52,4 @@ if $data00 != $res then
   return -1
 endi
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
 
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c commitTime -v 30
+system sh/cfg.sh -n dnode1 -c ctime -v 30
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
 sql connect
@@ -59,3 +59,4 @@ if $data00 != $res then
   return -1
 endi
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -34,4 +34,6 @@ endi
 
 
 #system rm -f $inFileName # invalid shell
 system rm -f ~/data.csv
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -233,3 +233,4 @@ endi
 #endi
 
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -65,3 +65,5 @@ system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
 run general/parser/interp_test.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -444,3 +444,5 @@ sql insert into um1 using m2 tags(1) values(1000001, 10)(2000000, 20);
 sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20);
 
 sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -127,4 +127,6 @@ sql select join_mt0.ts, join_mt1.t1 from join_mt0, join_mt1 where join_mt0.ts=jo
 
 sql select join_mt0.ts, join_mt1.t1, join_mt0.t1, join_mt1.tbname, join_mt0.tbname from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1
 
 sql select join_mt0.ts, join_mt1.t1, join_mt0.t1, join_mt1.tbname, join_mt0.tbname from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1 limit 1
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -68,3 +68,5 @@ sql connect
 sleep 3000
 
 run general/parser/lastrow_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -69,3 +69,5 @@ sleep 3000
 
 run general/parser/limit_tb.sim
 run general/parser/limit_stb.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -67,3 +67,5 @@ print ================== server restart completed
 
 run general/parser/limit1_tb.sim
 run general/parser/limit1_stb.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -67,3 +67,5 @@ print ================== server restart completed
 
 run general/parser/limit1_tb.sim
 run general/parser/limit1_stb.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -74,3 +74,5 @@ system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
 run general/parser/limit2_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -142,3 +142,5 @@ endi
 if $data03 != 319 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -310,3 +310,5 @@ endi
 # if $rows != 0 then
 #   return -1
 # endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -488,3 +488,4 @@ sql_error alter table st51 set tag tag_tinyint = abc379
 #sql drop database $db
 
 
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -375,4 +375,4 @@ sql_error select 'abc';
 #=============================tbase-1205
 sql select count(*) from tm1 where ts<now and ts>= now -1d interval(1h) fill(NULL);
 
-
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -5,3 +5,5 @@ while $i <= $loops
   run general/parser/alter.sim
   $i = $i + 1
 endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -5,3 +5,5 @@ while $i <= $repeats
   run general/parser/stream.sim
   $i = $i + 1
 endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -182,3 +182,5 @@ while $loop <= $loops
   endw
   $loop = $loop + 1
 endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -80,3 +80,5 @@ sql show databases
 if $rows != 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -68,3 +68,5 @@ endi
 if $data12 != 1 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -838,3 +838,5 @@ sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group
 #sql select first(ts), tbname from select_tags_mt0 group by tbname;
 #sql select count(c1) from select_tags_mt0 where c1=99 group by tbname;
 #sql select count(*),tbname from select_tags_mt0 group by tbname
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -225,3 +225,5 @@ endi
 if $data04 != NULL then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -36,3 +36,5 @@ system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
 run general/parser/single_row_in_tb_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -105,3 +105,5 @@ sql connect
 sleep 3000
 
 run general/parser/slimit_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -64,3 +64,5 @@ sql connect
 sleep 3000
 
 run general/parser/slimit1_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -255,3 +255,5 @@ endi
 #if $rows != 0 then
 # return -1
 #endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -220,3 +220,5 @@ sql create database $db
 sql use $db
 sql create table stb (ts timestamp, c1 int) tags(t1 int)
 sql create table tb1 using stb tags(1)
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -58,3 +58,5 @@ sql select * from iostrm
 if $rows <= 0 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -55,4 +55,6 @@ endi
 #if $data06 != 100.90000 then
 #  print "expect: 100.90000, act: $data06"
 #  return -1
 #endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -73,3 +73,5 @@ system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
 run general/parser/tbnameIn_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -98,3 +98,8 @@ sleep 2000
 run general/parser/select_with_tags.sim
 sleep 2000
 run general/parser/groupby.sim
+
+sleep 2000
+run general/parser/binary_escapeCharacter.sim
+sleep 2000
+#run general/parser/bug.sim
@@ -272,3 +272,5 @@ sql select * from tb_where_NULL where c2 <> "nUll"
 if $rows != 2 then
   return -1
 endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -3,21 +3,21 @@ cd ../../debug; make
 cd ../../../debug; cmake ..
 cd ../../../debug; make
 
-#./test.sh -f general/alter/cached_schema_after_alter.sim
+#unsupport ./test.sh -f general/alter/cached_schema_after_alter.sim
-#./test.sh -f general/alter/count.sim
+#unsupport ./test.sh -f general/alter/count.sim
-#./test.sh -f general/alter/import.sim
+#unsupport ./test.sh -f general/alter/import.sim
-#./test.sh -f general/alter/insert1.sim
+#unsupport ./test.sh -f general/alter/insert1.sim
-#./test.sh -f general/alter/insert2.sim
+#unsupport ./test.sh -f general/alter/insert2.sim
-#./test.sh -f general/alter/metrics.sim
+#unsupport ./test.sh -f general/alter/metrics.sim
-#./test.sh -f general/alter/table.sim
+#unsupport ./test.sh -f general/alter/table.sim
 
 ./test.sh -f general/cache/new_metrics.sim
 ./test.sh -f general/cache/restart_metrics.sim
 ./test.sh -f general/cache/restart_table.sim
 
 #hongze ./test.sh -f general/column/commit.sim
-#hongze ./test.sh -f general/column/metrics.sim
+./test.sh -f general/column/metrics.sim
-#hongze ./test.sh -f general/column/table.sim
+./test.sh -f general/column/table.sim
 
 ./test.sh -f general/compress/commitlog.sim
 ./test.sh -f general/compress/compress.sim
@@ -28,11 +28,11 @@ cd ../../../debug; make
 ./test.sh -f general/compute/bottom.sim
 ./test.sh -f general/compute/count.sim
 ./test.sh -f general/compute/diff.sim
-# liao./test.sh -f general/compute/diff2.sim
+./test.sh -f general/compute/diff2.sim
 ./test.sh -f general/compute/first.sim
-# liao./test.sh -f general/compute/interval.sim
+# liao ./test.sh -f general/compute/interval.sim
-# liao./test.sh -f general/compute/last.sim
+./test.sh -f general/compute/last.sim
-# liao./test.sh -f general/compute/leastsquare.sim
+# liao ./test.sh -f general/compute/leastsquare.sim
 ./test.sh -f general/compute/max.sim
 ./test.sh -f general/compute/min.sim
 ./test.sh -f general/compute/null.sim
@@ -54,30 +54,30 @@ cd ../../../debug; make
 ./test.sh -f general/db/delete_writing1.sim
 ./test.sh -f general/db/delete_writing2.sim
 ./test.sh -f general/db/len.sim
-#./test.sh -u -f general/db/vnodes.sim
+#liao ./test.sh -f general/db/vnodes.sim
 ./test.sh -f general/db/repeat.sim
 ./test.sh -f general/db/tables.sim
 
 ./test.sh -f general/field/2.sim
-#./test.sh -f general/field/3.sim
+#liao ./test.sh -f general/field/3.sim
-#./test.sh -f general/field/4.sim
+#liao? ./test.sh -f general/field/4.sim
-#./test.sh -f general/field/5.sim
+#liao? ./test.sh -f general/field/5.sim
-#./test.sh -f general/field/6.sim
+#liao? ./test.sh -f general/field/6.sim
 ./test.sh -f general/field/bigint.sim
-# liao./test.sh -f general/field/binary.sim
+./test.sh -f general/field/binary.sim
 ./test.sh -f general/field/bool.sim
 ./test.sh -f general/field/single.sim
 ./test.sh -f general/field/smallint.sim
 ./test.sh -f general/field/tinyint.sim
 
-# jeff ./test.sh -f general/http/restful.sim
+./test.sh -f general/http/restful.sim
 ./test.sh -f general/http/restful_insert.sim
 ./test.sh -f general/http/restful_limit.sim
-# jeff ./test.sh -f general/http/restful_full.sim
+./test.sh -f general/http/restful_full.sim
 ./test.sh -f general/http/prepare.sim
 ./test.sh -f general/http/telegraf.sim
 ./test.sh -f general/http/grafana_bug.sim
-# jeff ./test.sh -f general/http/grafana.sim
+./test.sh -f general/http/grafana.sim
 
 ./test.sh -f general/import/basic.sim
 ./test.sh -f general/import/commit.sim
@@ -94,74 +94,67 @@ cd ../../../debug; make
 ./test.sh -f general/insert/query_multi_file.sim
 ./test.sh -f general/insert/tcp.sim
 
-# ./test.sh -f general/parser/alter.sim
+#unsupport ./test.sh -f general/parser/alter.sim
-# ./test.sh -f general/parser/alter1.sim
+#unsupport ./test.sh -f general/parser/alter1.sim
-# ./test.sh -f general/parser/alter_stable.sim
+#unsupport ./test.sh -f general/parser/alter_stable.sim
-# ./test.sh -f general/parser/auto_create_tb.sim
+./test.sh -f general/parser/auto_create_tb.sim
-# ./test.sh -f general/parser/auto_create_tb_drop_tb.sim
+#slguan ./test.sh -f general/parser/auto_create_tb_drop_tb.sim
-./test.sh -f general/parser/binary_escapeCharacter.sim
-#./test.sh -f general/parser/bug.sim
 ./test.sh -f general/parser/col_arithmetic_operation.sim
-./test.sh -f general/parser/columnValue_bigint.sim
-./test.sh -f general/parser/columnValue_bool.sim
-./test.sh -f general/parser/columnValue_double.sim
-./test.sh -f general/parser/columnValue_float.sim
-./test.sh -f general/parser/columnValue_int.sim
-# ./test.sh -f general/parser/col_arithmetic_operation.sim
 ./test.sh -f general/parser/columnValue.sim
 ./test.sh -f general/parser/commit.sim
 # ./test.sh -f general/parser/create_db.sim
 # ./test.sh -f general/parser/create_mt.sim
 # ./test.sh -f general/parser/create_tb.sim
 # ./test.sh -f general/parser/dbtbnameValidate.sim
-# ./test.sh -f general/parser/fill.sim
-# ./test.sh -f general/parser/fill_stb.sim
-# ./test.sh -f general/parser/first_last.sim
 ./test.sh -f general/parser/import_commit1.sim
 ./test.sh -f general/parser/import_commit2.sim
 ./test.sh -f general/parser/import_commit3.sim
-# ./test.sh -f general/parser/import_file.sim
 # ./test.sh -f general/parser/insert_tb.sim
+# ./test.sh -f general/parser/first_last.sim
+# ./test.sh -f general/parser/import_file.sim
+# ./test.sh -f general/parser/lastrow.sim
+# ./test.sh -f general/parser/nchar.sim
+# ./test.sh -f general/parser/null_char.sim
+# ./test.sh -f general/parser/single_row_in_tb.sim
+./test.sh -f general/parser/select_from_cache_disk.sim
+# ./test.sh -f general/parser/limit.sim
+# ./test.sh -f general/parser/fill.sim
+# ./test.sh -f general/parser/fill_stb.sim
 # ./test.sh -f general/parser/tags_dynamically_specifiy.sim
 # ./test.sh -f general/parser/interp.sim
-# ./test.sh -f general/parser/lastrow.sim
-# ./test.sh -f general/parser/limit.sim
 # ./test.sh -f general/parser/limit1.sim
 # ./test.sh -f general/parser/limit1_tblocks100.sim
 # ./test.sh -f general/parser/limit2.sim
 # ./test.sh -f general/parser/mixed_blocks.sim
-# ./test.sh -f general/parser/nchar.sim
-# ./test.sh -f general/parser/null_char.sim
 # ./test.sh -f general/parser/selectResNum.sim
 # ./test.sh -f general/parser/select_across_vnodes.sim
-./test.sh -f general/parser/select_from_cache_disk.sim
 # ./test.sh -f general/parser/set_tag_vals.sim
-# ./test.sh -f general/parser/single_row_in_tb.sim
 # ./test.sh -f general/parser/slimit.sim
 ./test.sh -f general/parser/slimit1.sim
-./test.sh -f general/parser/slimit1_query.sim
+#unsupport ./test.sh -f general/parser/slimit_alter_tags.sim
-# ./test.sh -f general/parser/slimit_alter_tags.sim
+#unsupport ./test.sh -f general/parser/stream_on_sys.sim
-# ./test.sh -f general/parser/stream_on_sys.sim
+#unsupport ./test.sh -f general/parser/stream.sim
-# ./test.sh -f general/parser/stream.sim
 # ./test.sh -f general/parser/tbnameIn.sim
 # ./test.sh -f general/parser/where.sim
-# #./test.sh -f general/parser/repeatAlter.sim
+# ./test.sh -f general/parser/repeatAlter.sim
-# #./test.sh -f general/parser/repeatStream.sim
+#unsupport ./test.sh -f general/parser/repeatStream.sim
 # ./test.sh -f general/parser/join.sim
 # ./test.sh -f general/parser/join_multivnode.sim
 # ./test.sh -f general/parser/projection_limit_offset.sim
 # ./test.sh -f general/parser/select_with_tags.sim
 # ./test.sh -f general/parser/groupby.sim
+./test.sh -f general/parser/binary_escapeCharacter.sim
+#./test.sh -f general/parser/bug.sim
 
-#./test.sh -f general/stable/disk.sim
+./test.sh -f general/stable/disk.sim
-#./test.sh -f general/stable/metrics.sim
+./test.sh -f general/stable/metrics.sim
-#./test.sh -f general/stable/values.sim
+#liao? ./test.sh -f general/stable/values.sim
 ./test.sh -f general/stable/vnode3.sim
 
 ./test.sh -f general/table/autocreate.sim
 ./test.sh -f general/table/basic1.sim
 ./test.sh -f general/table/basic2.sim
-#hongze ./test.sh -f general/table/basic3.sim
+./test.sh -f general/table/basic3.sim
 ./test.sh -f general/table/bigint.sim
 ./test.sh -f general/table/binary.sim
 ./test.sh -f general/table/bool.sim
@@ -186,29 +179,29 @@ cd ../../../debug; make
 ./test.sh -f general/table/tinyint.sim
 ./test.sh -f general/table/vgroup.sim
 
-#./test.sh -f general/tag/3.sim
+#liao ./test.sh -f general/tag/3.sim
-#./test.sh -f general/tag/4.sim
+#liao? ./test.sh -f general/tag/4.sim
-#./test.sh -f general/tag/5.sim
+#liao? ./test.sh -f general/tag/5.sim
-#./test.sh -f general/tag/6.sim
+#liao? ./test.sh -f general/tag/6.sim
-#./test.sh -f general/tag/add.sim
+#unsupport ./test.sh -f general/tag/add.sim
 ./test.sh -f general/tag/bigint.sim
-#./test.sh -f general/tag/binary_binary.sim
+./test.sh -f general/tag/binary_binary.sim
-#./test.sh -f general/tag/binary.sim
+./test.sh -f general/tag/binary.sim
-#./test.sh -f general/tag/bool_binary.sim
+./test.sh -f general/tag/bool_binary.sim
-#./test.sh -f general/tag/bool_int.sim
+./test.sh -f general/tag/bool_int.sim
 ./test.sh -f general/tag/bool.sim
-#./test.sh -f general/tag/change.sim
+#unsupport ./test.sh -f general/tag/change.sim
-#liao ./test.sh -f general/tag/column.sim
+./test.sh -f general/tag/column.sim
-#./test.sh -f general/tag/commit.sim
+#unsupport ./test.sh -f general/tag/commit.sim
-#liao ./test.sh -f general/tag/create.sim
+./test.sh -f general/tag/create.sim
-#./test.sh -f general/tag/delete.sim
+#unsupport ./test.sh -f general/tag/delete.sim
 #./test.sh -f general/tag/double.sim
 ./test.sh -f general/tag/filter.sim
-#./test.sh -f general/tag/float.sim
+#liao? ./test.sh -f general/tag/float.sim
-#./test.sh -f general/tag/int_binary.sim
+#liao? ./test.sh -f general/tag/int_binary.sim
-#./test.sh -f general/tag/int_float.sim
+./test.sh -f general/tag/int_float.sim
 ./test.sh -f general/tag/int.sim
-#./test.sh -f general/tag/set.sim
+#unsupport ./test.sh -f general/tag/set.sim
 ./test.sh -f general/tag/smallint.sim
 ./test.sh -f general/tag/tinyint.sim
 
@@ -224,7 +217,7 @@ cd ../../../debug; make
 ./test.sh -f general/vector/metrics_query.sim
 ./test.sh -f general/vector/metrics_tag.sim
 ./test.sh -f general/vector/metrics_time.sim
-#liao ./test.sh -f general/vector/multi.sim
+./test.sh -f general/vector/multi.sim
 ./test.sh -f general/vector/single.sim
 ./test.sh -f general/vector/table_field.sim
 ./test.sh -f general/vector/table_mix.sim
 