From e2196d6f8082ed3f3361c7243cc2535f71c3eb6a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 15:01:56 +0800 Subject: [PATCH 001/190] [td-988] --- src/tsdb/src/tsdbRead.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0e3a657fde..9d66e5c569 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -316,17 +316,20 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL); - // TODO: add uid check - if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables && - pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) { - pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData, - (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables) { + STableData* ptd = pHandle->mem->tData[pCheckInfo->tableId.tid]; + if (ptd != NULL && ptd->uid == pCheckInfo->tableId.uid) { // check uid + pCheckInfo->iter = + tSkipListCreateIterFromVal(ptd->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + } } - if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables && - pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) { - pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData, - (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables) { + STableData* ptd = pHandle->imem->tData[pCheckInfo->tableId.tid]; + if (ptd != NULL && ptd->uid == pCheckInfo->tableId.uid) { // check uid + pCheckInfo->iiter = + tSkipListCreateIterFromVal(ptd->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + } } // both iterators are NULL, no data in buffer right now From f86022f6c11dfdaecfac7ff7608e5c1294f0acd8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 15:02:33 +0800 Subject: [PATCH 002/190] [td-960] --- src/client/src/tscSQLParser.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9488b37fe3..48f932203d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1191,7 +1191,15 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel tExprTreeDestroy(&pNode, NULL); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid arithmetic expression in select clause"); } - + + size_t numOfNode = taosArrayGetSize(colList); + for(int32_t k = 0; k < numOfNode; ++k) { + SColIndex* pIndex = taosArrayGet(colList, k); + if (pIndex->flag == 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "tag columns can not be used in arithmetic expression"); + } + } + SBufferWriter bw = tbufInitWriter(NULL, false); TRY(0) { @@ -6115,12 +6123,14 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS if (ret != TSDB_CODE_SUCCESS) { return ret; } - + + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; + int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); + *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_COL; (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); - - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; + SSchema* pSchema = 
tscGetTableColumnSchema(pTableMeta, index.columnIndex); *(*pExpr)->pSchema = *pSchema; @@ -6129,7 +6139,8 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name)); colIndex.colId = pSchema->colId; colIndex.colIndex = index.columnIndex; - + colIndex.flag = (index.columnIndex >= numOfColumns)? 1:0; + taosArrayPush(pCols, &colIndex); } From 6fd03c905d20ff2fc05b6d1876cf42397570bb24 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 17:24:43 +0800 Subject: [PATCH 003/190] [td-225] fix bug in aggregation function in arithmetic expression. --- src/client/src/tscLocalMerge.c | 6 +- src/client/src/tscSQLParser.c | 239 +++++++++++++++++++-------------- src/client/src/tscServer.c | 17 +-- src/client/src/tscUtil.c | 74 ++++++---- 4 files changed, 192 insertions(+), 144 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index bf76b8cbe8..eaf78235a4 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -68,7 +68,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i); pCtx->aOutputBuf = - pReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pReducer->resColModel->capacity; + pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity; pCtx->order = pQueryInfo->order.order; pCtx->functionId = pExpr->functionId; @@ -321,6 +321,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList); pReducer->resColModel = finalmodel; pReducer->resColModel->capacity = pReducer->nResultBufSize; + assert(pReducer->finalRowSize > 0); if (pReducer->finalRowSize > 0) { pReducer->resColModel->capacity /= pReducer->finalRowSize; @@ -328,10 +329,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd assert(pReducer->finalRowSize <= pReducer->rowSize); pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity); -// pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize); if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || - /*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { + pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { tfree(pReducer->pTempBuffer); tfree(pReducer->discardData); tfree(pReducer->pResultBuf); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 48f932203d..07afd82150 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -87,7 +87,7 @@ static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQueryS static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); -static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); +static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** exprString); static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type); static int32_t validateEp(char* ep); @@ -1107,13 +1107,128 @@ static 
void extractColumnNameFromString(tSQLExprItem* pItem) { } } +static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) { + const char* msg1 = "invalid column name, or illegal column type"; + const char* msg2 = "invalid arithmetic expression in select clause"; + const char* msg3 = "tag columns can not be used in arithmetic expression"; + const char* msg4 = "columns from different table mixed up in arithmetic expression"; + + // arithmetic function in select clause + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + + SColumnList columnList = {0}; + int32_t arithmeticType = NON_ARITHMEIC_EXPR; + + if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + int32_t tableIndex = columnList.ids[0].tableIndex; + + // todo potential data overflow + char arithmeticExprStr[1024*12]; + char* p = arithmeticExprStr; + + if (arithmeticType == NORMAL_ARITHMETIC) { + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; + + // all columns in arithmetic expression must belong to the same table + for (int32_t f = 1; f < columnList.num; ++f) { + if (columnList.ids[f].tableIndex != tableIndex) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + } + + if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + // expr string is set as the parameter of function + SColumnIndex index = {.tableIndex = tableIndex}; + + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), + sizeof(double), false); + + char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr; + tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); + + tExprNode* pNode = NULL; + SArray* colList = taosArrayInit(10, sizeof(SColIndex)); + + int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(&pNode, NULL); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + size_t numOfNode = taosArrayGetSize(colList); + for(int32_t k = 0; k < numOfNode; ++k) { + SColIndex* pIndex = taosArrayGet(colList, k); + if (pIndex->flag == 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + SBufferWriter bw = tbufInitWriter(NULL, false); + + TRY(0) { + exprTreeToBinary(&bw, pNode); + } CATCH(code) { + tbufCloseWriter(&bw); + UNUSED(code); + // TODO: other error handling + } END_TRY + + size_t len = tbufTell(&bw); + char* c = tbufGetData(&bw, true); + + // set the serialized binary string as the parameter of arithmetic expression + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, len, index.tableIndex); + + insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); + + taosArrayDestroy(colList); + tExprTreeDestroy(&pNode, NULL); + } else { + if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + columnList.num = 0; + columnList.ids[0] = (SColumnIndex) {0, 0}; + + char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:arithmeticExprStr; + insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, name, NULL); + + int32_t slot = tscNumOfFields(pQueryInfo) - 1; + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot); + + if (pInfo->pSqlExpr == NULL) { + SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); + + // arithmetic expression always return result in the format of double float + pArithExprInfo->bytes = sizeof(double); + pArithExprInfo->interBytes = sizeof(double); + pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + + int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); + } + + pInfo->pArithExprInfo = pArithExprInfo; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable) { assert(pSelection != NULL && pCmd != NULL); - const char* msg1 = "invalid column name, or illegal column type"; const char* msg2 = "functions can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "columns from different table mixed up in arithmetic expression"; const char* msg5 = "invalid function name"; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); @@ -1148,104 +1263,11 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel } } else if (pItem->pNode->nSQLOptr >= TK_PLUS && pItem->pNode->nSQLOptr <= TK_REM) { - // arithmetic function in select clause - SColumnList columnList = {0}; - int32_t arithmeticType = NON_ARITHMEIC_EXPR; - - if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + int32_t code = handleArithmeticExpr(pCmd, clauseIndex, i, pItem); + if (code != TSDB_CODE_SUCCESS) { + return code; } - - int32_t tableIndex = columnList.ids[0].tableIndex; - char arithmeticExprStr[1024] = {0}; - char* p = arithmeticExprStr; - - if (arithmeticType == NORMAL_ARITHMETIC) { - pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; - - // all columns in arithmetic expression must belong to the same table - for (int32_t f = 1; f < columnList.num; ++f) { - if (columnList.ids[f].tableIndex != tableIndex) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } - } - - if (buildArithmeticExprString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - - // expr string is set as the parameter of function - SColumnIndex index = {.tableIndex = tableIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, - sizeof(double), sizeof(double), false); - - /* todo alias name should use the original sql string */ - char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:arithmeticExprStr; - tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); - - tExprNode* pNode = NULL; - SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - - int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(&pNode, NULL); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid arithmetic expression in select clause"); - } - size_t numOfNode = taosArrayGetSize(colList); - for(int32_t k = 0; k < numOfNode; ++k) { - SColIndex* pIndex = taosArrayGet(colList, k); - if (pIndex->flag == 1) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "tag columns can not be used in arithmetic expression"); - } - } - - SBufferWriter bw = tbufInitWriter(NULL, false); - - TRY(0) { - exprTreeToBinary(&bw, pNode); - } CATCH(code) { - tbufCloseWriter(&bw); - UNUSED(code); - // TODO: other error handling - } END_TRY - - size_t len = tbufTell(&bw); - char* c = tbufGetData(&bw, true); - - // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, len, index.tableIndex); - - insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); - - taosArrayDestroy(colList); - tExprTreeDestroy(&pNode, NULL); - } else { - columnList.num = 0; - columnList.ids[0] = (SColumnIndex) {0, 0}; - - insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, "dummy_column", NULL); - - int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot); - - if (pInfo->pSqlExpr == NULL) { - SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); - - // arithmetic expression always return result in the format of double float - pArithExprInfo->bytes = sizeof(double); - pArithExprInfo->interBytes = sizeof(double); - pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; - - int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); - } - - pInfo->pArithExprInfo = pArithExprInfo; - } - } } else { /* * not support such expression @@ -3090,14 +3112,14 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* } // todo error handle / such as and /or mixed with +/-/*/ -int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { +int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) { tSQLExpr* pLeft = pExpr->pLeft; tSQLExpr* pRight = pExpr->pRight; *(*exprString)++ = '('; if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { - buildArithmeticExprString(pLeft, exprString); + doArithmeticExprToString(pLeft, exprString); } else { int32_t ret = tSQLExprNodeToString(pLeft, exprString); if (ret != TSDB_CODE_SUCCESS) { @@ -3108,7 +3130,7 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { optrToString(pExpr, exprString); if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { - buildArithmeticExprString(pRight, exprString); + doArithmeticExprToString(pRight, exprString); } else { int32_t ret = tSQLExprNodeToString(pRight, exprString); if (ret != TSDB_CODE_SUCCESS) { @@ -3121,6 +3143,19 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } 
+static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) { + char* start = *str; + + int32_t code = doArithmeticExprToString(pExpr, str); + if (code == TSDB_CODE_SUCCESS) { // remove out the parenthesis + int32_t len = strlen(start); + memmove(start, start + 1, len - 2); + start[len - 2] = 0; + } + + return code; +} + static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) { if (pExpr->nSQLOptr == TK_ID) { if (*type == NON_ARITHMEIC_EXPR) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 521280af87..690a5f790a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1461,7 +1461,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { - tscSetResultPointer(pQueryInfo, pRes); + tscCreateResPointerInfo(pRes, pQueryInfo); } pRes->row = 0; @@ -2115,21 +2115,6 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { return 0; } -int tscProcessRetrieveRspFromLocal(SSqlObj *pSql) { - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - - SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp; - - pRes->numOfRows = htonl(pRetrieve->numOfRows); - pRes->data = pRetrieve->data; - - tscSetResultPointer(pQueryInfo, pRes); - pRes->row = 0; - return 0; -} - void tscTableMetaCallBack(void *param, TAOS_RES *res, int code); static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 73d6f0e592..bbb3af473b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -249,23 +249,25 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo) { } int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { - if (pRes->tsrow == NULL) { - int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput; - pRes->numOfCols = numOfOutput; - - pRes->tsrow = calloc(numOfOutput, POINTER_BYTES); - pRes->length = calloc(numOfOutput, sizeof(int32_t)); // todo refactor - pRes->buffer = calloc(numOfOutput, POINTER_BYTES); - - // not enough memory - if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) { - tfree(pRes->tsrow); - tfree(pRes->buffer); - tfree(pRes->length); - - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - return pRes->code; - } + if (pRes->tsrow != NULL) { + return TSDB_CODE_SUCCESS; + } + + int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput; + pRes->numOfCols = numOfOutput; + + pRes->tsrow = calloc(numOfOutput, POINTER_BYTES); + pRes->length = calloc(numOfOutput, sizeof(int32_t)); // todo refactor + pRes->buffer = calloc(numOfOutput, POINTER_BYTES); + + // not enough memory + if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) { + tfree(pRes->tsrow); + tfree(pRes->buffer); + tfree(pRes->length); + + pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; + return pRes->code; } return TSDB_CODE_SUCCESS; @@ -858,12 +860,13 @@ void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src) { } TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { + assert(index < pFieldInfo->numOfOutput); return TARRAY_GET_ELEM(pFieldInfo->pFields, index); } int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, index); - assert(pInfo != NULL); + assert(pInfo != NULL && 
pInfo->pSqlExpr != NULL); return pInfo->pSqlExpr->offset; } @@ -1773,11 +1776,36 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->uid == uid) { - TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); - *pInfo1 = *pInfo; + if (i < pFieldInfo->numOfOutput) { + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); + if (pInfo->pSqlExpr != NULL) { + TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); + assert(strcmp(p->name, pExpr->aliasName) == 0 && pInfo->pSqlExpr == pExpr); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); + *pInfo1 = *pInfo; + } else { + // current sql function is not direct output result, so create a dummy output field + assert(pInfo->pArithExprInfo != NULL); + + TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; + tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); + + pInfo1->pSqlExpr = pExpr; + pInfo1->visible = false; + } + } else { + // current sql function is not direct output result, so create a dummy output field + TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; + tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); + + pInfo1->pSqlExpr = pExpr; + pInfo1->visible = false; + } } } From e8f9f503fcaadf103a4e10c2f39c265db1a56f68 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 18:04:20 +0800 Subject: [PATCH 004/190] [td-225] fix bugs in join query. --- src/client/inc/tsclient.h | 1 - src/client/src/tscSubquery.c | 8 +++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 7a44870938..dbbb6be128 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -419,7 +419,6 @@ char *tscGetErrorMsgPayload(SSqlCmd *pCmd); int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -//void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column); static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 15e02799aa..c396c81310 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1054,6 +1054,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } } } + + // restore the offset value for super table query in case of final result. + tscRestoreSQLFuncForSTableQuery(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); } void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { @@ -2046,9 +2050,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } while (1) { - if (pRes->row < pRes->numOfRows) { - assert(0); - } + assert (pRes->row >= pRes->numOfRows); doBuildResFromSubqueries(pSql); sem_post(&pSql->rspSem); From 6e45e83242abed03a643b85e45c3a1471c52ab3c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 18:08:47 +0800 Subject: [PATCH 005/190] [td-225] fix bugs in join query. 
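
The one-line change below keeps only the alias-name comparison in the assertion inside createSubqueryObj() and drops the pointer-identity check against pInfo->pSqlExpr. As a standalone illustration of that "match result fields to expressions by alias name" idea -- a simplified sketch, not part of the patch itself -- the snippet that follows uses hypothetical SOutputExpr/SResultField/linkFieldsByAlias() names rather than the real TDengine types:

#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef struct SOutputExpr {
  const char *aliasName;                 /* alias produced by the select clause */
} SOutputExpr;

typedef struct SResultField {
  const char        *name;               /* visible result column name */
  const SOutputExpr *pExpr;              /* expression that fills this column */
} SResultField;

/* re-establish the field -> expression links by alias name only */
static void linkFieldsByAlias(SResultField *fields, size_t numOfFields,
                              const SOutputExpr *exprs, size_t numOfExprs) {
  for (size_t f = 0; f < numOfFields; ++f) {
    int matched = 0;
    for (size_t k = 0; k < numOfExprs; ++k) {
      if (strcmp(fields[f].name, exprs[k].aliasName) == 0) {
        fields[f].pExpr = &exprs[k];
        matched = 1;
        break;
      }
    }
    assert(matched);                     /* every output field must resolve to an expression */
  }
}
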
--- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index bbb3af473b..bd1fd9905a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1780,7 +1780,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); if (pInfo->pSqlExpr != NULL) { TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); - assert(strcmp(p->name, pExpr->aliasName) == 0 && pInfo->pSqlExpr == pExpr); + assert(strcmp(p->name, pExpr->aliasName) == 0); SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); *pInfo1 = *pInfo; From 4e2ed952ec89c106d9b565bf8cc1eea3518da629 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 29 Jul 2020 18:51:27 +0800 Subject: [PATCH 006/190] [td-225] fix bugs in join query. --- src/client/inc/tsclient.h | 2 +- src/client/src/tscSubquery.c | 10 ++- src/client/src/tscUtil.c | 140 ++++++++++++++++++----------------- 3 files changed, 79 insertions(+), 73 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index dbbb6be128..e3e1d44514 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -106,7 +106,7 @@ typedef struct SColumnIndex { typedef struct SFieldSupInfo { bool visible; SExprInfo *pArithExprInfo; - SSqlExpr * pSqlExpr; + SSqlExpr *pSqlExpr; } SFieldSupInfo; typedef struct SFieldInfo { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c396c81310..c7e7d1323b 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1026,9 +1026,11 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutput); - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs); + + for (int32_t i = 0; i < numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t tableIndexOfSub = -1; @@ -1045,8 +1047,8 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd; SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); - size_t numOfExprs = taosArrayGetSize(pSubQueryInfo->exprList); - for (int32_t k = 0; k < numOfExprs; ++k) { + size_t numOfSubExpr = taosArrayGetSize(pSubQueryInfo->exprList); + for (int32_t k = 0; k < numOfSubExpr; ++k) { SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) { pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k}; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index bd1fd9905a..32a82a080f 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1673,6 +1673,77 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm return pNew; } +// current sql function is not direct output result, so create a dummy output field +static void doSetNewFieldInfo(SQueryInfo* pNewQueryInfo, SSqlExpr* pExpr) { + TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; + tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); + + pInfo1->pSqlExpr = pExpr; + 
pInfo1->visible = false; +} + +static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) { + int32_t numOfOutput = tscSqlExprNumOfExprs(pNewQueryInfo); + if (numOfOutput == 0) { + return; + } + + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; + + // set the field info in pNewQueryInfo object + for (int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + + if (pExpr->uid == uid) { + if (i < pFieldInfo->numOfOutput) { + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); + + if (pInfo->pSqlExpr != NULL) { + TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); + assert(strcmp(p->name, pExpr->aliasName) == 0); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); + *pInfo1 = *pInfo; + } else { + assert(pInfo->pArithExprInfo != NULL); + doSetNewFieldInfo(pNewQueryInfo, pExpr); + } + } else { // it is a arithmetic column, does not have actual field for sqlExpr, so build it + doSetNewFieldInfo(pNewQueryInfo, pExpr); + } + } + } + + // make sure the the sqlExpr for each fields is correct + numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); + + // update the pSqlExpr pointer in SFieldSupInfo according the field name + // make sure the pSqlExpr point to the correct SqlExpr in pNewQueryInfo, not SqlExpr in pQueryInfo + for (int32_t f = 0; f < pNewQueryInfo->fieldsInfo.numOfOutput; ++f) { + TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); + + bool matched = false; + for (int32_t k1 = 0; k1 < numOfExprs; ++k1) { + SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); + + if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pNewQueryInfo->fieldsInfo, f); + pInfo->pSqlExpr = pExpr1; + + matched = true; + break; + } + } + + assert(matched); + } + + tscFieldInfoUpdateOffset(pNewQueryInfo); +} + SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); @@ -1766,74 +1837,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true); - int32_t numOfOutput = tscSqlExprNumOfExprs(pNewQueryInfo); - - if (numOfOutput > 0) { // todo refactor to extract method - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; - - for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - - if (pExpr->uid == uid) { - if (i < pFieldInfo->numOfOutput) { - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); - if (pInfo->pSqlExpr != NULL) { - TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); - assert(strcmp(p->name, pExpr->aliasName) == 0); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); - *pInfo1 = *pInfo; - } else { - // current sql function is not direct output result, so create a dummy output field - assert(pInfo->pArithExprInfo != NULL); - - TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; - tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); - - pInfo1->pSqlExpr = pExpr; - pInfo1->visible = false; - } - } else { - // current 
sql function is not direct output result, so create a dummy output field - TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; - tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); - - pInfo1->pSqlExpr = pExpr; - pInfo1->visible = false; - } - } - } - - // make sure the the sqlExpr for each fields is correct - // todo handle the agg arithmetic expression - numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); - - for(int32_t f = 0; f < pNewQueryInfo->fieldsInfo.numOfOutput; ++f) { - TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); - bool matched = false; - - for(int32_t k1 = 0; k1 < numOfExprs; ++k1) { - SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); - - if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pNewQueryInfo->fieldsInfo, f); - pInfo->pSqlExpr = pExpr1; - - matched = true; - break; - } - } - - assert(matched); - } - - tscFieldInfoUpdateOffset(pNewQueryInfo); - } + doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid); pNew->fp = fp; pNew->fetchFp = fp; From d58310d9d73b404a9138b934cb9e2f9b9e8507f5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 15:46:50 +0800 Subject: [PATCH 007/190] [td-225] fix bugs in join/union query. --- src/client/src/tscAsync.c | 12 +- src/client/src/tscLocal.c | 4 +- src/client/src/tscServer.c | 58 ++++------ src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 4 + src/query/src/qExecutor.c | 2 +- tests/script/general/parser/join.sim | 24 ++++ tests/script/general/parser/union.sim | 154 +++++++++++++------------- 8 files changed, 138 insertions(+), 122 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 85cff4ba17..59d906e8fd 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -220,14 +220,13 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); } else if (pRes->completed) { - if(pCmd->command == TSDB_SQL_FETCH) { + if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) { if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode); - return; } else { /* - * all available virtual node has been checked already, now we need to check - * for the next subclause queries + * all available virtual nodes in current clause has been checked already, now try the + * next one in the following union subclause */ if (pCmd->clauseIndex < pCmd->numOfClause - 1) { tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode); @@ -235,11 +234,12 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi } /* - * 1. has reach the limitation - * 2. no remain virtual nodes to be retrieved anymore + * 1. has reach the limitation + * 2. 
no remain virtual nodes to be retrieved anymore */ (*pSql->fetchFp)(param, pSql, 0); } + return; } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) { // in case of show command, return no data diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 7f336daa91..6822851d84 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -293,7 +293,7 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { char db[TSDB_DB_NAME_LEN] = {0}; extractDBName(pSql->pTscObj->db, db); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; @@ -314,7 +314,7 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { static void tscProcessServerVer(SSqlObj *pSql) { const char* v = pSql->pTscObj->sversion; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 690a5f790a..6b9fc0551e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -46,19 +46,27 @@ void tscSaveSubscriptionProgress(void* sub); static int32_t minMsgSize() { return tsRpcHeadSize + 100; } static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) { + assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); + SRpcEpSet* pEpSet = &pSql->epSet; - pEpSet->inUse = 0; - if (pVgroupInfo == NULL) { - pEpSet->numOfEps = 0; - return; - } + pEpSet->inUse = 0; + + // apply the FQDN string length check here + bool hasFqdn = false; pEpSet->numOfEps = pVgroupInfo->numOfEps; for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) { strcpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn); pEpSet->port[i] = pVgroupInfo->epAddr[i].port; + + if (!hasFqdn) { + hasFqdn = (strlen(pEpSet->fqdn[i]) > 0); + } } + + assert(hasFqdn); } + static void tscDumpMgmtEpSet(SRpcEpSet *epSet) { taosCorBeginRead(&tscMgmtEpSet.version); *epSet = tscMgmtEpSet.epSet; @@ -128,21 +136,6 @@ void tscPrintMgmtEp() { } } -/* - * For each management node, try twice at least in case of poor network situation. - * If the client start to connect to a non-management node from the client, and the first retry may fail due to - * the poor network quality. And then, the second retry get the response with redirection command. - * The retry will not be executed since only *two* retry is allowed in case of single management node in the cluster. - * Therefore, we need to multiply the retry times by factor of 2 to fix this problem. 
- */ -UNUSED_FUNC -static int32_t tscGetMgmtConnMaxRetryTimes() { - int32_t factor = 2; - SRpcEpSet dump; - tscDumpMgmtEpSet(&dump); - return dump.numOfEps * factor; -} - void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { STscObj *pObj = (STscObj *)param; if (pObj == NULL) return; @@ -425,21 +418,18 @@ int doProcessSql(SSqlObj *pSql) { } int tscProcessSql(SSqlObj *pSql) { - char * name = NULL; + char *name = NULL; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); STableMetaInfo *pTableMetaInfo = NULL; uint32_t type = 0; if (pQueryInfo != NULL) { pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - if (pTableMetaInfo != NULL) { - name = pTableMetaInfo->name; - } - + name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL; type = pQueryInfo->type; - + // while numOfTables equals to 0, it must be Heartbeat assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } @@ -451,7 +441,6 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } } else if (pCmd->command < TSDB_SQL_LOCAL) { - //pSql->epSet = tscMgmtEpSet; } else { // local handler return (*tscProcessMsgRsp[pCmd->command])(pSql); @@ -598,11 +587,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char } else { pVgroupInfo = &pTableMeta->vgroupInfo; } - tscSetDnodeEpSet(pSql, pVgroupInfo); - if (pVgroupInfo != NULL) { - pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId); - } + assert(pVgroupInfo != NULL); + + tscSetDnodeEpSet(pSql, pVgroupInfo); + pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId); STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg; pTableIdInfo->tid = htonl(pTableMeta->id.tid); @@ -1885,11 +1874,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { for (int32_t k = 0; k < pVgroups->numOfEps; ++k) { pVgroups->epAddr[k].port = htons(pVgroups->epAddr[k].port); - } - - pMsg += size; } + + pMsg += size; } return pSql->res.code; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c7e7d1323b..7258ac528e 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1085,7 +1085,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { assert(taos_errno(pSql) == code); - tscError("%p abort query, code:%d, global code:%d", pSql, code, pParentSql->res.code); + tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code)); pParentSql->res.code = code; quitAllSubquery(pParentSql, pSupporter); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 32a82a080f..5ee3db36d1 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2034,6 +2034,10 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) { } int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } + return tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && (!tscHasReachLimitation(pQueryInfo, pRes)) && (pTableMetaInfo->vgroupIndex < numOfVgroups - 1); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6efc8a827e..fb13972689 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2216,7 +2216,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa static void 
ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block SQuery* pQuery = pRuntimeEnv->pQuery; - if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv) && !isTSCompQuery(pQuery)) { SResultRec *pRec = &pQuery->rec; if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 07f2cd3f77..882f561ae1 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -257,6 +257,21 @@ if $data01 != $val then return -1 endi +sql select count(join_tb1.*) + count(join_tb0.*) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts >= 100000 and join_tb0.c7 = false;; +if $rows != 1 then + return -1 +endi + +if $data00 != 20.000000000 then + print expect 20.000000000 actual $data00 + return -1 +endi + +sql select count(join_tb1.*)/10 from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts >= 100000 and join_tb0.c7 = false;; +if $data00 != 1.000000000 then + return -1 +endi + print 3 #agg + where condition sql select count(join_tb1.c3), count(join_tb0.ts) from $tb1 , $tb2 where $ts1 = $ts2 and join_tb1.ts <= 100002 and join_tb0.c7 = true; @@ -381,6 +396,15 @@ if $data00 != $val then return -1 endi +sql select sum(join_mt0.c1)+sum(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.ts = join_mt1.ts and join_mt0.t1=join_mt1.t1 and join_mt0.c2=99 and join_mt1.ts=100999; +if $rows != 1 then + return -1 +endi + +if $data00 != 396.000000000 then + return -1 +endi + # first/last sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by join_mt0.ts asc; diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index 358bcb8a40..b9dc8e8e1f 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -1,10 +1,10 @@ -#system sh/stop_dnodes.sh -# -#system sh/deploy.sh -n dnode1 -i 1 -#system sh/cfg.sh -n dnode1 -c walLevel -v 0 -#system sh/cfg.sh -n dnode1 -c debugFlag -v 135 -#system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 -#system sh/exec.sh -n dnode1 -s start +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -24,77 +24,77 @@ $mt = $mtPrefix . $i $j = 1 $mt1 = $mtPrefix . $j -# -#sql drop database if exits $db -x step1 -#step1: -#sql create database if not exists $db maxtables 4 + +sql drop database if exits $db -x step1 +step1: +sql create database if not exists $db maxtables 4 sql use $db -#sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) -# -#$i = 0 -#$t = 1578203484000 -# -#while $i < $tbNum -# $tb = $tbPrefix . $i -# sql create table $tb using $mt tags( $i ) -# -# $x = 0 -# while $x < $rowNum -# $ms = $x * 1000 -# $ms = $ms * 60 -# -# $c = $x / 100 -# $c = $c * 100 -# $c = $x - $c -# $binary = 'binary . 
$c -# $binary = $binary . ' -# $nchar = 'nchar . $c -# $nchar = $nchar . ' -# -# $t1 = $t + $ms -# sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) -# $x = $x + 1 -# endw -# -# $i = $i + 1 -#endw -# -#sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) -# -#$j = 0 -#$t = 1578203484000 -#$rowNum = 1000 -#$tbNum = 5 -#$i = 0 -# -#while $i < $tbNum -# $tb1 = $tbPrefix1 . $j -# sql create table $tb1 using $mt1 tags( $i ) -# -# $x = 0 -# while $x < $rowNum -# $ms = $x * 1000 -# $ms = $ms * 60 -# -# $c = $x / 100 -# $c = $c * 100 -# $c = $x - $c -# $binary = 'binary . $c -# $binary = $binary . ' -# $nchar = 'nchar . $c -# $nchar = $nchar . ' -# -# $t1 = $t + $ms -# sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) -# $x = $x + 1 -# endw -# -# $i = $i + 1 -# $j = $j + 1 -#endw -# -#print sleep 1sec. -#sleep 1000 +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +$t = 1578203484000 + +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$j = 0 +$t = 1578203484000 +$rowNum = 1000 +$tbNum = 5 +$i = 0 + +while $i < $tbNum + $tb1 = $tbPrefix1 . $j + sql create table $tb1 using $mt1 tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 + $j = $j + 1 +endw + +print sleep 1sec. +sleep 1000 $i = 1 $tb = $tbPrefix . $i From 1dd3d2ba05e065c2cb72535ca6f6a1d68b1dd21f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 15:57:32 +0800 Subject: [PATCH 008/190] [td-225] update the script --- tests/script/general/parser/sliding.sim | 4 ++-- tests/script/general/parser/union.sim | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim index 177c95651f..f85211beb8 100644 --- a/tests/script/general/parser/sliding.sim +++ b/tests/script/general/parser/sliding.sim @@ -1,7 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 system sh/exec.sh -n dnode1 -s start @@ -28,7 +28,7 @@ $mt = $mtPrefix . 
$i sql drop database if exits $db -x step1 step1: -sql create database if not exists $db tables 4 keep 36500 +sql create database if not exists $db maxtables 4 keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index b9dc8e8e1f..fbd1c211b9 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -412,4 +412,8 @@ if $data10 != @union_db0@ then return -1 endi +sql_error show tables union all show tables +sql_error show stables union all show stables +sql_error show databases union all show databases + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From a09378a4db084835bbaf2d221b2c977e58ffe544 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 17:02:42 +0800 Subject: [PATCH 009/190] [td-225] refactors --- src/query/src/qExecutor.c | 10 +++++----- src/vnode/src/vnodeRead.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index fb13972689..66762a1ca5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6670,9 +6670,9 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) { SQueryMgmt* pQueryMgmt = pQMgmt; qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId); - pthread_mutex_lock(&pQueryMgmt->lock); +// pthread_mutex_lock(&pQueryMgmt->lock); pQueryMgmt->closed = true; - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); } @@ -6710,16 +6710,16 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - pthread_mutex_lock(&pQueryMgmt->lock); +// pthread_mutex_lock(&pQueryMgmt->lock); if (pQueryMgmt->closed) { - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); return NULL; } else { uint64_t handleVal = (uint64_t) qInfo; void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN); - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); return handle; } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 066770e1bb..3c642b5098 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include +//#include #include "os.h" #include "tglobal.h" From fa560fd3a754c78bb136ee07bf262b1f5683c1b6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 17:14:17 +0800 Subject: [PATCH 010/190] [td-225] refactor codes --- cmake/platform.inc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmake/platform.inc b/cmake/platform.inc index 7a371df70b..dac0509b12 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -61,6 +61,8 @@ execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh) execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO) MESSAGE(STATUS "The current os is " ${TD_OS_INFO}) +ADD_DEFINITIONS(-D_TD_LINUX_64) + IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8) SET(TD_LINUX_64 TRUE) From 3b80eb725593acf3301bd264ddfd39ab26d91676 Mon Sep 17 00:00:00 
2001 From: Haojun Liao Date: Thu, 30 Jul 2020 17:18:15 +0800 Subject: [PATCH 011/190] [td-225] refactor codes. --- src/util/src/tcache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index dc9128d4a9..8470eedd0e 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -276,7 +276,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v return NULL; } - __cache_wr_lock(pCacheObj); + __cache_rd_lock(pCacheObj); SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); SCacheDataNode * pOld = (pt != NULL) ? (*pt) : NULL; From ae244c9e849c71b02c99307b84d7590fedcdad3d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 17:22:13 +0800 Subject: [PATCH 012/190] [td-225] refactor codes. --- src/util/src/tcache.c | 2 +- src/vnode/src/vnodeRead.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 8470eedd0e..dc9128d4a9 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -276,7 +276,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v return NULL; } - __cache_rd_lock(pCacheObj); + __cache_wr_lock(pCacheObj); SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); SCacheDataNode * pOld = (pt != NULL) ? (*pt) : NULL; diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3c642b5098..6c05091cec 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -207,6 +207,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vDebug("vgId:%d, QInfo:%p, start to build result rsp after query paused, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, *handle, &freehandle); + freehandle = false; // todo test the error code case if (code == TSDB_CODE_SUCCESS) { code = TSDB_CODE_QRY_HAS_RSP; @@ -266,6 +267,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); + freeHandle = false; } qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); From c2ce0fc4f88aeb31d8114d61aed15ec52adbfa0f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 30 Jul 2020 23:47:06 +0800 Subject: [PATCH 013/190] [td-225] refactor codes --- src/util/inc/hash.h | 12 ++-- src/util/src/hash.c | 127 ++++++++++++------------------------- src/util/src/tcache.c | 130 +++++++++++++++++++------------------- src/vnode/src/vnodeRead.c | 2 - 4 files changed, 113 insertions(+), 158 deletions(-) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 137777f3cb..900aa112e4 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -29,16 +29,12 @@ extern "C" { typedef void (*_hash_free_fn_t)(void *param); typedef struct SHashNode { - char *key; -// union { - struct SHashNode * prev; -// struct SHashEntry *prev1; -// }; -// + char *key; + struct SHashNode *prev; struct SHashNode *next; uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash uint32_t keyLen; // length of the key - char data[]; + char *data; } SHashNode; typedef struct SHashObj { @@ -109,6 +105,8 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); */ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); +void taosHashRemoveNode(); + /** * clean up hash table * @param handle diff --git 
a/src/util/src/hash.c b/src/util/src/hash.c index 5dff6286f0..37e8c37cb6 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -19,6 +19,8 @@ #include "tulog.h" #include "tutil.h" +#define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) + static FORCE_INLINE void __wr_lock(void *lock) { if (lock == NULL) { return; @@ -95,29 +97,19 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { * @param hashVal hash value by hash function * @return */ -FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) { - uint32_t hash = (*pHashObj->hashFp)(key, keyLen); - - int32_t slot = HASH_INDEX(hash, pHashObj->capacity); +FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t hashVal) { + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashNode *pNode = pHashObj->hashList[slot]; - while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); break; } pNode = pNode->next; } - if (pNode) { - assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot); - } - - // return the calculated hash value, to avoid calculating it again in other functions - if (hashVal != NULL) { - *hashVal = hash; - } - return pNode; } @@ -148,7 +140,13 @@ static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *p * @param dsize size of actual data * @return hash node */ -static SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize); +static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNewNode) { + assert(pNode->keyLen == pNewNode->keyLen); + SWAP(pNode->key, pNewNode->key, void*); + SWAP(pNode->data, pNewNode->data, void*); + + return pNewNode; +} /** * insert the hash node at the front of the linked list @@ -217,58 +215,43 @@ size_t taosHashGetSize(const SHashObj *pHashObj) { } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { - __wr_lock(pHashObj->lock); + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); + if (pNewNode == NULL) { + return -1; + } - uint32_t hashVal = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, &hashVal); + __wr_lock(pHashObj->lock); + SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table - taosHashTableResize(pHashObj); - SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); - if (pNewNode == NULL) { - __unlock(pHashObj->lock); - - return -1; + if (HASH_NEED_RESIZE(pHashObj)) { + taosHashTableResize(pHashObj); } doAddToHashTable(pHashObj, pNewNode); + __unlock(pHashObj->lock); } else { - SHashNode *pNewNode = doUpdateHashNode(pNode, key, keyLen, data, size); - if (pNewNode == NULL) { - __unlock(pHashObj->lock); - return -1; - } + doUpdateHashNode(pNode, pNewNode); + __unlock(pHashObj->lock); - if (pNewNode->prev) { - pNewNode->prev->next = pNewNode; - } else { - int32_t slot = HASH_INDEX(pNewNode->hashVal, pHashObj->capacity); - - assert(pHashObj->hashList[slot] == pNode); - pHashObj->hashList[slot] = pNewNode; - } - - if (pNewNode->next) { - (pNewNode->next)->prev = pNewNode; - } + tfree(pNewNode->data) + tfree(pNewNode); } - __unlock(pHashObj->lock); return 0; } void 
*taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + __rd_lock(pHashObj->lock); - - uint32_t hashVal = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, &hashVal); - + SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); __unlock(pHashObj->lock); - if (pNode != NULL) { + if (pNode) { assert(pNode->hashVal == hashVal); - return pNode->data; } else { return NULL; @@ -276,10 +259,10 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { } void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { - __wr_lock(pHashObj->lock); + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); - uint32_t val = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, &val); + __wr_lock(pHashObj->lock); + SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); if (pNode == NULL) { __unlock(pHashObj->lock); return; @@ -287,7 +270,7 @@ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { SHashNode *pNext = pNode->next; if (pNode->prev == NULL) { - int32_t slot = HASH_INDEX(val, pHashObj->capacity); + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); assert(pHashObj->hashList[slot] == pNode); pHashObj->hashList[slot] = pNext; @@ -299,13 +282,13 @@ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { pNext->prev = pNode->prev; } - pHashObj->size--; + pHashObj->size -= 1; + __unlock(pHashObj->lock); pNode->next = NULL; pNode->prev = NULL; tfree(pNode); - __unlock(pHashObj->lock); } void taosHashCleanup(SHashObj *pHashObj) { @@ -341,14 +324,6 @@ void taosHashCleanup(SHashObj *pHashObj) { free(pHashObj); } -void taosHashSetFreecb(SHashObj *pHashObj, _hash_free_fn_t freeFp) { - if (pHashObj == NULL || freeFp == NULL) { - return; - } - - pHashObj->freeFp = freeFp; -} - SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) { SHashMutableIterator *pIter = calloc(1, sizeof(SHashMutableIterator)); if (pIter == NULL) { @@ -528,38 +503,20 @@ void taosHashTableResize(SHashObj *pHashObj) { } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - size_t totalSize = dsize + sizeof(SHashNode) + keyLen; - - SHashNode *pNewNode = calloc(1, totalSize); + SHashNode *pNewNode = calloc(1, sizeof(SHashNode)); if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } - - memcpy(pNewNode->data, pData, dsize); - - pNewNode->key = pNewNode->data + dsize; - memcpy(pNewNode->key, key, keyLen); - pNewNode->keyLen = keyLen; - - pNewNode->hashVal = hashVal; - return pNewNode; -} -SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize) { - size_t size = dsize + sizeof(SHashNode) + keyLen; - - SHashNode *pNewNode = (SHashNode *)realloc(pNode, size); - if (pNewNode == NULL) { - return NULL; - } - + pNewNode->data = malloc(dsize + keyLen); memcpy(pNewNode->data, pData, dsize); pNewNode->key = pNewNode->data + dsize; - assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen); - memcpy(pNewNode->key, key, keyLen); + + pNewNode->keyLen = keyLen; + pNewNode->hashVal = hashVal; return pNewNode; } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index dc9128d4a9..b89cd836e1 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -147,49 +147,18 @@ static FORCE_INLINE void taosCacheMoveToTrash(SCacheObj *pCacheObj, 
SCacheDataNo * @param dataSize * @return */ -static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode *pNode, const char *key, int32_t keyLen, - const void *pData, uint32_t dataSize, uint64_t duration) { - SCacheDataNode *pNewNode = NULL; - +static UNUSED_FUNC SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode* pNode, SCacheDataNode* pNewNode, + const char *key, int32_t keyLen) { + // only a node is not referenced by any other object, in-place update it - if (T_REF_VAL_GET(pNode) == 0) { - size_t newSize = sizeof(SCacheDataNode) + dataSize + keyLen + 1; - - pNewNode = (SCacheDataNode *)realloc(pNode, newSize); - if (pNewNode == NULL) { - return NULL; - } - - memset(pNewNode, 0, newSize); - pNewNode->signature = (uint64_t)pNewNode; - memcpy(pNewNode->data, pData, dataSize); - - pNewNode->key = (char *)pNewNode + sizeof(SCacheDataNode) + dataSize; - pNewNode->keySize = keyLen; - memcpy(pNewNode->key, key, keyLen); - - // update the timestamp information for updated key/value - pNewNode->addedTime = taosGetTimestampMs(); - pNewNode->lifespan = duration; - - T_REF_INC(pNewNode); - - // the address of this node may be changed, so the prev and next element should update the corresponding pointer - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); - } else { + if (T_REF_VAL_GET(pNode) > 0) { taosCacheMoveToTrash(pCacheObj, pNode); - - pNewNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); - if (pNewNode == NULL) { - return NULL; - } - - T_REF_INC(pNewNode); - - // addedTime new element to hashtable - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); } - + + T_REF_INC(pNewNode); + + // addedTime new element to hashtable + taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); return pNewNode; } @@ -238,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false); + pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), true); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -270,36 +239,59 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext } void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) { - SCacheDataNode *pNode; +// SCacheDataNode *pNode = NULL; if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) { return NULL; } + SCacheDataNode *pNode1 = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); + if (pNode1 == NULL) { + uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); + return NULL; + } + __cache_wr_lock(pCacheObj); SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); SCacheDataNode * pOld = (pt != NULL) ? 
(*pt) : NULL; if (pOld == NULL) { // do addedTime to cache - pNode = taosAddToCacheImpl(pCacheObj, key, keyLen, pData, dataSize, duration * 1000L); - if (NULL != pNode) { - pCacheObj->totalSize += pNode->size; + T_REF_INC(pNode1); + taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + __cache_unlock(pCacheObj); - uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 - "bytes size:%" PRId64 "bytes", - pCacheObj->name, key, pNode->data, pNode->addedTime, pNode->expireTime, - (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); - } else { - uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); - } + atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); + uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 + "bytes size:%" PRId64 "bytes", + pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, + (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); } else { // old data exists, update the node - pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L); - uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode->data, pOld); + + bool addToTrashcan = false; + if (T_REF_VAL_GET(pOld) > 0) { + // todo removed by node, instead of by key + taosHashRemove(pCacheObj->pHashTable, pOld->key, pOld->keySize); + } else { + addToTrashcan = true; + } + + T_REF_INC(pNode1); + + // addedTime new element to hashtable + taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + __cache_unlock(pCacheObj); + + // todo add trashcan lock + if (addToTrashcan) { + taosAddToTrash(pCacheObj, pOld); + } else { + tfree(pOld); + } + + uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, pOld); } - __cache_unlock(pCacheObj); - - return (pNode != NULL) ? pNode->data : NULL; + return pNode1->data; } void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) { @@ -310,7 +302,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen void *pData = NULL; __cache_rd_lock(pCacheObj); - SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); int32_t ref = 0; @@ -439,21 +430,33 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * that tries to do the same thing. 
*/ if (pNode->inTrashCan) { + __cache_unlock(pCacheObj); + if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); } } else { + taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + __cache_unlock(pCacheObj); + if (ref > 0) { assert(pNode->pTNodeHeader == NULL); - taosCacheMoveToTrash(pCacheObj, pNode); + + // todo trashcan lock + taosAddToTrash(pCacheObj, pNode); } else { - taosCacheReleaseNode(pCacheObj, pNode); +// taosCacheReleaseNode(pCacheObj, pNode); + atomic_fetch_sub_ptr(&pCacheObj->totalSize, pNode->size); + uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", + pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, + pNode->size); + + if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); + free(pNode); } } - __cache_unlock(pCacheObj); - } else { // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. int32_t ref = T_REF_DEC(pNode); @@ -497,8 +500,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { doCleanupDataCache(pCacheObj); } -SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, - uint64_t duration) { +SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, uint64_t duration) { size_t totalSize = size + sizeof(SCacheDataNode) + keyLen; SCacheDataNode *pNewNode = calloc(1, totalSize); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 6c05091cec..3c642b5098 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -207,7 +207,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vDebug("vgId:%d, QInfo:%p, start to build result rsp after query paused, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, *handle, &freehandle); - freehandle = false; // todo test the error code case if (code == TSDB_CODE_SUCCESS) { code = TSDB_CODE_QRY_HAS_RSP; @@ -267,7 +266,6 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); - freeHandle = false; } qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); From 4a508cdf3e6f9f200cbe97d6eb08c2b50a6171cf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 00:15:59 +0800 Subject: [PATCH 014/190] [td-225] refactor codes. 
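
The put path reworked in the preceding patch replaces an existing cache entry by unlinking the old node first and then deciding how to dispose of it: a node nobody references can be freed on the spot, while a node that readers still hold has to be parked in the trash can and freed later. A minimal sketch of that decision, using generic stand-in names rather than the real SCacheDataNode/taosAddToTrash machinery:

#include <stdlib.h>

/* Illustrative only: simplified stand-ins for the cache node and its
 * deferred-free ("trash") list; the real code uses atomic ref counters. */
typedef struct CacheNode {
  int               ref;    /* readers currently holding this node */
  void             *data;   /* user payload */
  struct CacheNode *next;   /* link used while parked in the trash list */
} CacheNode;

typedef struct Cache {
  CacheNode *trash;         /* deferred-free list for still-referenced nodes */
} Cache;

/* Called after the old node has already been unlinked from the hash table. */
static void retireReplacedNode(Cache *cache, CacheNode *old) {
  if (old == NULL) {
    return;
  }
  if (old->ref == 0) {      /* no reader holds it: release immediately */
    free(old->data);
    free(old);
  } else {                  /* still referenced: defer until the last reader drops it */
    old->next = cache->trash;
    cache->trash = old;
  }
}
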
--- src/util/src/tcache.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index b89cd836e1..715a142c90 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -207,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), true); + pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -239,8 +239,6 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext } void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) { -// SCacheDataNode *pNode = NULL; - if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) { return NULL; } @@ -266,12 +264,11 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); } else { // old data exists, update the node - bool addToTrashcan = false; if (T_REF_VAL_GET(pOld) > 0) { + // todo removed by node, instead of by key taosHashRemove(pCacheObj->pHashTable, pOld->key, pOld->keySize); - } else { addToTrashcan = true; } @@ -285,7 +282,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v if (addToTrashcan) { taosAddToTrash(pCacheObj, pOld); } else { - tfree(pOld); + free(pOld); } uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, pOld); From e47e6300e4b25f961b016a634a366af9b137a08b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 00:49:24 +0800 Subject: [PATCH 015/190] [td-225] refactor codes. 
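
This patch has taosHashRemove report whether the removal actually happened, so the cache can tell which caller owns the cleanup when several threads race to release the same key. The idea, sketched here with illustrative names (a toy single-slot table, not the real hash API):

#include <stdbool.h>
#include <string.h>

/* Toy single-slot "table": just enough to show the ownership rule. */
typedef struct Slot {
  char key[32];
  bool present;             /* stands in for membership in the hash table */
  int  ref;                 /* outstanding references to the stored node */
} Slot;

/* Returns 0 only for the caller whose removal took effect, -1 otherwise. */
static int slotRemove(Slot *s, const char *key) {
  if (!s->present || strcmp(s->key, key) != 0) {
    return -1;              /* already gone, e.g. removed by another thread */
  }
  s->present = false;
  return 0;
}

static void slotRelease(Slot *s, const char *key) {
  if (slotRemove(s, key) != 0) {
    return;                 /* someone else removed it and will clean it up */
  }
  if (s->ref > 0) {
    /* still referenced: a real cache would move the node to the trash can */
  } else {
    /* unreferenced: a real cache would free the node right here */
  }
}
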
--- src/util/inc/hash.h | 2 +- src/util/src/hash.c | 5 +++-- src/util/src/tcache.c | 20 +++++++++++++------- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 900aa112e4..d3edc89585 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -103,7 +103,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); * @param key * @param keyLen */ -void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); +int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); void taosHashRemoveNode(); diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 37e8c37cb6..f59f25d153 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -258,14 +258,14 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { } } -void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { +int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); __wr_lock(pHashObj->lock); SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); if (pNode == NULL) { __unlock(pHashObj->lock); - return; + return -1; } SHashNode *pNext = pNode->next; @@ -289,6 +289,7 @@ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { pNode->prev = NULL; tfree(pNode); + return 0; } void taosHashCleanup(SHashObj *pHashObj) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 715a142c90..a5317cdda1 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -207,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false); + pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), true); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -268,7 +268,9 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v if (T_REF_VAL_GET(pOld) > 0) { // todo removed by node, instead of by key - taosHashRemove(pCacheObj->pHashTable, pOld->key, pOld->keySize); + int32_t succ = taosHashRemove(pCacheObj->pHashTable, pOld->key, pOld->keySize); + assert(succ == 0); + addToTrashcan = true; } @@ -413,7 +415,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } if (_remove) { - __cache_wr_lock(pCacheObj); +// __cache_wr_lock(pCacheObj); // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. int32_t ref = T_REF_DEC(pNode); @@ -427,22 +429,26 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * that tries to do the same thing. 
*/ if (pNode->inTrashCan) { - __cache_unlock(pCacheObj); +// __cache_unlock(pCacheObj); if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); } } else { - taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - __cache_unlock(pCacheObj); + int32_t success = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); if (ref > 0) { assert(pNode->pTNodeHeader == NULL); // todo trashcan lock - taosAddToTrash(pCacheObj, pNode); + if (success) { + taosAddToTrash(pCacheObj, pNode); + } +// __cache_unlock(pCacheObj); } else { +// __cache_unlock(pCacheObj); + // taosCacheReleaseNode(pCacheObj, pNode); atomic_fetch_sub_ptr(&pCacheObj->totalSize, pNode->size); uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", From b52bae71ae2e7f8054ba77a9b6af37328972f729 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 00:54:31 +0800 Subject: [PATCH 016/190] [td-225] refactor codes. --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index a5317cdda1..77f3ea2db0 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -300,7 +300,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen void *pData = NULL; - __cache_rd_lock(pCacheObj); +// __cache_rd_lock(pCacheObj); SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); int32_t ref = 0; @@ -309,7 +309,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen pData = (*ptNode)->data; } - __cache_unlock(pCacheObj); +// __cache_unlock(pCacheObj); if (pData != NULL) { atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); From ce4a2802cd5332b12c1c9034d3436a4a1a7aba90 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 11:32:54 +0800 Subject: [PATCH 017/190] [td-225] update the hash func --- src/client/src/tscParseInsert.c | 2 +- src/client/src/tscUtil.c | 2 +- src/mnode/src/mnodeSdb.c | 2 +- src/mnode/src/mnodeTable.c | 2 +- src/query/src/qExecutor.c | 2 +- src/query/src/qResultbuf.c | 4 +- src/query/src/qTokenizer.c | 2 +- src/query/src/qUtil.c | 4 +- src/rpc/src/rpcMain.c | 2 +- src/sync/src/syncMain.c | 2 +- src/tsdb/src/tsdbMeta.c | 2 +- src/util/inc/hash.h | 6 +- src/util/src/hash.c | 50 ++++++++++++++-- src/util/src/tcache.c | 103 +++++++++++++++----------------- src/util/src/tkvstore.c | 2 +- src/vnode/src/vnodeMain.c | 2 +- 16 files changed, 114 insertions(+), 75 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index ae2370cd56..d113a43ecf 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1039,7 +1039,7 @@ int tsParseInsertSql(SSqlObj *pSql) { } if (NULL == pCmd->pTableList) { - pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); pCmd->pDataBlocks = taosArrayInit(4, POINTER_BYTES); if (NULL == pCmd->pTableList || NULL == pSql->cmd.pDataBlocks) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5ee3db36d1..9c2454e76c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -646,7 +646,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { STableDataBlocks* pOneTableBlock = 
taosArrayGetP(pTableDataBlockList, 0); int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); - void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); size_t total = taosArrayGetSize(pTableDataBlockList); diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 26efcfeac0..f09541d306 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -814,7 +814,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) { if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); } - pTable->iHandle = taosHashInit(pTable->hashSessions, hashFp, true); + pTable->iHandle = taosHashInit(pTable->hashSessions, hashFp, true, true); tsSdbObj.numOfTables++; tsSdbObj.tableList[pTable->tableId] = pTable; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 053a1522a7..85afd70b83 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -378,7 +378,7 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt atomic_add_fetch_32(&pStable->numOfTables, 1); if (pStable->vgHash == NULL) { - pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); } if (pStable->vgHash != NULL) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 66762a1ca5..8afc44f939 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5841,7 +5841,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, pQInfo->tableqinfoGroupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); pQInfo->tableqinfoGroupInfo.numOfTables = pTableGroupInfo->numOfTables; pQInfo->tableqinfoGroupInfo.map = taosHashInit(pTableGroupInfo->numOfTables, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); } int tableIndex = 0; diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 93ccad8e27..d2a124aa30 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -34,9 +34,9 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro pResBuf->lruList = tdListNew(POINTER_BYTES); // init id hash table - pResBuf->groupSet = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pResBuf->groupSet = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); pResBuf->assistBuf = malloc(pResBuf->pageSize + 2); // EXTRA BYTES - pResBuf->all = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pResBuf->all = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); char path[PATH_MAX] = {0}; getTmpfilePath("qbuf", path); diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 0ea0ff7bf3..a22c4d5f65 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -256,7 +256,7 @@ static void* KeywordHashTable = NULL; static void doInitKeywordsTable() { int numOfEntries = tListLen(keywordTable); - KeywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, false); + KeywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, 
false); for (int32_t i = 0; i < numOfEntries; i++) { keywordTable[i].len = strlen(keywordTable[i].name); void* ptr = &keywordTable[i]; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index c9143b3a53..ff3c6b20bb 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -40,7 +40,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun pWindowResInfo->type = type; _hash_fn_t fn = taosGetDefaultHashFunction(type); - pWindowResInfo->hashList = taosHashInit(threshold, fn, false); + pWindowResInfo->hashList = taosHashInit(threshold, fn, true, false); if (pWindowResInfo->hashList == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } @@ -107,7 +107,7 @@ void resetTimeWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowR pWindowResInfo->size = 0; _hash_fn_t fn = taosGetDefaultHashFunction(pWindowResInfo->type); - pWindowResInfo->hashList = taosHashInit(pWindowResInfo->capacity, fn, false); + pWindowResInfo->hashList = taosHashInit(pWindowResInfo->capacity, fn, true, false); pWindowResInfo->startTime = TSKEY_INITIAL_VAL; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 7425558535..4acf95d4f4 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -261,7 +261,7 @@ void *rpcOpen(const SRpcInit *pInit) { } if (pRpc->connType == TAOS_CONN_SERVER) { - pRpc->hash = taosHashInit(pRpc->sessions, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true); + pRpc->hash = taosHashInit(pRpc->sessions, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, true); if (pRpc->hash == NULL) { tError("%s failed to init string hash", pRpc->label); rpcClose(pRpc); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 93c4a9402f..afd765f2c2 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -96,7 +96,7 @@ static void syncModuleInitFunc() { return; } - vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); + vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); if (vgIdHash == NULL) { taosTmrCleanUp(syncTmrCtrl); taosCloseTcpThreadPool(tsTcpPool); diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 09bbbd8f4d..5fc2f3e253 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -436,7 +436,7 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) { goto _err; } - pMeta->uidMap = taosHashInit(TSDB_INIT_NTABLES * 1.1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pMeta->uidMap = taosHashInit(TSDB_INIT_NTABLES * 1.1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); if (pMeta->uidMap == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index d3edc89585..3c99a36764 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -44,6 +44,7 @@ typedef struct SHashObj { _hash_fn_t hashFp; // hash function _hash_free_fn_t freeFp; // hash node free callback function + bool enableUpdate; // enable update #if defined(LINUX) pthread_rwlock_t *lock; #else @@ -67,7 +68,7 @@ typedef struct SHashMutableIterator { * @param threadsafe thread safe or not * @return */ -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe); +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threadsafe); /** * return the size of hash table @@ -105,7 +106,8 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); */ int32_t taosHashRemove(SHashObj 
*pHashObj, const void *key, size_t keyLen); -void taosHashRemoveNode(); + +void* taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen); /** * clean up hash table diff --git a/src/util/src/hash.c b/src/util/src/hash.c index f59f25d153..e560a9c744 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -163,7 +163,7 @@ static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode); */ static SHashNode *getNextHashNode(SHashMutableIterator *pIter); -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) { +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threadsafe) { if (capacity == 0 || fn == NULL) { return NULL; } @@ -179,6 +179,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) { assert((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); pHashObj->hashFp = fn; + pHashObj->enableUpdate = update; pHashObj->hashList = (SHashNode **)calloc(pHashObj->capacity, POINTER_BYTES); if (pHashObj->hashList == NULL) { @@ -232,15 +233,21 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da doAddToHashTable(pHashObj, pNewNode); __unlock(pHashObj->lock); + + return 0; } else { - doUpdateHashNode(pNode, pNewNode); + // not support the update operation, return error + if (pHashObj->enableUpdate) { + doUpdateHashNode(pNode, pNewNode); + } + __unlock(pHashObj->lock); tfree(pNewNode->data) tfree(pNewNode); - } - return 0; + return pHashObj->enableUpdate? 0:-1; + } } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { @@ -288,10 +295,45 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { pNode->next = NULL; pNode->prev = NULL; + tfree(pNode->data); tfree(pNode); + return 0; } +void* taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen) { + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + + __wr_lock(pHashObj->lock); + SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); + if (pNode == NULL) { + __unlock(pHashObj->lock); + return NULL; + } + + SHashNode *pNext = pNode->next; + if (pNode->prev == NULL) { + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + assert(pHashObj->hashList[slot] == pNode); + + pHashObj->hashList[slot] = pNext; + } else { + pNode->prev->next = pNext; + } + + if (pNext != NULL) { + pNext->prev = pNode->prev; + } + + pHashObj->size -= 1; + __unlock(pHashObj->lock); + + pNode->next = NULL; + pNode->prev = NULL; + + return pNode; +} + void taosHashCleanup(SHashObj *pHashObj) { if (pHashObj == NULL) return; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 77f3ea2db0..2432e97552 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -207,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), true); + pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false, true); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -249,45 +249,49 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v return NULL; } - __cache_wr_lock(pCacheObj); - SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - SCacheDataNode * pOld = (pt != NULL) ? 
(*pt) : NULL; - - if (pOld == NULL) { // do addedTime to cache - T_REF_INC(pNode1); - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); - __cache_unlock(pCacheObj); +// __cache_wr_lock(pCacheObj); + T_REF_INC(pNode1); + int32_t succ = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + if (succ == 0) { atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); - uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 - "bytes size:%" PRId64 "bytes", + uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 + ", totalNum:%d totalSize:%" PRId64 "bytes size:%" PRId64 "bytes", pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); - } else { // old data exists, update the node - bool addToTrashcan = false; - if (T_REF_VAL_GET(pOld) > 0) { - + } else { // duplicated key exists + while (1) { // todo removed by node, instead of by key - int32_t succ = taosHashRemove(pCacheObj->pHashTable, pOld->key, pOld->keySize); - assert(succ == 0); + SHashNode *p = taosHashRemoveNode(pCacheObj->pHashTable, key, keyLen); - addToTrashcan = true; + // add to trashcan + if (p != NULL) { + SCacheDataNode* pCachedNode = *(SCacheDataNode**)p->data; + if (T_REF_VAL_GET(pCachedNode) == 0) { + tfree(pCachedNode); + } else { + taosAddToTrash(pCacheObj, pCachedNode); + uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, pCachedNode); + } + } + + assert(T_REF_VAL_GET(pNode1) == 1); + + int32_t ret = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + if (ret == 0) { + atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); + + uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 + ", totalNum:%d totalSize:%" PRId64 "bytes size:%" PRId64 "bytes", + pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, + (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); + + return pNode1; + + } else { + // failed, try again + } } - - T_REF_INC(pNode1); - - // addedTime new element to hashtable - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); - __cache_unlock(pCacheObj); - - // todo add trashcan lock - if (addToTrashcan) { - taosAddToTrash(pCacheObj, pOld); - } else { - free(pOld); - } - - uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, pOld); } return pNode1->data; @@ -415,8 +419,6 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } if (_remove) { -// __cache_wr_lock(pCacheObj); - // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. int32_t ref = T_REF_DEC(pNode); uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref); @@ -429,34 +431,27 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * that tries to do the same thing. 
*/ if (pNode->inTrashCan) { -// __cache_unlock(pCacheObj); - if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); } } else { int32_t success = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + if (success) { + if (ref > 0) { + assert(pNode->pTNodeHeader == NULL); - if (ref > 0) { - assert(pNode->pTNodeHeader == NULL); - - // todo trashcan lock - if (success) { + // todo trashcan lock taosAddToTrash(pCacheObj, pNode); + } else { // ref == 0 + atomic_fetch_sub_ptr(&pCacheObj->totalSize, pNode->size); + uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", + pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), + pCacheObj->totalSize, pNode->size); + + if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); + free(pNode); } -// __cache_unlock(pCacheObj); - } else { -// __cache_unlock(pCacheObj); - -// taosCacheReleaseNode(pCacheObj, pNode); - atomic_fetch_sub_ptr(&pCacheObj->totalSize, pNode->size); - uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, - pNode->size); - - if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); - free(pNode); } } diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index d7bf9d7857..602ea8c96d 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -419,7 +419,7 @@ static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void pStore->iFunc = iFunc; pStore->aFunc = aFunc; pStore->appH = appH; - pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); if (pStore->map == NULL) { terrno = TSDB_CODE_COM_OUT_OF_MEMORY; goto _err; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 77d4503d9d..e8919e5fce 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -50,7 +50,7 @@ int32_t vnodeInitResources() { vnodeInitWriteFp(); vnodeInitReadFp(); - tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); + tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); if (tsDnodeVnodesHash == NULL) { vError("failed to init vnode list"); return TSDB_CODE_VND_OUT_OF_MEMORY; From 97616d2ad1c664f95f549bbc61d51575dbe8dd96 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 11:50:13 +0800 Subject: [PATCH 018/190] [td-225] update the hash func --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 2432e97552..4e2c41d1bc 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -436,8 +436,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); } } else { - int32_t success = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - if (success) { + int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + if (ret == 0) { if (ref > 0) { assert(pNode->pTNodeHeader == NULL); From 285d732e7fa994d3fa09790c1134451a727a07f2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 16:16:32 +0800 Subject: 
[PATCH 019/190] [td-225] update the hash func --- src/util/inc/hash.h | 49 +++-- src/util/src/hash.c | 418 ++++++++++++++++++++++++------------ src/util/src/tcache.c | 32 +-- src/util/tests/hashTest.cpp | 6 +- 4 files changed, 330 insertions(+), 175 deletions(-) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 3c99a36764..c5168a92ee 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -20,7 +20,9 @@ extern "C" { #endif +#include "tarray.h" #include "hashfunc.h" +#include "tlockfree.h" #define HASH_MAX_CAPACITY (1024 * 1024 * 16) #define HASH_DEFAULT_LOAD_FACTOR (0.75) @@ -37,23 +39,41 @@ typedef struct SHashNode { char *data; } SHashNode; -typedef struct SHashObj { - SHashNode **hashList; - size_t capacity; // number of slots - size_t size; // number of elements in hash table - _hash_fn_t hashFp; // hash function - _hash_free_fn_t freeFp; // hash node free callback function +typedef enum SHashLockTypeE { + HASH_NO_LOCK = 0, + HASH_GLOBAL_LOCK = 1, + HASH_ENTRY_LOCK = 2, +} SHashLockTypeE; - bool enableUpdate; // enable update +typedef struct SHashLock { #if defined(LINUX) pthread_rwlock_t *lock; #else - pthread_mutex_t *lock; + pthread_mutex_t *lock; #endif +} SHashLock; + +typedef struct SHashEntry { + int32_t num; // number of elements in current entry + SRWLatch latch; // entry latch + SHashNode head; // dummy head +} SHashEntry; + +typedef struct SHashObj { + SHashEntry **hashList; + size_t capacity; // number of slots + size_t size; // number of elements in hash table + _hash_fn_t hashFp; // hash function + _hash_free_fn_t freeFp; // hash node free callback function + + SHashLock lock; + SHashLockTypeE lockType; // lock type + bool enableUpdate; // enable update + SArray *pMemBlock; // memory block allocated for SHashEntry } SHashObj; typedef struct SHashMutableIterator { - SHashObj * pHashObj; + SHashObj *pHashObj; int32_t entryIndex; SHashNode *pCur; SHashNode *pNext; // current node can be deleted for mutable iterator, so keep the next one before return current @@ -68,7 +88,7 @@ typedef struct SHashMutableIterator { * @param threadsafe thread safe or not * @return */ -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threadsafe); +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTypeE type); /** * return the size of hash table @@ -107,7 +127,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); -void* taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen); +int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize); /** * clean up hash table @@ -115,13 +135,6 @@ void* taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen); */ void taosHashCleanup(SHashObj *pHashObj); -/** - * Set the free callback function - * This function if set will be invoked right before freeing each hash node - * @param pHashObj - */ -void taosHashSetFreecb(SHashObj *pHashObj, _hash_free_fn_t freeFp); - /** * * @param pHashObj diff --git a/src/util/src/hash.c b/src/util/src/hash.c index e560a9c744..bff8d1e50a 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -100,7 +100,19 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t hashVal) { int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); - SHashNode *pNode = 
pHashObj->hashList[slot]; + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + int32_t num = atomic_load_32(&pe->num); + if (num == 0) { + return NULL; + } + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosRLockLatch(&pe->latch); + } + + SHashNode* pNode = pe->head.next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); @@ -109,7 +121,11 @@ FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *k pNode = pNode->next; } - + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pe->latch); + } + return pNode; } @@ -154,7 +170,7 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNe * @param pHashObj * @param pNode */ -static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode); +static void pushfrontNode(SHashEntry* pEntry, SHashNode *pNode); /** * Get the next element in hash table for iterator @@ -163,7 +179,7 @@ static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode); */ static SHashNode *getNextHashNode(SHashMutableIterator *pIter); -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threadsafe) { +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTypeE type) { if (capacity == 0 || fn == NULL) { return NULL; } @@ -179,24 +195,35 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threads assert((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); pHashObj->hashFp = fn; + pHashObj->lockType = type; pHashObj->enableUpdate = update; - pHashObj->hashList = (SHashNode **)calloc(pHashObj->capacity, POINTER_BYTES); + pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(void*)); if (pHashObj->hashList == NULL) { free(pHashObj); uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; + } else { + + pHashObj->pMemBlock = taosArrayInit(8, sizeof(void*)); + + void* p = calloc(pHashObj->capacity, sizeof(SHashEntry)); + for(int32_t i = 0; i < pHashObj->capacity; ++i) { + pHashObj->hashList[i] = p + i * sizeof(SHashEntry); + } + + taosArrayPush(pHashObj->pMemBlock, &p); } - if (threadsafe) { + if (pHashObj->lockType != HASH_NO_LOCK) { #if defined(LINUX) - pHashObj->lock = calloc(1, sizeof(pthread_rwlock_t)); + pHashObj->lock.lock = calloc(1, sizeof(pthread_rwlock_t)); #else - pHashObj->lock = calloc(1, sizeof(pthread_mutex_t)); + pHashObj->lock.lock = calloc(1, sizeof(pthread_mutex_t)); #endif } - if (__lock_init(pHashObj->lock) != 0) { + if (__lock_init(pHashObj->lock.lock) != 0) { free(pHashObj->hashList); free(pHashObj); @@ -208,11 +235,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, bool threads } size_t taosHashGetSize(const SHashObj *pHashObj) { - if (pHashObj == NULL) { - return 0; - } - - return pHashObj->size; + return (pHashObj == NULL)? 
0:pHashObj->size; } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { @@ -222,17 +245,43 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da return -1; } - __wr_lock(pHashObj->lock); - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); + // need the resize process, write lock applied + if (HASH_NEED_RESIZE(pHashObj)) { + __wr_lock(pHashObj->lock.lock); + taosHashTableResize(pHashObj); + __unlock(pHashObj->lock.lock); + } - if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table + __rd_lock(pHashObj->lock.lock); - if (HASH_NEED_RESIZE(pHashObj)) { - taosHashTableResize(pHashObj); + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWLockLatch(&pe->latch); + } + + SHashNode* pNode = pe->head.next; + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); + break; } - doAddToHashTable(pHashObj, pNewNode); - __unlock(pHashObj->lock); + pNode = pNode->next; + } + + if (pNode == NULL) { + // no data in hash table with the specified key, add it into hash table + pushfrontNode(pe, pNewNode); + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); + } + + // enable resize + __unlock(pHashObj->lock.lock); + atomic_add_fetch_64(&pHashObj->size, 1); return 0; } else { @@ -241,7 +290,12 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da doUpdateHashNode(pNode, pNewNode); } - __unlock(pHashObj->lock); + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); + } + + // enable resize + __unlock(pHashObj->lock.lock); tfree(pNewNode->data) tfree(pNewNode); @@ -251,11 +305,18 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { + if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) { + return NULL; + } + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); - __rd_lock(pHashObj->lock); + // only add the read lock to disable the resize process + __rd_lock(pHashObj->lock.lock); + SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); - __unlock(pHashObj->lock); + + __unlock(pHashObj->lock.lock); if (pNode) { assert(pNode->hashVal == hashVal); @@ -266,91 +327,167 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { } int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { - uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); - - __wr_lock(pHashObj->lock); - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); - if (pNode == NULL) { - __unlock(pHashObj->lock); + if (pHashObj->size <= 0) { return -1; } - SHashNode *pNext = pNode->next; - if (pNode->prev == NULL) { - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); - assert(pHashObj->hashList[slot] == pNode); - - pHashObj->hashList[slot] = pNext; - } else { - pNode->prev->next = pNext; - } - - if (pNext != NULL) { - pNext->prev = pNode->prev; - } - - pHashObj->size -= 1; - __unlock(pHashObj->lock); - - pNode->next = NULL; - pNode->prev = NULL; - - tfree(pNode->data); - tfree(pNode); - - return 0; -} - -void* taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen) { uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); - __wr_lock(pHashObj->lock); - SHashNode 
*pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); - if (pNode == NULL) { - __unlock(pHashObj->lock); - return NULL; + // disable the resize process + __rd_lock(pHashObj->lock.lock); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + if (pe->num == 0) { + __unlock(pHashObj->lock.lock); + return -1; } - SHashNode *pNext = pNode->next; - if (pNode->prev == NULL) { - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); - assert(pHashObj->hashList[slot] == pNode); + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWLockLatch(&pe->latch); + } - pHashObj->hashList[slot] = pNext; - } else { + SHashNode* pNode = pe->head.next; + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); + break; + } + + pNode = pNode->next; + } + + if (pNode != NULL) { + assert(pNode->prev != NULL); + + SHashNode *pNext = pNode->next; pNode->prev->next = pNext; + + if (pNext != NULL) { + pNext->prev = pNode->prev; + } + + pe->num -= 1; } - if (pNext != NULL) { - pNext->prev = pNode->prev; + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); } - pHashObj->size -= 1; - __unlock(pHashObj->lock); + __unlock(pHashObj->lock.lock); - pNode->next = NULL; - pNode->prev = NULL; + if (pNode != NULL) { + atomic_sub_fetch_64(&pHashObj->size, 1); - return pNode; + pNode->next = NULL; + pNode->prev = NULL; + + tfree(pNode->data); + tfree(pNode); + + return 0; + } else { + return -1; + } +} + +int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize) { + if (pHashObj->size <= 0) { + return -1; + } + + uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + + // disable the resize process + __rd_lock(pHashObj->lock.lock); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + if (pe->num == 0) { + __unlock(pHashObj->lock.lock); + return -1; + } + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + taosWLockLatch(&pe->latch); + } + + SHashNode* pNode = pe->head.next; + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); + break; + } + + pNode = pNode->next; + } + + if (pNode != NULL) { + assert(pNode->prev != NULL); + + SHashNode *pNext = pNode->next; + pNode->prev->next = pNext; + + if (pNext != NULL) { + pNext->prev = pNode->prev; + } + } + + if (pHashObj->lockType == HASH_ENTRY_LOCK) { + pe->num -= 1; + taosWUnLockLatch(&pe->latch); + } + + __unlock(pHashObj->lock.lock); + + atomic_sub_fetch_64(&pHashObj->size, 1); + + if (data != NULL) { + memcpy(data, pNode->data, dsize); + } + + if (pNode != NULL) { + pNode->next = NULL; + pNode->prev = NULL; + + tfree(pNode->data); + tfree(pNode); + + return 0; + } else { + return -1; + } } void taosHashCleanup(SHashObj *pHashObj) { - if (pHashObj == NULL) return; + if (pHashObj == NULL) { + return; + } SHashNode *pNode, *pNext; - __wr_lock(pHashObj->lock); + __wr_lock(pHashObj->lock.lock); if (pHashObj->hashList) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pNode = pHashObj->hashList[i]; + SHashEntry* pEntry = pHashObj->hashList[i]; + if (pEntry->num == 0) { + assert(pEntry->head.next == 0); + continue; + } + pNode = pEntry->head.next; while (pNode) { pNext = pNode->next; if (pHashObj->freeFp) { pHashObj->freeFp(pNode->data); } + free(pNode->data); free(pNode); pNode = pNext; } 
@@ -359,10 +496,20 @@ void taosHashCleanup(SHashObj *pHashObj) { free(pHashObj->hashList); } - __unlock(pHashObj->lock); - __lock_destroy(pHashObj->lock); + __unlock(pHashObj->lock.lock); + __lock_destroy(pHashObj->lock.lock); + + tfree(pHashObj->lock.lock); + + // destroy mem block + size_t memBlock = taosArrayGetSize(pHashObj->pMemBlock); + for(int32_t i = 0; i < memBlock; ++i) { + void* p = taosArrayGetP(pHashObj->pMemBlock, i); + tfree(p); + } + + taosArrayDestroy(pHashObj->pMemBlock); - tfree(pHashObj->lock); memset(pHashObj, 0, sizeof(SHashObj)); free(pHashObj); } @@ -392,13 +539,13 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { assert(pIter->pCur == NULL && pIter->pNext == NULL); while (1) { - SHashNode *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; - if (pEntry == NULL) { + SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; + if (pEntry->num == 0) { pIter->entryIndex++; continue; } - pIter->pCur = pEntry; + pIter->pCur = pEntry->head.next; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; @@ -451,19 +598,9 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) { int32_t num = 0; for (int32_t i = 0; i < pHashObj->size; ++i) { - SHashNode *pEntry = pHashObj->hashList[i]; - if (pEntry == NULL) { - continue; - } - - int32_t j = 0; - while(pEntry != NULL) { - pEntry = pEntry->next; - j++; - } - - if (num < j) { - num = j; + SHashEntry *pEntry = pHashObj->hashList[i]; + if (num < pEntry->num) { + num = pEntry->num; } } @@ -471,7 +608,7 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) { } void taosHashTableResize(SHashObj *pHashObj) { - if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) { + if (!HASH_NEED_RESIZE(pHashObj)) { return; } @@ -486,37 +623,43 @@ void taosHashTableResize(SHashObj *pHashObj) { return; } - int32_t pointerSize = POINTER_BYTES; - void *pNewEntry = realloc(pHashObj->hashList, pointerSize * newSize); - if (pNewEntry == NULL) {// todo handle error + void *pNewEntryList = realloc(pHashObj->hashList, sizeof(SHashEntry) * newSize); + if (pNewEntryList == NULL) {// todo handle error // uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); return; } - pHashObj->hashList = pNewEntry; - memset(&pHashObj->hashList[pHashObj->capacity], 0, POINTER_BYTES * (newSize - pHashObj->capacity)); - + pHashObj->hashList = pNewEntryList; + + size_t inc = newSize - pHashObj->capacity; + void* p = calloc(inc, sizeof(SHashEntry)); + + for(int32_t i = 0; i < inc; ++i) { + pHashObj->hashList[i + pHashObj->capacity] = p + i * sizeof(SHashEntry); + } + + taosArrayPush(pHashObj->pMemBlock, &p); + pHashObj->capacity = newSize; - for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pNode = pHashObj->hashList[i]; - if (pNode != NULL) { - assert(pNode->prev == NULL); + SHashEntry* pe = pHashObj->hashList[i]; + if (pe->num == 0) { + assert(pe->head.next == NULL); + continue; } - + + pNode = pe->head.next; while (pNode) { int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity); if (j == i) { // this key locates in the same slot, no need to relocate it pNode = pNode->next; + assert(pNode == NULL || pNode->next != pNode); } else { pNext = pNode->next; - - if (pNode->prev == NULL) { // first node of the overflow linked list - pHashObj->hashList[i] = pNext; - } else { - pNode->prev->next = pNext; - } - + assert(pNode != pNext && (pNext == NULL || pNext->prev == pNode) && pNode->prev->next == pNode); + + assert(pNode->prev != NULL); + pNode->prev->next = pNext; if (pNext != 
NULL) { pNext->prev = pNode->prev; } @@ -524,17 +667,12 @@ void taosHashTableResize(SHashObj *pHashObj) { // clear pointer pNode->next = NULL; pNode->prev = NULL; + pe->num -= 1; // added into new slot - SHashNode *pNew = pHashObj->hashList[j]; - if (pNew != NULL) { - assert(pNew->prev == NULL); - pNew->prev = pNode; - } - - pNode->next = pNew; - pHashObj->hashList[j] = pNode; - + SHashEntry *pNewEntry = pHashObj->hashList[j]; + pushfrontNode(pNewEntry, pNode); + // continue pNode = pNext; } @@ -563,21 +701,19 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s return pNewNode; } -void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) { - assert(pNode != NULL); +void pushfrontNode(SHashEntry* pEntry, SHashNode *pNode) { + assert(pNode != NULL && pEntry != NULL); - int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity); - - SHashNode* pEntry = pHashObj->hashList[index]; - if (pEntry != NULL) { - pEntry->prev = pNode; - - pNode->next = pEntry; - pNode->prev = NULL; + SHashNode* pNext = pEntry->head.next; + if (pNext != NULL) { + pNext->prev = pNode; } - - pHashObj->hashList[index] = pNode; - pHashObj->size++; + + pNode->next = pNext; + pNode->prev = &pEntry->head; + pEntry->head.next = pNode; + + pEntry->num += 1; } SHashNode *getNextHashNode(SHashMutableIterator *pIter) { @@ -585,13 +721,13 @@ SHashNode *getNextHashNode(SHashMutableIterator *pIter) { pIter->entryIndex++; while (pIter->entryIndex < pIter->pHashObj->capacity) { - SHashNode *pNode = pIter->pHashObj->hashList[pIter->entryIndex]; - if (pNode == NULL) { + SHashEntry*pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; + if (pEntry->num == 0) { pIter->entryIndex++; continue; } - return pNode; + return pEntry->head.next; } return NULL; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 4e2c41d1bc..65b2e99337 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -207,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false, true); + pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false, HASH_ENTRY_LOCK); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -249,7 +249,6 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v return NULL; } -// __cache_wr_lock(pCacheObj); T_REF_INC(pNode1); int32_t succ = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); @@ -261,23 +260,27 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); } else { // duplicated key exists while (1) { - // todo removed by node, instead of by key - SHashNode *p = taosHashRemoveNode(pCacheObj->pHashTable, key, keyLen); + SCacheDataNode* p = NULL; + int32_t ret = taosHashRemoveNode(pCacheObj->pHashTable, key, keyLen, (void*) &p, sizeof(void*)); // add to trashcan - if (p != NULL) { - SCacheDataNode* pCachedNode = *(SCacheDataNode**)p->data; - if (T_REF_VAL_GET(pCachedNode) == 0) { - tfree(pCachedNode); + if (ret == 0) { + if (T_REF_VAL_GET(p) == 0) { + + if (pCacheObj->freeFp) { + pCacheObj->freeFp(p->data); + } + + tfree(p); } else { - taosAddToTrash(pCacheObj, pCachedNode); - uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, pCachedNode); + taosAddToTrash(pCacheObj, p); 
+ uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p); } } assert(T_REF_VAL_GET(pNode1) == 1); - int32_t ret = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + ret = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); if (ret == 0) { atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); @@ -430,14 +433,16 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread * that tries to do the same thing. */ - if (pNode->inTrashCan) { + if (inTrashCan) { if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); + + // todo add lock here taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); } } else { int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - if (ret == 0) { + if (ret == 0) { // successfully remove from hash table if (ref > 0) { assert(pNode->pTNodeHeader == NULL); @@ -459,6 +464,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. int32_t ref = T_REF_DEC(pNode); + // todo so, invalid read here! uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, pNode->key, pNode->data, ref, inTrashCan); } diff --git a/src/util/tests/hashTest.cpp b/src/util/tests/hashTest.cpp index 93a1989741..16b300a831 100644 --- a/src/util/tests/hashTest.cpp +++ b/src/util/tests/hashTest.cpp @@ -10,7 +10,7 @@ namespace { // the simple test code for basic operations void simpleTest() { - auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + SHashObj* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); // put 400 elements in the hash table @@ -47,7 +47,7 @@ void simpleTest() { } void stringKeyTest() { - auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false); + auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); char key[128] = {0}; @@ -97,7 +97,7 @@ void functionTest() { * a single threads situation */ void noLockPerformanceTest() { - auto* hashTable = (SHashObj*) taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false); + auto* hashTable = (SHashObj*) taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); char key[128] = {0}; From 99e15e0d6d1644721442ef3c187232c29b595605 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 16:31:19 +0800 Subject: [PATCH 020/190] [td-225] update the hash func --- src/query/src/qExecutor.c | 2 +- src/util/src/tcache.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 8afc44f939..9ae407e078 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6640,7 +6640,7 @@ void freeqinfoFn(void *qhandle) { } void* qOpenQueryMgmt(int32_t vgId) { - const int32_t REFRESH_HANDLE_INTERVAL = 30; // every 30 seconds, refresh handle pool + const int32_t REFRESH_HANDLE_INTERVAL = 60; // every 30 seconds, refresh handle pool char cacheName[128] = {0}; sprintf(cacheName, 
"qhandle_%d", vgId); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 65b2e99337..3efa31dc84 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -108,7 +108,7 @@ static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force); /** * release node * @param pCacheObj cache object - * @param pNode data node + * @param pNode data node */ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNode *pNode) { if (pNode->signature != (uint64_t)pNode) { @@ -207,7 +207,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false, HASH_ENTRY_LOCK); + pCacheObj->pHashTable = taosHashInit(4096, taosGetDefaultHashFunction(keyType), false, HASH_ENTRY_LOCK); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -648,12 +648,12 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - __cache_wr_lock(pCacheObj); +// __cache_wr_lock(pCacheObj); while (taosHashIterNext(pIter)) { SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); if (pNode->expireTime < time && T_REF_VAL_GET(pNode) <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); +// taosCacheReleaseNode(pCacheObj, pNode); continue; } @@ -662,7 +662,7 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t } } - __cache_unlock(pCacheObj); +// __cache_unlock(pCacheObj); taosHashDestroyIter(pIter); } From 6f3f7f7ff993c1c54d6589b1d81f58e1eea5442b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 17:27:12 +0800 Subject: [PATCH 021/190] [td-225] update the hash func --- src/util/inc/hash.h | 22 ++-- src/util/src/hash.c | 233 +++++++++++------------------------------- src/util/src/tcache.c | 2 +- 3 files changed, 72 insertions(+), 185 deletions(-) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index c5168a92ee..e22716241d 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -41,17 +41,17 @@ typedef struct SHashNode { typedef enum SHashLockTypeE { HASH_NO_LOCK = 0, - HASH_GLOBAL_LOCK = 1, - HASH_ENTRY_LOCK = 2, +// HASH_GLOBAL_LOCK = 1, + HASH_ENTRY_LOCK = 1, } SHashLockTypeE; -typedef struct SHashLock { -#if defined(LINUX) - pthread_rwlock_t *lock; -#else - pthread_mutex_t *lock; -#endif -} SHashLock; +//typedef struct SHashLock { +//#if defined(LINUX) +// pthread_rwlock_t *lock; +//#else +// pthread_mutex_t *lock; +//#endif +//} SHashLock; typedef struct SHashEntry { int32_t num; // number of elements in current entry @@ -66,8 +66,8 @@ typedef struct SHashObj { _hash_fn_t hashFp; // hash function _hash_free_fn_t freeFp; // hash node free callback function - SHashLock lock; - SHashLockTypeE lockType; // lock type + SRWLatch lock; // read-write spin lock + SHashLockTypeE type; // lock type bool enableUpdate; // enable update SArray *pMemBlock; // memory block allocated for SHashEntry } SHashObj; diff --git a/src/util/src/hash.c b/src/util/src/hash.c index bff8d1e50a..5db9d26d04 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -21,64 +21,35 @@ #define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) -static FORCE_INLINE void __wr_lock(void *lock) { - if (lock == NULL) { +static FORCE_INLINE void __wr_lock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { + return; + } + 
taosWLockLatch(lock); +} + +static FORCE_INLINE void __rd_lock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_wrlock(lock); -#else - pthread_mutex_lock(lock); -#endif + taosRLockLatch(lock); } -static FORCE_INLINE void __rd_lock(void *lock) { - if (lock == NULL) { +static FORCE_INLINE void __rd_unlock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_rdlock(lock); -#else - pthread_mutex_lock(lock); -#endif + taosRUnLockLatch(lock); } -static FORCE_INLINE void __unlock(void *lock) { - if (lock == NULL) { +static FORCE_INLINE void __wr_unlock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_unlock(lock); -#else - pthread_mutex_unlock(lock); -#endif -} - -static FORCE_INLINE int32_t __lock_init(void *lock) { - if (lock == NULL) { - return 0; - } - -#if defined(LINUX) - return pthread_rwlock_init(lock, NULL); -#else - return pthread_mutex_init(lock, NULL); -#endif -} - -static FORCE_INLINE void __lock_destroy(void *lock) { - if (lock == NULL) { - return; - } - -#if defined(LINUX) - pthread_rwlock_destroy(lock); -#else - pthread_mutex_destroy(lock); -#endif + taosWUnLockLatch(lock); } static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { @@ -97,32 +68,38 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { * @param hashVal hash value by hash function * @return */ -FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t hashVal) { - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); - - SHashEntry *pe = pHashObj->hashList[slot]; - - // no data, return directly - int32_t num = atomic_load_32(&pe->num); - if (num == 0) { - return NULL; - } - - if (pHashObj->lockType == HASH_ENTRY_LOCK) { - taosRLockLatch(&pe->latch); - } +static FORCE_INLINE SHashNode* doSearchEntryList(SHashEntry* pe, const void* key, size_t keyLen, uint32_t hashVal) { SHashNode* pNode = pe->head.next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); break; } - + pNode = pNode->next; } - if (pHashObj->lockType == HASH_ENTRY_LOCK) { + return pNode; +} + +static FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t hashVal) { + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + if (atomic_load_32(&pe->num) == 0) { + return NULL; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pe->latch); + } + + SHashNode* pNode = doSearchEntryList(pe, key, keyLen, hashVal); + + if (pHashObj->type == HASH_ENTRY_LOCK) { taosRUnLockLatch(&pe->latch); } @@ -195,7 +172,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp assert((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); pHashObj->hashFp = fn; - pHashObj->lockType = type; + pHashObj->type = type; pHashObj->enableUpdate = update; pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(void*)); @@ -215,22 +192,6 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp taosArrayPush(pHashObj->pMemBlock, &p); } - if (pHashObj->lockType != HASH_NO_LOCK) { -#if defined(LINUX) - pHashObj->lock.lock = calloc(1, sizeof(pthread_rwlock_t)); -#else - pHashObj->lock.lock = calloc(1, sizeof(pthread_mutex_t)); -#endif - } - - if 
(__lock_init(pHashObj->lock.lock) != 0) { - free(pHashObj->hashList); - free(pHashObj); - - uError("failed to init lock, reason:%s", strerror(errno)); - return NULL; - } - return pHashObj; } @@ -247,17 +208,17 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // need the resize process, write lock applied if (HASH_NEED_RESIZE(pHashObj)) { - __wr_lock(pHashObj->lock.lock); + __wr_lock(&pHashObj->lock, pHashObj->type); taosHashTableResize(pHashObj); - __unlock(pHashObj->lock.lock); + __wr_unlock(&pHashObj->lock, pHashObj->type); } - __rd_lock(pHashObj->lock.lock); + __rd_lock(&pHashObj->lock, pHashObj->type); int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; - if (pHashObj->lockType == HASH_ENTRY_LOCK) { + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWLockLatch(&pe->latch); } @@ -275,12 +236,12 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // no data in hash table with the specified key, add it into hash table pushfrontNode(pe, pNewNode); - if (pHashObj->lockType == HASH_ENTRY_LOCK) { + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } // enable resize - __unlock(pHashObj->lock.lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); atomic_add_fetch_64(&pHashObj->size, 1); return 0; @@ -290,12 +251,12 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da doUpdateHashNode(pNode, pNewNode); } - if (pHashObj->lockType == HASH_ENTRY_LOCK) { + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } // enable resize - __unlock(pHashObj->lock.lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); tfree(pNewNode->data) tfree(pNewNode); @@ -312,11 +273,11 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); // only add the read lock to disable the resize process - __rd_lock(pHashObj->lock.lock); + __rd_lock(&pHashObj->lock, pHashObj->type); SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); - __unlock(pHashObj->lock.lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); if (pNode) { assert(pNode->hashVal == hashVal); @@ -327,70 +288,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { } int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { - if (pHashObj->size <= 0) { - return -1; - } - - uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); - - // disable the resize process - __rd_lock(pHashObj->lock.lock); - - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); - SHashEntry *pe = pHashObj->hashList[slot]; - - // no data, return directly - if (pe->num == 0) { - __unlock(pHashObj->lock.lock); - return -1; - } - - if (pHashObj->lockType == HASH_ENTRY_LOCK) { - taosWLockLatch(&pe->latch); - } - - SHashNode* pNode = pe->head.next; - while (pNode) { - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { - assert(pNode->hashVal == hashVal); - break; - } - - pNode = pNode->next; - } - - if (pNode != NULL) { - assert(pNode->prev != NULL); - - SHashNode *pNext = pNode->next; - pNode->prev->next = pNext; - - if (pNext != NULL) { - pNext->prev = pNode->prev; - } - - pe->num -= 1; - } - - if (pHashObj->lockType == HASH_ENTRY_LOCK) { - taosWUnLockLatch(&pe->latch); - } - - __unlock(pHashObj->lock.lock); - - if (pNode != NULL) { - atomic_sub_fetch_64(&pHashObj->size, 1); - - pNode->next = NULL; - pNode->prev = NULL; - - tfree(pNode->data); - tfree(pNode); - - 
return 0; - } else { - return -1; - } + return taosHashRemoveNode(pHashObj, key, keyLen, NULL, 0); } int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize) { @@ -401,31 +299,22 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); // disable the resize process - __rd_lock(pHashObj->lock.lock); + __rd_lock(&pHashObj->lock, pHashObj->type); int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; // no data, return directly if (pe->num == 0) { - __unlock(pHashObj->lock.lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); return -1; } - if (pHashObj->lockType == HASH_ENTRY_LOCK) { + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWLockLatch(&pe->latch); } - SHashNode* pNode = pe->head.next; - while (pNode) { - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { - assert(pNode->hashVal == hashVal); - break; - } - - pNode = pNode->next; - } - + SHashNode* pNode = doSearchEntryList(pe, key, keyLen, hashVal); if (pNode != NULL) { assert(pNode->prev != NULL); @@ -435,14 +324,15 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v if (pNext != NULL) { pNext->prev = pNode->prev; } + + pe->num -= 1; } - if (pHashObj->lockType == HASH_ENTRY_LOCK) { - pe->num -= 1; + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } - __unlock(pHashObj->lock.lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); atomic_sub_fetch_64(&pHashObj->size, 1); @@ -470,7 +360,7 @@ void taosHashCleanup(SHashObj *pHashObj) { SHashNode *pNode, *pNext; - __wr_lock(pHashObj->lock.lock); + __wr_lock(&pHashObj->lock, pHashObj->type); if (pHashObj->hashList) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { @@ -496,10 +386,7 @@ void taosHashCleanup(SHashObj *pHashObj) { free(pHashObj->hashList); } - __unlock(pHashObj->lock.lock); - __lock_destroy(pHashObj->lock.lock); - - tfree(pHashObj->lock.lock); + __wr_unlock(&pHashObj->lock, pHashObj->type); // destroy mem block size_t memBlock = taosArrayGetSize(pHashObj->pMemBlock); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 3efa31dc84..9f6f0f77ec 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -289,7 +289,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); - return pNode1; + return pNode1->data; } else { // failed, try again From abd8b930fad1d433269c45be1600a177448ea555 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 17:54:21 +0800 Subject: [PATCH 022/190] [td-225] --- src/util/src/hash.c | 50 +++++++++++++++++++-------------------- src/util/src/tcache.c | 2 +- src/vnode/src/vnodeRead.c | 4 ++-- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 5db9d26d04..171c8ec100 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -21,6 +21,12 @@ #define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) +#define FREE_HASH_NODE(_n) \ + do { \ + tfree((_n)->data); \ + tfree(_n); \ + } while (0) + static FORCE_INLINE void __wr_lock(void *lock, int32_t type) { if (type == HASH_NO_LOCK) { return; @@ -258,9 +264,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // enable 
resize __rd_unlock(&pHashObj->lock, pHashObj->type); - tfree(pNewNode->data) - tfree(pNewNode); - + FREE_HASH_NODE(pNewNode); return pHashObj->enableUpdate? 0:-1; } } @@ -273,7 +277,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); // only add the read lock to disable the resize process - __rd_lock(&pHashObj->lock, pHashObj->type); + __rd_lock(&pHashObj->lock, pHashObj->type); SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); @@ -291,6 +295,18 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { return taosHashRemoveNode(pHashObj, key, keyLen, NULL, 0); } +static FORCE_INLINE void popNodeFromEntryList(SHashEntry* pe, SHashNode* pNode) { + SHashNode* pNext = pNode->next; + + assert(pNode->prev != NULL); + pNode->prev->next = pNext; + if (pNext != NULL) { + pNext->prev = pNode->prev; + } + + pe->num -= 1; +} + int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize) { if (pHashObj->size <= 0) { return -1; @@ -316,16 +332,7 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v SHashNode* pNode = doSearchEntryList(pe, key, keyLen, hashVal); if (pNode != NULL) { - assert(pNode->prev != NULL); - - SHashNode *pNext = pNode->next; - pNode->prev->next = pNext; - - if (pNext != NULL) { - pNext->prev = pNode->prev; - } - - pe->num -= 1; + popNodeFromEntryList(pe, pNode); } if (pHashObj->type == HASH_ENTRY_LOCK) { @@ -344,8 +351,7 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v pNode->next = NULL; pNode->prev = NULL; - tfree(pNode->data); - tfree(pNode); + FREE_HASH_NODE(pNode); return 0; } else { @@ -540,22 +546,16 @@ void taosHashTableResize(SHashObj *pHashObj) { int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity); if (j == i) { // this key locates in the same slot, no need to relocate it pNode = pNode->next; - assert(pNode == NULL || pNode->next != pNode); } else { pNext = pNode->next; assert(pNode != pNext && (pNext == NULL || pNext->prev == pNode) && pNode->prev->next == pNode); - assert(pNode->prev != NULL); - pNode->prev->next = pNext; - if (pNext != NULL) { - pNext->prev = pNode->prev; - } - + popNodeFromEntryList(pe, pNode); + // clear pointer pNode->next = NULL; pNode->prev = NULL; - pe->num -= 1; - + // added into new slot SHashEntry *pNewEntry = pHashObj->hashList[j]; pushfrontNode(pNewEntry, pNode); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 9f6f0f77ec..f8711828d1 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -653,7 +653,7 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); if (pNode->expireTime < time && T_REF_VAL_GET(pNode) <= 0) { -// taosCacheReleaseNode(pCacheObj, pNode); + taosCacheReleaseNode(pCacheObj, pNode); continue; } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3c642b5098..9b16cce66c 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -213,7 +213,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freehandle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } } @@ -268,7 +268,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); } - 
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); return code; } From 07726783e1ff732558b0b513b52356f8cc5a27b6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 31 Jul 2020 17:58:07 +0800 Subject: [PATCH 023/190] [td-225] --- src/query/src/qExecutor.c | 1 - src/vnode/src/vnodeRead.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 9ae407e078..899c49139a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6742,7 +6742,6 @@ void** qAcquireQInfo(void* pMgmt, uint64_t key) { void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { return NULL; } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 9b16cce66c..3c642b5098 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -213,7 +213,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freehandle); } } @@ -268,7 +268,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); return code; } From adcbb40fbef1fce1288532113c8d06ad759597df Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 1 Aug 2020 13:41:24 +0800 Subject: [PATCH 024/190] [td-225] --- src/util/inc/hash.h | 10 +- src/util/src/hash.c | 222 +++++++++++++++++++++++++----------- src/util/src/tcache.c | 259 ++++++++++++++++++++++-------------------- 3 files changed, 299 insertions(+), 192 deletions(-) diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index e22716241d..688bf317d6 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -76,8 +76,9 @@ typedef struct SHashMutableIterator { SHashObj *pHashObj; int32_t entryIndex; SHashNode *pCur; - SHashNode *pNext; // current node can be deleted for mutable iterator, so keep the next one before return current - int32_t num; // already check number of elements in hash table + SHashNode *pNext; // current node can be deleted for mutable iterator, so keep the next one before return current + size_t numOfChecked; // already check number of elements in hash table + size_t numOfEntries; // number of entries while the iterator is created } SHashMutableIterator; /** @@ -118,6 +119,8 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da */ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); +void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void(*fp)(void*)); + /** * remove item with the specified key * @param pHashObj @@ -126,8 +129,9 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); */ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); +int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize); -int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize); +int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param); /** * clean up hash table diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 171c8ec100..be4baf85d2 100644 --- a/src/util/src/hash.c 
+++ b/src/util/src/hash.c @@ -19,7 +19,7 @@ #include "tulog.h" #include "tutil.h" -#define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) +#define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) #define FREE_HASH_NODE(_n) \ do { \ @@ -75,8 +75,8 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { * @return */ -static FORCE_INLINE SHashNode* doSearchEntryList(SHashEntry* pe, const void* key, size_t keyLen, uint32_t hashVal) { - SHashNode* pNode = pe->head.next; +static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { + SHashNode *pNode = pe->head.next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); @@ -89,7 +89,8 @@ static FORCE_INLINE SHashNode* doSearchEntryList(SHashEntry* pe, const void* key return pNode; } -static FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t hashVal) { +static FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, + uint32_t hashVal) { int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; @@ -103,7 +104,7 @@ static FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const taosRLockLatch(&pe->latch); } - SHashNode* pNode = doSearchEntryList(pe, key, keyLen, hashVal); + SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); if (pHashObj->type == HASH_ENTRY_LOCK) { taosRUnLockLatch(&pe->latch); @@ -141,8 +142,8 @@ static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *p */ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNewNode) { assert(pNode->keyLen == pNewNode->keyLen); - SWAP(pNode->key, pNewNode->key, void*); - SWAP(pNode->data, pNewNode->data, void*); + SWAP(pNode->key, pNewNode->key, void *); + SWAP(pNode->data, pNewNode->data, void *); return pNewNode; } @@ -153,7 +154,7 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNe * @param pHashObj * @param pNode */ -static void pushfrontNode(SHashEntry* pEntry, SHashNode *pNode); +static void pushfrontNode(SHashEntry *pEntry, SHashNode *pNode); /** * Get the next element in hash table for iterator @@ -181,17 +182,16 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp pHashObj->type = type; pHashObj->enableUpdate = update; - pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(void*)); + pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(void *)); if (pHashObj->hashList == NULL) { free(pHashObj); uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } else { + pHashObj->pMemBlock = taosArrayInit(8, sizeof(void *)); - pHashObj->pMemBlock = taosArrayInit(8, sizeof(void*)); - - void* p = calloc(pHashObj->capacity, sizeof(SHashEntry)); - for(int32_t i = 0; i < pHashObj->capacity; ++i) { + void *p = calloc(pHashObj->capacity, sizeof(SHashEntry)); + for (int32_t i = 0; i < pHashObj->capacity; ++i) { pHashObj->hashList[i] = p + i * sizeof(SHashEntry); } @@ -201,9 +201,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp return pHashObj; } -size_t taosHashGetSize(const SHashObj *pHashObj) { - return (pHashObj == NULL)? 0:pHashObj->size; -} +size_t taosHashGetSize(const SHashObj *pHashObj) { return (pHashObj == NULL) ? 
0 : pHashObj->size; } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); @@ -221,14 +219,14 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da __rd_lock(&pHashObj->lock, pHashObj->type); - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; if (pHashObj->type == HASH_ENTRY_LOCK) { taosWLockLatch(&pe->latch); } - SHashNode* pNode = pe->head.next; + SHashNode *pNode = pe->head.next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); @@ -265,11 +263,15 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da __rd_unlock(&pHashObj->lock, pHashObj->type); FREE_HASH_NODE(pNewNode); - return pHashObj->enableUpdate? 0:-1; + return pHashObj->enableUpdate ? 0 : -1; } } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { + return taosHashGetCB(pHashObj, key, keyLen, NULL); +} + +void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *)) { if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) { return NULL; } @@ -279,24 +281,42 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { // only add the read lock to disable the resize process __rd_lock(&pHashObj->lock, pHashObj->type); - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, keyLen, hashVal); + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; - __rd_unlock(&pHashObj->lock, pHashObj->type); - - if (pNode) { - assert(pNode->hashVal == hashVal); - return pNode->data; - } else { + // no data, return directly + if (atomic_load_32(&pe->num) == 0) { + __rd_unlock(&pHashObj->lock, pHashObj->type); return NULL; } + + char *data = NULL; + + // lock entry + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pe->latch); + } + + SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); + if (fp != NULL) { + fp(pNode->data); + } + + data = pNode->data; + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pe->latch); + } + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return data; } int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { - return taosHashRemoveNode(pHashObj, key, keyLen, NULL, 0); + return taosHashRemoveWithData(pHashObj, key, keyLen, NULL, 0); } -static FORCE_INLINE void popNodeFromEntryList(SHashEntry* pe, SHashNode* pNode) { - SHashNode* pNext = pNode->next; +static FORCE_INLINE void doPopFromEntryList(SHashEntry *pe, SHashNode *pNode) { + SHashNode *pNext = pNode->next; assert(pNode->prev != NULL); pNode->prev->next = pNext; @@ -307,17 +327,17 @@ static FORCE_INLINE void popNodeFromEntryList(SHashEntry* pe, SHashNode* pNode) pe->num -= 1; } -int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize) { - if (pHashObj->size <= 0) { +int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) { + if (pHashObj == NULL || pHashObj->size <= 0) { return -1; } uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); // disable the resize process - __rd_lock(&pHashObj->lock, pHashObj->type); + __rd_lock(&pHashObj->lock, pHashObj->type); - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + int32_t slot = HASH_INDEX(hashVal, 
pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; // no data, return directly @@ -330,9 +350,9 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v taosWLockLatch(&pe->latch); } - SHashNode* pNode = doSearchEntryList(pe, key, keyLen, hashVal); + SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); if (pNode != NULL) { - popNodeFromEntryList(pe, pNode); + doPopFromEntryList(pe, pNode); } if (pHashObj->type == HASH_ENTRY_LOCK) { @@ -341,13 +361,13 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v __rd_unlock(&pHashObj->lock, pHashObj->type); - atomic_sub_fetch_64(&pHashObj->size, 1); - if (data != NULL) { memcpy(data, pNode->data, dsize); } if (pNode != NULL) { + atomic_sub_fetch_64(&pHashObj->size, 1); + pNode->next = NULL; pNode->prev = NULL; @@ -359,6 +379,49 @@ int32_t taosHashRemoveNode(SHashObj *pHashObj, const void *key, size_t keyLen, v } } +int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param) { + if (pHashObj == NULL || pHashObj->size == 0) { + return 0; + } + + // disable the resize process + __rd_lock(&pHashObj->lock, pHashObj->type); + + int32_t numOfEntries = pHashObj->capacity; + for (int32_t i = 0; i < numOfEntries; ++i) { + SHashEntry *pEntry = pHashObj->hashList[i]; + if (pEntry->num <= 0) { + continue; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWLockLatch(&pEntry->latch); + } + + SHashNode *pNode = pEntry->head.next; + assert(pNode != NULL); + + SHashNode *pNext = NULL; + while (pNode != NULL) { + pNext = pNode->next; + + // not qualified, remove it + if (fp && (!fp(param, pNode->data))) { + doPopFromEntryList(pEntry, pNode); + } + + pNode = pNext; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pEntry->latch); + } + } + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return 0; +} + void taosHashCleanup(SHashObj *pHashObj) { if (pHashObj == NULL) { return; @@ -370,7 +433,7 @@ void taosHashCleanup(SHashObj *pHashObj) { if (pHashObj->hashList) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { - SHashEntry* pEntry = pHashObj->hashList[i]; + SHashEntry *pEntry = pHashObj->hashList[i]; if (pEntry->num == 0) { assert(pEntry->head.next == 0); continue; @@ -396,8 +459,8 @@ void taosHashCleanup(SHashObj *pHashObj) { // destroy mem block size_t memBlock = taosArrayGetSize(pHashObj->pMemBlock); - for(int32_t i = 0; i < memBlock; ++i) { - void* p = taosArrayGetP(pHashObj->pMemBlock, i); + for (int32_t i = 0; i < memBlock; ++i) { + void *p = taosArrayGetP(pHashObj->pMemBlock, i); tfree(p); } @@ -414,6 +477,9 @@ SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) { } pIter->pHashObj = pHashObj; + + // keep it in local variable, in case the resize operation expand the size + pIter->numOfEntries = pHashObj->capacity; return pIter; } @@ -428,7 +494,7 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { } // check the first one - if (pIter->num == 0) { + if (pIter->numOfChecked == 0) { assert(pIter->pCur == NULL && pIter->pNext == NULL); while (1) { @@ -438,18 +504,30 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { continue; } + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pEntry->latch); + } + pIter->pCur = pEntry->head.next; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } } else { + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } + pIter->pNext 
= getNextHashNode(pIter); } break; } - pIter->num++; + pIter->numOfChecked++; return true; } else { assert(pIter->pCur != NULL); @@ -459,7 +537,7 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { return false; } - pIter->num++; + pIter->numOfChecked++; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; @@ -504,30 +582,30 @@ void taosHashTableResize(SHashObj *pHashObj) { if (!HASH_NEED_RESIZE(pHashObj)) { return; } - + // double the original capacity SHashNode *pNode = NULL; SHashNode *pNext = NULL; - + int32_t newSize = pHashObj->capacity << 1u; if (newSize > HASH_MAX_CAPACITY) { -// uDebug("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", -// pHashObj->capacity, HASH_MAX_CAPACITY); + // uDebug("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", + // pHashObj->capacity, HASH_MAX_CAPACITY); return; } void *pNewEntryList = realloc(pHashObj->hashList, sizeof(SHashEntry) * newSize); - if (pNewEntryList == NULL) {// todo handle error -// uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); + if (pNewEntryList == NULL) { // todo handle error + // uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); return; } - + pHashObj->hashList = pNewEntryList; size_t inc = newSize - pHashObj->capacity; - void* p = calloc(inc, sizeof(SHashEntry)); + void * p = calloc(inc, sizeof(SHashEntry)); - for(int32_t i = 0; i < inc; ++i) { + for (int32_t i = 0; i < inc; ++i) { pHashObj->hashList[i + pHashObj->capacity] = p + i * sizeof(SHashEntry); } @@ -535,7 +613,7 @@ void taosHashTableResize(SHashObj *pHashObj) { pHashObj->capacity = newSize; for (int32_t i = 0; i < pHashObj->capacity; ++i) { - SHashEntry* pe = pHashObj->hashList[i]; + SHashEntry *pe = pHashObj->hashList[i]; if (pe->num == 0) { assert(pe->head.next == NULL); continue; @@ -550,7 +628,7 @@ void taosHashTableResize(SHashObj *pHashObj) { pNext = pNode->next; assert(pNode != pNext && (pNext == NULL || pNext->prev == pNode) && pNode->prev->next == pNode); - popNodeFromEntryList(pe, pNode); + doPopFromEntryList(pe, pNode); // clear pointer pNode->next = NULL; @@ -566,8 +644,8 @@ void taosHashTableResize(SHashObj *pHashObj) { } } -// uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity, -// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); + // uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity, + // ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { @@ -579,7 +657,7 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s pNewNode->data = malloc(dsize + keyLen); memcpy(pNewNode->data, pData, dsize); - + pNewNode->key = pNewNode->data + dsize; memcpy(pNewNode->key, key, keyLen); @@ -588,10 +666,10 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s return pNewNode; } -void pushfrontNode(SHashEntry* pEntry, SHashNode *pNode) { +void pushfrontNode(SHashEntry *pEntry, SHashNode *pNode) { assert(pNode != NULL && pEntry != NULL); - - SHashNode* pNext = pEntry->head.next; + + SHashNode *pNext = pEntry->head.next; if (pNext != NULL) { pNext->prev = pNode; } @@ -605,17 +683,29 @@ void pushfrontNode(SHashEntry* pEntry, SHashNode *pNode) { SHashNode 
*getNextHashNode(SHashMutableIterator *pIter) { assert(pIter != NULL); - + pIter->entryIndex++; - while (pIter->entryIndex < pIter->pHashObj->capacity) { - SHashEntry*pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; + SHashNode *p = NULL; + + while (pIter->entryIndex < pIter->numOfEntries) { + SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; if (pEntry->num == 0) { pIter->entryIndex++; continue; } - - return pEntry->head.next; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pEntry->latch); + } + + p = pEntry->head.next; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } + + return p; } - + return NULL; } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index f8711828d1..8523a6f8b6 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -63,13 +63,6 @@ static FORCE_INLINE void __cache_lock_destroy(SCacheObj *pCacheObj) { #endif } -#if 0 -static FORCE_INLINE void taosFreeNode(void *data) { - SCacheDataNode *pNode = *(SCacheDataNode **)data; - free(pNode); -} -#endif - /** * @param key key of object for hash, usually a null-terminated string * @param keyLen length of key @@ -89,13 +82,6 @@ static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const */ static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode); -/** - * remove node in trash can - * @param pCacheObj - * @param pElem - */ -static void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem); - /** * remove nodes in trash with refCount == 0 in cache * @param pNode @@ -113,17 +99,19 @@ static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force); static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNode *pNode) { if (pNode->signature != (uint64_t)pNode) { uError("key:%s, %p data is invalid, or has been released", pNode->key, pNode); + assert(0); return; } - taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - pCacheObj->totalSize -= pNode->size; + int32_t size = taosHashGetSize(pCacheObj->pHashTable); uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, - pNode->size); + pCacheObj->name, pNode->key, pNode->data, size, pCacheObj->totalSize, pNode->size); + + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pNode->data); + } - if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); free(pNode); } @@ -137,6 +125,32 @@ static FORCE_INLINE void taosCacheMoveToTrash(SCacheObj *pCacheObj, SCacheDataNo taosAddToTrash(pCacheObj, pNode); } +static FORCE_INLINE void doRemoveElemInTrashcan(SCacheObj* pCacheObj, STrashElem *pElem) { + if (pElem->pData->signature != (uint64_t) pElem->pData) { + uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); + return; + } + + pCacheObj->numOfElemsInTrash--; + if (pElem->prev) { + pElem->prev->next = pElem->next; + } else { // pnode is the header, update header + pCacheObj->pTrash = pElem->next; + } + + if (pElem->next) { + pElem->next->prev = pElem->prev; + } +} + +static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem *pElem) { + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pElem->pData->data); + } + + free(pElem->pData); + free(pElem); +} /** * update data in cache * @param pCacheObj @@ -261,12 +275,11 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, 
const v } else { // duplicated key exists while (1) { SCacheDataNode* p = NULL; - int32_t ret = taosHashRemoveNode(pCacheObj->pHashTable, key, keyLen, (void*) &p, sizeof(void*)); + int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, key, keyLen, (void*) &p, sizeof(void*)); // add to trashcan if (ret == 0) { if (T_REF_VAL_GET(p) == 0) { - if (pCacheObj->freeFp) { pCacheObj->freeFp(p->data); } @@ -300,27 +313,25 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v return pNode1->data; } +static void incRefFn(void* ptNode) { + assert(ptNode != NULL); + + SCacheDataNode** p = (SCacheDataNode**) ptNode; + int32_t ret = T_REF_INC(*p); + assert(ret > 0); +} + void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) { if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { return NULL; } - void *pData = NULL; - -// __cache_rd_lock(pCacheObj); - SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - - int32_t ref = 0; - if (ptNode != NULL) { - ref = T_REF_INC(*ptNode); - pData = (*ptNode)->data; - } - -// __cache_unlock(pCacheObj); + SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn); + void* pData = (ptNode != NULL)? (*ptNode)->data:NULL; if (pData != NULL) { atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); - uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, ref); + uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, T_REF_VAL_GET(*ptNode)); } else { atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); @@ -423,8 +434,11 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { if (_remove) { // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. + char* key = pNode->key; + char* d = pNode->data; + int32_t ref = T_REF_DEC(pNode); - uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref); + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref); /* * If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users @@ -437,24 +451,35 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); - // todo add lock here - taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); + __cache_wr_lock(pCacheObj); + doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader); + __cache_unlock(pCacheObj); + + doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader); } } else { int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - if (ret == 0) { // successfully remove from hash table + + // successfully remove from hash table, if failed, this node must have been move to trash already, do nothing. + // note that the remove operation can be executed only once. 
+ if (ret == 0) { if (ref > 0) { assert(pNode->pTNodeHeader == NULL); - // todo trashcan lock + __cache_wr_lock(pCacheObj); taosAddToTrash(pCacheObj, pNode); + __cache_unlock(pCacheObj); } else { // ref == 0 - atomic_fetch_sub_ptr(&pCacheObj->totalSize, pNode->size); + atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); + uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, pNode->size); - if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pNode->data); + } + free(pNode); } } @@ -462,33 +487,40 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } else { // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. - int32_t ref = T_REF_DEC(pNode); + char* key = pNode->key; + char* p = pNode->data; - // todo so, invalid read here! - uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, pNode->key, pNode->data, - ref, inTrashCan); + int32_t ref = T_REF_DEC(pNode); + uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, key, p, ref, inTrashCan); } } -void taosCacheEmpty(SCacheObj *pCacheObj) { - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - - __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - if (pCacheObj->deleting == 1) { - break; - } - - SCacheDataNode *pNode = *(SCacheDataNode **) taosHashIterGet(pIter); - if (T_REF_VAL_GET(pNode) == 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } else { - taosCacheMoveToTrash(pCacheObj, pNode); - } +typedef struct SHashTravSupp { + SCacheObj* pCacheObj; + int64_t time; + __cache_free_fn_t fp; +} SHashTravSupp; + +static bool travHashTableEmptyFn(void* param, void* data) { + SHashTravSupp* ps = (SHashTravSupp*) param; + SCacheObj* pCacheObj= ps->pCacheObj; + + SCacheDataNode *pNode = *(SCacheDataNode **) data; + + if (T_REF_VAL_GET(pNode) == 0) { + taosCacheReleaseNode(pCacheObj, pNode); + } else { // do add to trashcan + taosAddToTrash(pCacheObj, pNode); } - __cache_unlock(pCacheObj); - - taosHashDestroyIter(pIter); + + // this node should be remove from hash table + return false; +} + +void taosCacheEmpty(SCacheObj *pCacheObj) { + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()}; + + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup); taosTrashCanEmpty(pCacheObj, false); } @@ -553,33 +585,6 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash); } -void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) { - if (pElem->pData->signature != (uint64_t)pElem->pData) { - uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); - return; - } - - pCacheObj->numOfElemsInTrash--; - if (pElem->prev) { - pElem->prev->next = pElem->next; - } else { /* pnode is the header, update header */ - pCacheObj->pTrash = pElem->next; - } - - if (pElem->next) { - pElem->next->prev = pElem->prev; - } - - pElem->pData->signature = 0; - if (pCacheObj->freeFp) { - pCacheObj->freeFp(pElem->pData->data); - } - - free(pElem->pData); - free(pElem); -} - -// TODO add another lock when scanning trashcan void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { 
__cache_wr_lock(pCacheObj); @@ -587,8 +592,8 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { if (pCacheObj->pTrash != NULL) { uError("key:inconsistency data in cache, numOfElem in trash:%d", pCacheObj->numOfElemsInTrash); } - pCacheObj->pTrash = NULL; + pCacheObj->pTrash = NULL; __cache_unlock(pCacheObj); return; } @@ -604,10 +609,12 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { if (force || (T_REF_VAL_GET(pElem->pData) == 0)) { uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData->data, pCacheObj->numOfElemsInTrash - 1); - STrashElem *p = pElem; + STrashElem *p = pElem; pElem = pElem->next; - taosRemoveFromTrashCan(pCacheObj, p); + + doRemoveElemInTrashcan(pCacheObj, p); + doDestroyTrashcanElem(pCacheObj, p); } else { pElem = pElem->next; } @@ -617,26 +624,27 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { } void doCleanupDataCache(SCacheObj *pCacheObj) { - __cache_wr_lock(pCacheObj); - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); +// SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); +// while (taosHashIterNext(pIter)) { +// SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); +// +// int32_t c = T_REF_VAL_GET(pNode); +// if (c <= 0) { +// taosCacheReleaseNode(pCacheObj, pNode); +// } else { +// uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key, +// pNode->data, T_REF_VAL_GET(pNode)); +// } +// } +// +// taosHashDestroyIter(pIter); - int32_t c = T_REF_VAL_GET(pNode); - if (c <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } else { - uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key, - pNode->data, T_REF_VAL_GET(pNode)); - } - } - taosHashDestroyIter(pIter); + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()}; + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup); // todo memory leak if there are object with refcount greater than 0 in hash table? 
taosHashCleanup(pCacheObj->pHashTable); - __cache_unlock(pCacheObj); - taosTrashCanEmpty(pCacheObj, true); __cache_lock_destroy(pCacheObj); @@ -645,26 +653,31 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { free(pCacheObj); } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); +bool travHashTableFn(void* param, void* data) { + SHashTravSupp* ps = (SHashTravSupp*) param; + SCacheObj* pCacheObj= ps->pCacheObj; -// __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); + SCacheDataNode* pNode = *(SCacheDataNode **) data; + if (pNode->expireTime < ps->time && T_REF_VAL_GET(pNode) <= 0) { + taosCacheReleaseNode(pCacheObj, pNode); - if (pNode->expireTime < time && T_REF_VAL_GET(pNode) <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - continue; - } - - if (fp) { - fp(pNode->data); - } + // this node should be remove from hash table + return false; } -// __cache_unlock(pCacheObj); + if (ps->fp) { + (ps->fp)(pNode->data); + } - taosHashDestroyIter(pIter); + // do not remove element in hash table + return true; +} + +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { + assert(pCacheObj != NULL); + + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time}; + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } void* taosCacheTimedRefresh(void *handle) { From 2b413844d32fa4f51b6e4a2716a8f539d56f3c54 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 1 Aug 2020 13:50:42 +0800 Subject: [PATCH 025/190] [td-225] --- src/util/src/hash.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index be4baf85d2..698bcbb15f 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -298,11 +298,13 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f } SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); - if (fp != NULL) { - fp(pNode->data); + if (pNode != NULL) { + if (fp != NULL) { + fp(pNode->data); + } + data = pNode->data; } - data = pNode->data; if (pHashObj->type == HASH_ENTRY_LOCK) { taosRUnLockLatch(&pe->latch); } From 02bdbce35af7a75fef1b63cc3534cb6c61d69245 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 1 Aug 2020 16:15:07 +0800 Subject: [PATCH 026/190] [td-225] merge develop --- src/mnode/src/mnodeProfile.c | 2 +- src/util/inc/hash.h | 9 ------ src/util/inc/tcache.h | 2 +- src/util/src/hash.c | 4 +-- src/util/src/tcache.c | 54 ++++++++++++++++++------------------ 5 files changed, 31 insertions(+), 40 deletions(-) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 353dd59671..e8f37f1422 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -112,7 +112,7 @@ void mnodeReleaseConn(SConnObj *pConn) { SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port) { uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs(); - SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime); + SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); if (pConn == NULL) { mDebug("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); return NULL; diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 688bf317d6..71493788ac 100644 --- 
a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -41,18 +41,9 @@ typedef struct SHashNode { typedef enum SHashLockTypeE { HASH_NO_LOCK = 0, -// HASH_GLOBAL_LOCK = 1, HASH_ENTRY_LOCK = 1, } SHashLockTypeE; -//typedef struct SHashLock { -//#if defined(LINUX) -// pthread_rwlock_t *lock; -//#else -// pthread_mutex_t *lock; -//#endif -//} SHashLock; - typedef struct SHashEntry { int32_t num; // number of elements in current entry SRWLatch latch; // entry latch diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index 1e2aeae394..11121fcf3b 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -121,7 +121,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen * @param expireTime new expire time of data * @return */ -void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime); +//void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime); /** * Add one reference count for the exist data, and assign this data for a new owner. diff --git a/src/util/src/hash.c b/src/util/src/hash.c index ed4f445795..83e2630e41 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -447,8 +447,7 @@ void taosHashCleanup(SHashObj *pHashObj) { pHashObj->freeFp(pNode->data); } - free(pNode->data); - free(pNode); + FREE_HASH_NODE(pNode); pNode = pNext; } } @@ -651,6 +650,7 @@ void taosHashTableResize(SHashObj *pHashObj) { SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { SHashNode *pNewNode = calloc(1, sizeof(SHashNode)); + if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index c360d09a4a..e7f4b29744 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -340,33 +340,33 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen return pData; } -void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { - return NULL; - } - - __cache_rd_lock(pCacheObj); - - SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - if (ptNode != NULL) { - T_REF_INC(*ptNode); - (*ptNode)->expireTime = expireTime; // taosGetTimestampMs() + (*ptNode)->lifespan; - } - - __cache_unlock(pCacheObj); - - if (ptNode != NULL) { - atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); - uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key, - (*ptNode)->data, T_REF_VAL_GET(*ptNode)); - } else { - atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); - uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); - } - - atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); - return (ptNode != NULL) ? 
(*ptNode)->data : NULL; -} +//void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) { +// if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { +// return NULL; +// } +// +// __cache_rd_lock(pCacheObj); +// +// SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); +// if (ptNode != NULL) { +// T_REF_INC(*ptNode); +// (*ptNode)->expireTime = expireTime; // taosGetTimestampMs() + (*ptNode)->lifespan; +// } +// +// __cache_unlock(pCacheObj); +// +// if (ptNode != NULL) { +// atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); +// uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key, +// (*ptNode)->data, T_REF_VAL_GET(*ptNode)); +// } else { +// atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); +// uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); +// } +// +// atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); +// return (ptNode != NULL) ? (*ptNode)->data : NULL; +//} void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) { if (pCacheObj == NULL || data == NULL) return NULL; From 9be35563a2284d8867f446ce7c26b453df8ee834 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 3 Aug 2020 11:06:08 +0800 Subject: [PATCH 027/190] [td-225] fix bugs found by sim. --- src/inc/tsdb.h | 27 ++--- src/mnode/src/mnodeProfile.c | 1 - src/query/inc/qExecutor.h | 2 - src/query/src/qExecutor.c | 88 ++++++++++++----- src/tsdb/src/tsdbRead.c | 131 ++++++++++++++++--------- src/util/src/tcache.c | 34 +++---- tests/script/general/parser/topbot.sim | 12 ++- 7 files changed, 186 insertions(+), 109 deletions(-) diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index bab17322f0..4776d1cda7 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -167,9 +167,14 @@ typedef struct SDataBlockInfo { } SDataBlockInfo; typedef struct { - size_t numOfTables; + void *pTable; + TSKEY lastKey; +} STableKeyInfo; + +typedef struct { + size_t numOfTables; SArray *pGroupList; - SHashObj *map; // speedup acquire the tableQueryInfo from STableId + SHashObj *map; // speedup acquire the tableQueryInfo by table uid } STableGroupInfo; /** @@ -177,24 +182,24 @@ typedef struct { * * @param tsdb tsdb handle * @param pCond query condition, including time window, result set order, and basic required columns for each block - * @param tableqinfoGroupInfo tableId list in the form of set, seperated into different groups according to group by condition + * @param tableInfoGroup table object list in the form of set, grouped into different sets according to the + * group by condition * @param qinfo query info handle from query processor * @return */ -TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableqinfoGroupInfo, void *qinfo); +TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo); /** * Get the last row of the given query time window for all the tables in STableGroupInfo object. * Note that only one data block with only row will be returned while invoking retrieve data block function for * all tables in this group. * - * @param tsdb tsdb handle - * @param pCond query condition, including time window, result set order, and basic required columns for each - * block - * @param tableqinfoGroupInfo tableId list. 
+ * @param tsdb tsdb handle + * @param pCond query condition, including time window, result set order, and basic required columns for each block + * @param tableInfo table list. * @return */ -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableqinfoGroupInfo, void *qinfo); +TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo); /** * get the queried table object list @@ -260,7 +265,7 @@ SArray *tsdbRetrieveDataBlock(TsdbQueryHandleT *pQueryHandle, SArray *pColumnIdL * @param stableid. super table sid * @param pTagCond. tag query condition */ -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, const char *pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY key, const char *pTagCond, size_t len, int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupList, SColIndex *pColIndex, int32_t numOfCols); @@ -278,7 +283,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList); * @param pGroupInfo the generated result * @return */ -int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, STableGroupInfo *pGroupInfo); +int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo); /** * diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index e8f37f1422..7079b1a26a 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -73,7 +73,6 @@ int32_t mnodeInitProfile() { void mnodeCleanupProfile() { if (tsMnodeConnCache != NULL) { - mInfo("conn cache is cleanup"); taosCacheCleanup(tsMnodeConnCache); tsMnodeConnCache = NULL; } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index bd2e0a4470..328078fb60 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -186,8 +186,6 @@ typedef struct SQInfo { void* signature; int32_t pointsInterpo; int32_t code; // error code to returned to client -// sem_t dataReady; - void* tsdb; int32_t vgId; STableGroupInfo tableGroupInfo; // table id list < only includes the STable list> diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6bc8bf31ea..613c3ae14c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1813,10 +1813,14 @@ static void doExchangeTimeWindow(SQInfo* pQInfo) { for(int32_t i = 0; i < t; ++i) { SArray* p1 = GET_TABLEGROUP(pQInfo, i); + SArray* tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i); size_t len = taosArrayGetSize(p1); for(int32_t j = 0; j < len; ++j) { STableQueryInfo* pTableQueryInfo = (STableQueryInfo*) taosArrayGetP(p1, j); SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY); + + STableKeyInfo* pInfo = taosArrayGet(tableKeyGroup, j); + pInfo->lastKey = pTableQueryInfo->win.skey; } } } @@ -2925,7 +2929,7 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * return; } - // order has change already! 
+ // order has changed already int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); // TODO validate the assertion @@ -2934,9 +2938,13 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * // } else { // assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step); // } - - pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step; - + + if (pTableQueryInfo->lastKey == pTableQueryInfo->win.skey) { + // do nothing, no results + } else { + pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step; + } + SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY); pTableQueryInfo->lastKey = pTableQueryInfo->win.skey; @@ -2998,16 +3006,26 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { } } } - +} + +static void setupQueryRangeForReverseScan(SQInfo* pQInfo) { + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo); - + for(int32_t i = 0; i < numOfGroups; ++i) { SArray *group = GET_TABLEGROUP(pQInfo, i); - + SArray *tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i); + size_t t = taosArrayGetSize(group); for (int32_t j = 0; j < t; ++j) { STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); updateTableQueryInfoForReverseScan(pQuery, pCheckInfo); + + // update the last key in tableKeyInfo list + STableKeyInfo *pTableKeyInfo = taosArrayGet(tableKeyGroup, j); + pTableKeyInfo->lastKey = pCheckInfo->lastKey; + + assert(pCheckInfo->pTable == pTableKeyInfo->pTable); } } } @@ -3252,20 +3270,20 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI .numOfCols = pQuery->numOfCols, }; + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + switchCtxOrder(pRuntimeEnv); + disableFuncInReverseScan(pQInfo); + setupQueryRangeForReverseScan(pQInfo); + // clean unused handle if (pRuntimeEnv->pSecQueryHandle != NULL) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } - // add ref for table pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } - - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - switchCtxOrder(pRuntimeEnv); - disableFuncInReverseScan(pQInfo); } static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusInfo *pStatus) { @@ -3290,6 +3308,13 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus pQuery->window = pTableQueryInfo->win; } +static void restoreTimeWindow(STableGroupInfo* pTableGroupInfo, STsdbQueryCond* pCond) { + assert(pTableGroupInfo->numOfTables == 1); + SArray* pTableKeyGroup = taosArrayGetP(pTableGroupInfo->pGroupList, 0); + STableKeyInfo* pKeyInfo = taosArrayGet(pTableKeyGroup, 0); + pKeyInfo->lastKey = pCond->twindow.skey; +} + void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { SQInfo *pQInfo = (SQInfo *) GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; @@ -3337,6 +3362,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } + restoreTimeWindow(&pQInfo->tableGroupInfo, &cond); pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); @@ -4409,9 +4435,11 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { // todo refactor SArray *g1 = taosArrayInit(1, POINTER_BYTES); - SArray *tx = 
taosArrayInit(1, POINTER_BYTES); + SArray *tx = taosArrayInit(1, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pCheckInfo->pTable, .lastKey = pCheckInfo->lastKey}; + taosArrayPush(tx, &info); - taosArrayPush(tx, &pCheckInfo->pTable); taosArrayPush(g1, &tx); STableGroupInfo gp = {.numOfTables = 1, .pGroupList = g1}; @@ -4561,7 +4589,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pRuntimeEnv->pQueryHandle = NULL; } + // no need to update the lastkey for each table pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + taosArrayDestroy(g1); taosArrayDestroy(tx); if (pRuntimeEnv->pQueryHandle == NULL) { @@ -4687,8 +4717,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); // if the buffer is full or group by each table, we need to jump out of the loop - if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL) /*|| - isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)*/) { + if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { break; } @@ -4753,21 +4782,22 @@ static void doSaveContext(SQInfo *pQInfo) { .colList = pQuery->colList, .numOfCols = pQuery->numOfCols, }; - + // clean unused handle if (pRuntimeEnv->pSecQueryHandle != NULL) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + switchCtxOrder(pRuntimeEnv); + disableFuncInReverseScan(pQInfo); + setupQueryRangeForReverseScan(pQInfo); + pRuntimeEnv->prevGroupId = INT32_MIN; pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } - - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - switchCtxOrder(pRuntimeEnv); - disableFuncInReverseScan(pQInfo); } static void doRestoreContext(SQInfo *pQInfo) { @@ -5861,8 +5891,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, } for(int32_t j = 0; j < s; ++j) { - void* pTable = taosArrayGetP(pa, j); - STableId* id = TSDB_TABLEID(pTable); + STableKeyInfo* info = taosArrayGet(pa, j); + STableId* id = TSDB_TABLEID(info->pTable); STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo); if (pTableId != NULL ) { @@ -5872,10 +5902,11 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, } void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo); - STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf); + STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf); if (item == NULL) { goto _cleanup; } + item->groupIndex = i; taosArrayPush(p1, &item); taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); @@ -5904,6 +5935,7 @@ _cleanup_query: taosArrayDestroy(pGroupbyExpr->columnInfo); free(pGroupbyExpr); } + taosTFree(pTagCols); for (int32_t i = 0; i < numOfOutput; ++i) { SExprInfo* pExprInfo = &pExprs[i]; @@ -5911,6 +5943,7 @@ _cleanup_query: tExprTreeDestroy(&pExprInfo->pExpr, NULL); } } + taosTFree(pExprs); _cleanup: @@ -6198,7 +6231,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi STableIdInfo *id = taosArrayGet(pTableIdList, 0); qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - if ((code = tsdbGetOneTableGroup(tsdb, id->uid, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) 
!= TSDB_CODE_SUCCESS) { goto _over; } } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { @@ -6215,8 +6248,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi } qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, - numOfGroupByCols); + code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, tagCond, pQueryMsg->tagCondLen, + pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, numOfGroupByCols); + if (code != TSDB_CODE_SUCCESS) { qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); goto _over; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 2537e0e822..b086451dd1 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -172,6 +172,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle == NULL) { goto out_of_memory; } + pQueryHandle->order = pCond->order; pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; @@ -190,9 +191,6 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem); - size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList); - assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); - // allocate buffer in order to load data blocks from file int32_t numOfCols = pCond->numOfCols; @@ -200,6 +198,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle->statis == NULL) { goto out_of_memory; } + pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? 
if (pQueryHandle->pColumns == NULL) { goto out_of_memory; @@ -221,9 +220,13 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle->pTableCheckInfo == NULL) { goto out_of_memory; } + STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); + size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList); + assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); + for (int32_t i = 0; i < sizeOfGroup; ++i) { SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i); @@ -231,17 +234,23 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab assert(gsize > 0); for (int32_t j = 0; j < gsize; ++j) { - STable* pTable = (STable*) taosArrayGetP(group, j); + STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j); STableCheckInfo info = { - .lastKey = pQueryHandle->window.skey, - .tableId = pTable->tableId, - .pTableObj = pTable, + .lastKey = pKeyInfo->lastKey, + .tableId = ((STable*)(pKeyInfo->pTable))->tableId, + .pTableObj = pKeyInfo->pTable, }; assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + assert(info.lastKey >= pQueryHandle->window.skey); + } else { + assert(info.lastKey <= pQueryHandle->window.skey); + } + taosArrayPush(pQueryHandle->pTableCheckInfo, &info); } } @@ -315,19 +324,22 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL); + STableData* pMem = NULL; + STableData* pIMem = NULL; + if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables) { - STableData* ptd = pHandle->mem->tData[pCheckInfo->tableId.tid]; - if (ptd != NULL && ptd->uid == pCheckInfo->tableId.uid) { // check uid + pMem = pHandle->mem->tData[pCheckInfo->tableId.tid]; + if (pMem != NULL && pMem->uid == pCheckInfo->tableId.uid) { // check uid pCheckInfo->iter = - tSkipListCreateIterFromVal(ptd->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables) { - STableData* ptd = pHandle->imem->tData[pCheckInfo->tableId.tid]; - if (ptd != NULL && ptd->uid == pCheckInfo->tableId.uid) { // check uid + pIMem = pHandle->imem->tData[pCheckInfo->tableId.tid]; + if (pIMem != NULL && pIMem->uid == pCheckInfo->tableId.uid) { // check uid pCheckInfo->iiter = - tSkipListCreateIterFromVal(ptd->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pIMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } @@ -348,8 +360,17 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in mem from skey:%" PRId64 ", order:%d, %p", pHandle, - pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo); + tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + "-%" PRId64 ", lastKey:%" PRId64 ", %p", + pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, + pCheckInfo->lastKey, pHandle->qinfo); + + if 
(ASCENDING_TRAVERSE(order)) { + assert(pCheckInfo->lastKey <= key); + } else { + assert(pCheckInfo->lastKey >= key); + } + } else { tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qinfo); @@ -361,8 +382,16 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in imem from skey:%" PRId64 ", order:%d, %p", pHandle, - pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo); + tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + "-%" PRId64 ", lastKey:%" PRId64 ", %p", + pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, + pCheckInfo->lastKey, pHandle->qinfo); + + if (ASCENDING_TRAVERSE(order)) { + assert(pCheckInfo->lastKey <= key); + } else { + assert(pCheckInfo->lastKey >= key); + } } else { tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qinfo); @@ -2033,7 +2062,9 @@ static int32_t getAllTableList(STable* pSuperTable, SArray* list) { SSkipListNode* pNode = tSkipListIterGet(iter); STable** pTable = (STable**) SL_GET_NODE_DATA((SSkipListNode*) pNode); - taosArrayPush(list, pTable); + + STableKeyInfo info = {.pTable = *pTable, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(list, &info); } tSkipListDestroyIter(iter); @@ -2089,8 +2120,8 @@ typedef struct STableGroupSupporter { int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = *(STable**) p1; - STable* pTable2 = *(STable**) p2; + STable* pTable1 = ((STableKeyInfo*) p1)->pTable; + STable* pTable2 = ((STableKeyInfo*) p2)->pTable; for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; @@ -2140,12 +2171,14 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { return 0; } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, STableGroupSupporter* pSupp, +void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey, STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, POINTER_BYTES); - taosArrayPush(g, &pTable); + SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pTable, .lastKey = skey}; + taosArrayPush(g, &info); tsdbRefTable(pTable); for (int32_t i = 1; i < numOfTables; ++i) { @@ -2159,18 +2192,21 @@ void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTable assert((*p)->type == TSDB_CHILD_TABLE); if (ret == 0) { - taosArrayPush(g, p); + STableKeyInfo info1 = {.pTable = *p, .lastKey = skey}; + taosArrayPush(g, &info1); } else { taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, POINTER_BYTES); - taosArrayPush(g, p); + g = taosArrayInit(16, sizeof(STableKeyInfo)); + + STableKeyInfo info1 = {.pTable = *p, .lastKey = skey}; + taosArrayPush(g, &info1); } } taosArrayPush(pGroups, &g); } -SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols) { +SArray* createTableGroup(SArray* 
pTableList, STSchema* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, TSKEY skey) { assert(pTableList != NULL); SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); @@ -2181,13 +2217,16 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC } if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayInit(size, POINTER_BYTES); - for(int32_t i = 0; i < size; ++i) { - STable** pTable = taosArrayGet(pTableList, i); - assert((*pTable)->type == TSDB_CHILD_TABLE); + SArray* sa = taosArrayInit(size, sizeof(STableKeyInfo)); - tsdbRefTable(*pTable); - taosArrayPush(sa, pTable); + for(int32_t i = 0; i < size; ++i) { + STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i); + assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE); + + tsdbRefTable(pKeyInfo->pTable); + + STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey}; + taosArrayPush(sa, &info); } taosArrayPush(pTableGroup, &sa); @@ -2198,8 +2237,8 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC pSupp->pTagSchema = pTagSchema; pSupp->pCols = pCols; - taosqsort(pTableList->pData, size, POINTER_BYTES, pSupp, tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, pSupp, tableGroupComparFn); + taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), pSupp, tableGroupComparFn); + createTableGroupImpl(pTableGroup, pTableList, size, skey, pSupp, tableGroupComparFn); taosTFree(pSupp); } @@ -2272,7 +2311,7 @@ static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) return TSDB_CODE_SUCCESS; } -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; @@ -2296,7 +2335,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT } //NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, POINTER_BYTES); + SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); STSchema* pTagSchema = tsdbGetTableTagSchema(pTable); // no tags and tbname condition, all child tables of this stable are involved @@ -2308,7 +2347,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT } pGroupInfo->numOfTables = taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); + pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%zu", tsdb, pGroupInfo->numOfTables); taosArrayDestroy(res); @@ -2351,7 +2390,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT doQueryTableList(pTable, res, expr); pGroupInfo->numOfTables = taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); + pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%zu, belong to %zu groups", tsdb, pTable->tableId.tid, pTable->tableId.uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); @@ -2365,7 +2404,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT 
return terrno; } -int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* pGroupInfo) { +int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); @@ -2382,9 +2421,11 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* p pGroupInfo->numOfTables = 1; pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - SArray* group = taosArrayInit(1, POINTER_BYTES); + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pTable, .lastKey = startKey}; + taosArrayPush(group, &info); - taosArrayPush(group, &pTable); taosArrayPush(pGroupInfo->pGroupList, &group); return TSDB_CODE_SUCCESS; @@ -2401,7 +2442,7 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa assert(pTableIdList != NULL); size_t size = taosArrayGetSize(pTableIdList); pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - SArray* group = taosArrayInit(1, POINTER_BYTES); + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); int32_t i = 0; for(; i < size; ++i) { @@ -2419,7 +2460,9 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa } tsdbRefTable(pTable); - taosArrayPush(group, &pTable); + + STableKeyInfo info = {.pTable = pTable, .lastKey = id->key}; + taosArrayPush(group, &info); } if (tsdbUnlockRepoMeta(tsdb) < 0) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index e7f4b29744..b5f8515f78 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -104,8 +104,10 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo pCacheObj->totalSize -= pNode->size; int32_t size = taosHashGetSize(pCacheObj->pHashTable); - uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, size, pCacheObj->totalSize, pNode->size); + assert(size > 0); + + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", + pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); if (pCacheObj->freeFp) { pCacheObj->freeFp(pNode->data); @@ -428,7 +430,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { if (pCacheObj->extendLifespan && (!inTrashCan) && (!_remove)) { atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); - uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime); + uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime); } if (_remove) { @@ -471,9 +473,9 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } else { // ref == 0 atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); - uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), - pCacheObj->totalSize, pNode->size); + int32_t size = taosHashGetSize(pCacheObj->pHashTable); + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", + pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); if (pCacheObj->freeFp) { pCacheObj->freeFp(pNode->data); @@ -581,7 +583,8 @@ void 
taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { pNode->pTNodeHeader = pElem; pCacheObj->numOfElemsInTrash++; - uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash); + uDebug("%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data, + pCacheObj->numOfElemsInTrash); } void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { @@ -623,28 +626,13 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { } void doCleanupDataCache(SCacheObj *pCacheObj) { - -// SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); -// while (taosHashIterNext(pIter)) { -// SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); -// -// int32_t c = T_REF_VAL_GET(pNode); -// if (c <= 0) { -// taosCacheReleaseNode(pCacheObj, pNode); -// } else { -// uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key, -// pNode->data, T_REF_VAL_GET(pNode)); -// } -// } -// -// taosHashDestroyIter(pIter); - SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()}; taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup); // todo memory leak if there are object with refcount greater than 0 in hash table? taosHashCleanup(pCacheObj->pHashTable); taosTrashCanEmpty(pCacheObj, true); + __cache_lock_destroy(pCacheObj); taosTFree(pCacheObj->name); diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index a0c46dbc65..57858ae04e 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -66,9 +66,19 @@ if $row != 100 then return -1 endi -sql select last(c2) from tb_tb9 +sql select last(*) from tb_tb9 if $row != 1 then return -1 endi +sql select last(c2) from tb_tb9 +if $row != 0 then + return -1 +endi + +sql select first(c2), last(c2) from tb_tb9 +if $row != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From 8bdf3a05e0b463922b8db2a611c4eec9c0354714 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 3 Aug 2020 14:36:39 +0800 Subject: [PATCH 028/190] [td-225] fix memory leaks --- src/mnode/src/mnodeProfile.c | 2 +- src/util/src/hash.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 7079b1a26a..5e11ce98de 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -99,7 +99,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { tstrncpy(connObj.user, user, sizeof(connObj.user)); SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME); - + mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); return pConn; } diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 83e2630e41..71a51c4cea 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -409,6 +409,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi // not qualified, remove it if (fp && (!fp(param, pNode->data))) { doPopFromEntryList(pEntry, pNode); + FREE_HASH_NODE(pNode); } pNode = pNext; From aadc31f73b3bf4e3e804dfa2aa840e95ae43d88a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 3 Aug 2020 14:46:29 +0800 Subject: [PATCH 029/190] [td-225] fix bugs in bottom query. 
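In the second-stage merge of top()/bottom(), the intermediate result of a FLOAT column arrives as a DOUBLE, so decoding it with the final output type reads the wrong bytes. bottom_func_second_merge() now promotes FLOAT to DOUBLE before handing each value to do_bottom_function_add(), and do_bottom_function_add() asserts that the result list has been allocated before it is used. The standalone sketch below is illustrative only (it is not TDengine code and does not use the real structures); it shows the kind of misread the type promotion avoids:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      /* the partial-result buffer carries the value as a double (8 bytes) */
      double intermediate = 0.1;
      char   buf[sizeof(double)];
      memcpy(buf, &intermediate, sizeof(double));

      float  asFloat;   /* decoded with the output type (4 bytes): wrong   */
      double asDouble;  /* decoded with the promoted type (8 bytes): right */
      memcpy(&asFloat, buf, sizeof(float));
      memcpy(&asDouble, buf, sizeof(double));

      printf("decoded as float : %f\n", asFloat);   /* garbage bits    */
      printf("decoded as double: %f\n", asDouble);  /* prints 0.100000 */
      return 0;
    }
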
--- src/client/src/tscFunctionImpl.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index c4f768f6ac..6dff3ddc8e 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -1942,11 +1942,12 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type, SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) { - tValuePair **pList = pInfo->res; - tVariant val = {0}; tVariantCreateFromBinary(&val, pData, tDataTypeDesc[type].nSize, type); - + + tValuePair **pList = pInfo->res; + assert(pList != NULL); + if (pInfo->num < maxLen) { if (pInfo->num == 0) { valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); @@ -2379,8 +2380,9 @@ static void bottom_func_second_merge(SQLFunctionCtx *pCtx) { // the intermediate result is binary, we only use the output data type for (int32_t i = 0; i < pInput->num; ++i) { + int16_t type = (pCtx->outputType == TSDB_DATA_TYPE_FLOAT)? TSDB_DATA_TYPE_DOUBLE:pCtx->outputType; do_bottom_function_add(pOutput, pCtx->param[0].i64Key, &pInput->res[i]->v.i64Key, pInput->res[i]->timestamp, - pCtx->outputType, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage); + type, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage); } SET_VAL(pCtx, pInput->num, pOutput->num); From a79f608dd4d98615dc9d31cc8bd3b1effbc6dfd3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 3 Aug 2020 14:46:56 +0800 Subject: [PATCH 030/190] [td-225] update the sim script. --- tests/script/general/parser/topbot.sim | 37 ++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index 57858ae04e..fdda79451d 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -66,6 +66,11 @@ if $row != 100 then return -1 endi +sql select bottom(c1, 100) from tb_stb0 +if $row != 100 then + return -1 +endi + sql select last(*) from tb_tb9 if $row != 1 then return -1 @@ -81,4 +86,36 @@ if $row != 0 then return -1 endi +sql create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20)); +sql create table test1 using test tags('beijing'); +sql insert into test1 values(1537146000000, 1, 1, 1, 1, 0.100000, 0.100000, 0, 'taosdata1', '涛思数据1'); +sql insert into test1 values(1537146000001, 2, 2, 2, 2, 1.100000, 1.100000, 1, 'taosdata2', '涛思数据2'); +sql insert into test1 values(1537146000002, 3, 3, 3, 3, 2.100000, 2.100000, 0, 'taosdata3', '涛思数据3'); +sql insert into test1 values(1537146000003, 4, 4, 4, 4, 3.100000, 3.100000, 1, 'taosdata4', '涛思数据4'); +sql insert into test1 values(1537146000004, 5, 5, 5, 5, 4.100000, 4.100000, 0, 'taosdata5', '涛思数据5'); +sql insert into test1 values(1537146000005, 6, 6, 6, 6, 5.100000, 5.100000, 1, 'taosdata6', '涛思数据6'); +sql insert into test1 values(1537146000006, 7, 7, 7, 7, 6.100000, 6.100000, 0, 'taosdata7', '涛思数据7'); +sql insert into test1 values(1537146000007, 8, 8, 8, 8, 7.100000, 7.100000, 1, 'taosdata8', '涛思数据8'); +sql insert into test1 values(1537146000008, 9, 9, 9, 9, 8.100000, 8.100000, 0, 'taosdata9', '涛思数据9'); +sql insert into test1 values(1537146000009, 10, 10, 10, 10, 9.100000, 9.100000, 1, 'taosdata10', '涛思数据10'); +sql select 
bottom(col5, 10) from test +if $rows != 10 then + return -1 +endi + +if $data01 != 0.10000 then + print expect 0.10000 actual: $data01 + return -1 +endi + +if $data11 != 1.10000 then + print expect 1.10000 actual: $data11 + return -1 +endi + +if $data21 != 2.10000 then + print expect 2.10000 actual: $data21 + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From 34c8f42a8072c75a72f144ef52298b34a2847d27 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 3 Aug 2020 17:52:32 +0800 Subject: [PATCH 031/190] minor changes --- src/mnode/src/mnodeProfile.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 5e11ce98de..85457d7a26 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -79,6 +79,7 @@ void mnodeCleanupProfile() { } SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { +#if 0 int32_t connSize = taosHashGetSize(tsMnodeConnCache->pHashTable); if (connSize > tsMaxShellConns) { mError("failed to create conn for user:%s ip:%s:%u, conns:%d larger than maxShellConns:%d, ", user, taosIpStr(ip), @@ -86,6 +87,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { terrno = TSDB_CODE_MND_TOO_MANY_SHELL_CONNS; return NULL; } +#endif int32_t connId = atomic_add_fetch_32(&tsConnIndex, 1); if (connId == 0) atomic_add_fetch_32(&tsConnIndex, 1); From 9a91f0e4c3eb52bf0c456ca957fafc0c47896cb2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 18:13:28 +0800 Subject: [PATCH 032/190] [td-225] add some logs. --- src/dnode/src/dnodeVRead.c | 5 +- src/inc/taoserror.h | 1 + src/query/inc/qExecutor.h | 4 +- src/query/src/qExecutor.c | 30 +++-- src/query/src/qResultbuf.c | 13 ++- src/util/inc/hash.h | 4 +- src/util/src/hash.c | 233 +++++++++++++++++++++---------------- src/vnode/src/vnodeRead.c | 3 +- 8 files changed, 170 insertions(+), 123 deletions(-) diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index cb53bb5e60..c99cf87b21 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -202,8 +202,9 @@ static void *dnodeProcessReadQueue(void *param) { break; } - dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle, - taosMsg[pReadMsg->rpcMsg.msgType], type); + dDebug("%p, msg:%s will be processed in vread queue, qtype:%d, msg:%p", pReadMsg->rpcMsg.ahandle, + taosMsg[pReadMsg->rpcMsg.msgType], type, pReadMsg); + int32_t code = vnodeProcessRead(pVnode, pReadMsg); if (type == TAOS_QTYPE_RPC && code != TSDB_CODE_QRY_NOT_READY) { diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index ece08ae173..9af4cee28a 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -219,6 +219,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, 0, 0x0705, "Duplicated TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, 0, 0x0706, "Tag conditon too many") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not ready") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 0x0709, "Multiple retrieval of this query") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired") diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 328078fb60..44d5d26f71 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -184,8 +184,8 @@ enum { typedef struct SQInfo { void* signature; - int32_t pointsInterpo; - int32_t code; // error code to returned to 
client + int32_t code; // error code to returned to client + pthread_t owner; // if it is in execution void* tsdb; int32_t vgId; STableGroupInfo tableGroupInfo; // table id list < only includes the STable list> diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 613c3ae14c..ac90ca9595 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2784,6 +2784,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery, buf); resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo); + // todo add windowRes iterator int64_t lastTimestamp = -1; int64_t startt = taosGetTimestampMs(); @@ -2791,7 +2792,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { int32_t pos = pTree->pNode[0].index; SWindowResInfo *pWindowResInfo = &pTableList[pos]->windowResInfo; - SWindowResult * pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); + SWindowResult *pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page); @@ -2828,6 +2829,9 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { lastTimestamp = ts; + // move to the next element of current entry + int32_t currentPageId = pWindowRes->pos.pageId; + cs.position[pos] += 1; if (cs.position[pos] >= pWindowResInfo->size) { cs.position[pos] = -1; @@ -2836,6 +2840,12 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { if (--numOfTables == 0) { break; } + } else { + // current page is not needed anymore + SWindowResult *pNextWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); + if (pNextWindowRes->pos.pageId != currentPageId) { + releaseResBufPage(pRuntimeEnv->pResultBuf, page); + } } } @@ -5081,8 +5091,6 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); } - - pQInfo->pointsInterpo += numOfFilled; } static void tableQueryImpl(SQInfo *pQInfo) { @@ -6330,16 +6338,24 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); + // clear qhandle owner +// assert(pQInfo->owner == pthread_self()); +// pQInfo->owner = 0; + return buildRes; } bool qTableQuery(qinfo_t qinfo) { SQInfo *pQInfo = (SQInfo *)qinfo; + assert(pQInfo && pQInfo->signature == pQInfo); +// int64_t threadId = pthread_self(); - if (pQInfo == NULL || pQInfo->signature != pQInfo) { - qDebug("QInfo:%p has been freed, no need to execute", pQInfo); - return false; - } +// int64_t curOwner = 0; +// if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { +// qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); +// pQInfo->code = TSDB_CODE_QRY_IN_EXEC; +// return false; +// } if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index a577070970..8235bd7b1f 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -186,7 +186,7 @@ static char* loadPageFromDisk(SDiskbasedResultBuf* pResultBuf, SPageInfo* pg) { return GET_DATA_PAYLOAD(pg); } -#define NO_AVAILABLE_PAGES(_b) ((_b)->numOfPages >= (_b)->inMemPages) +#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) 
static SIDList addNewGroup(SDiskbasedResultBuf* pResultBuf, int32_t groupId) { assert(taosHashGet(pResultBuf->groupSet, (const char*) &groupId, sizeof(int32_t)) == NULL); @@ -281,7 +281,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 pResultBuf->statis.getPages += 1; char* availablePage = NULL; - if (NO_AVAILABLE_PAGES(pResultBuf)) { + if (NO_IN_MEM_AVAILABLE_PAGES(pResultBuf)) { availablePage = evicOneDataPage(pResultBuf); } @@ -340,7 +340,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { assert((*pi)->pData == NULL && (*pi)->pn == NULL && (*pi)->info.length >= 0 && (*pi)->info.offset >= 0); char* availablePage = NULL; - if (NO_AVAILABLE_PAGES(pResultBuf)) { + if (NO_IN_MEM_AVAILABLE_PAGES(pResultBuf)) { availablePage = evicOneDataPage(pResultBuf); } @@ -396,12 +396,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { } if (pResultBuf->file != NULL) { - qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, file size:%"PRId64" bytes", - pResultBuf->handle, pResultBuf->totalBufSize, pResultBuf->fileSize); + qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, inmem size:%dbytes, file size:%"PRId64" bytes", + pResultBuf->handle, pResultBuf->totalBufSize, listNEles(pResultBuf->lruList) * pResultBuf->pageSize, + pResultBuf->fileSize); fclose(pResultBuf->file); } else { - qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, no file created", pResultBuf->handle, + qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, no file created", pResultBuf->handle, pResultBuf->totalBufSize); } diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 71493788ac..f289a4e8c3 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -32,7 +32,7 @@ typedef void (*_hash_free_fn_t)(void *param); typedef struct SHashNode { char *key; - struct SHashNode *prev; +// struct SHashNode *prev; struct SHashNode *next; uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash uint32_t keyLen; // length of the key @@ -47,7 +47,7 @@ typedef enum SHashLockTypeE { typedef struct SHashEntry { int32_t num; // number of elements in current entry SRWLatch latch; // entry latch - SHashNode head; // dummy head + SHashNode *next; } SHashEntry; typedef struct SHashObj { diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 71a51c4cea..8c74db0082 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -20,12 +20,21 @@ #define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) -#define FREE_HASH_NODE(_n) \ - do { \ - taosTFree((_n)->data); \ - taosTFree(_n); \ +#define DO_FREE_HASH_NODE(_n) \ + do { \ + taosTFree((_n)->data); \ + taosTFree(_n); \ } while (0) +#define FREE_HASH_NODE(_h, _n) \ + do { \ + if ((_h)->freeFp) { \ + (_h)->freeFp((_n)->data); \ + } \ + \ + DO_FREE_HASH_NODE(_n); \ + } while (0); + static FORCE_INLINE void __wr_lock(void *lock, int32_t type) { if (type == HASH_NO_LOCK) { return; @@ -65,17 +74,8 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { return i; } -/** - * Get SHashNode from hashlist, nodes from trash are not included. 
- * @param pHashObj Cache objection - * @param key key for hash - * @param keyLen key length - * @param hashVal hash value by hash function - * @return - */ - static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { - SHashNode *pNode = pe->head.next; + SHashNode *pNode = pe->next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); @@ -88,28 +88,21 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *k return pNode; } -static FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, - uint32_t hashVal) { - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); +static FORCE_INLINE SHashNode *doSerchPrevInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { + SHashNode *prev= NULL; + SHashNode *pNode = pe->next; - SHashEntry *pe = pHashObj->hashList[slot]; + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); + break; + } - // no data, return directly - if (atomic_load_32(&pe->num) == 0) { - return NULL; + prev = pNode; + pNode = pNode->next; } - if (pHashObj->type == HASH_ENTRY_LOCK) { - taosRLockLatch(&pe->latch); - } - - SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); - - if (pHashObj->type == HASH_ENTRY_LOCK) { - taosRUnLockLatch(&pe->latch); - } - - return pNode; + return prev; } /** @@ -153,7 +146,7 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNe * @param pHashObj * @param pNode */ -static void pushfrontNode(SHashEntry *pEntry, SHashNode *pNode); +static void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode); /** * Get the next element in hash table for iterator @@ -225,7 +218,13 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da taosWLockLatch(&pe->latch); } - SHashNode *pNode = pe->head.next; + SHashNode *pNode = pe->next; + if (pe->num > 0) { + assert(pNode != NULL); + } else { + assert(pNode == NULL); + } + while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { assert(pNode->hashVal == hashVal); @@ -237,7 +236,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table - pushfrontNode(pe, pNewNode); + pushfrontNodeInEntryList(pe, pNewNode); if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); @@ -261,7 +260,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // enable resize __rd_unlock(&pHashObj->lock, pHashObj->type); - FREE_HASH_NODE(pNewNode); + DO_FREE_HASH_NODE(pNewNode); return pHashObj->enableUpdate ? 
0 : -1; } } @@ -301,6 +300,7 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f if (fp != NULL) { fp(pNode->data); } + data = pNode->data; } @@ -316,13 +316,12 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { return taosHashRemoveWithData(pHashObj, key, keyLen, NULL, 0); } -static FORCE_INLINE void doPopFromEntryList(SHashEntry *pe, SHashNode *pNode) { +static FORCE_INLINE void doPopNextFromEntryList(SHashEntry *pe, SHashNode *pNode) { SHashNode *pNext = pNode->next; - - assert(pNode->prev != NULL); - pNode->prev->next = pNext; if (pNext != NULL) { - pNext->prev = pNode->prev; + pNode->next = pNext->next; + } else { + pNode->next = NULL; } pe->num -= 1; @@ -351,9 +350,27 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe taosWLockLatch(&pe->latch); } - SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); - if (pNode != NULL) { - doPopFromEntryList(pe, pNode); + SHashNode *pNode = pe->next; + SHashNode *pRes = NULL; + // remove it + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + pe->next = pNode->next; + pRes = pNode; + } else { + while (pNode->next != NULL) { + if (((pNode->next)->keyLen == keyLen) && (memcmp((pNode->next)->key, key, keyLen) == 0)) { + assert((pNode->next)->hashVal == hashVal); + break; + } + + pNode = pNode->next; + } + + + if (pNode->next != NULL) { + pRes = pNode->next; + pNode->next = pNode->next->next; + } } if (pHashObj->type == HASH_ENTRY_LOCK) { @@ -362,18 +379,14 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe __rd_unlock(&pHashObj->lock, pHashObj->type); - if (data != NULL) { - memcpy(data, pNode->data, dsize); + if (data != NULL && pRes != NULL) { + memcpy(data, pRes->data, dsize); } - if (pNode != NULL) { + if (pRes != NULL) { + pe->num -= 1; atomic_sub_fetch_64(&pHashObj->size, 1); - - pNode->next = NULL; - pNode->prev = NULL; - - FREE_HASH_NODE(pNode); - + FREE_HASH_NODE(pHashObj, pRes); return 0; } else { return -1; @@ -391,7 +404,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi int32_t numOfEntries = pHashObj->capacity; for (int32_t i = 0; i < numOfEntries; ++i) { SHashEntry *pEntry = pHashObj->hashList[i]; - if (pEntry->num <= 0) { + if (pEntry->num == 0) { continue; } @@ -399,20 +412,35 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi taosWLockLatch(&pEntry->latch); } - SHashNode *pNode = pEntry->head.next; - assert(pNode != NULL); - - SHashNode *pNext = NULL; - while (pNode != NULL) { - pNext = pNode->next; - - // not qualified, remove it + // todo remove first node + SHashNode *pNode = NULL; + while((pNode = pEntry->next) != NULL) { if (fp && (!fp(param, pNode->data))) { - doPopFromEntryList(pEntry, pNode); - FREE_HASH_NODE(pNode); - } + pEntry->num -= 1; + pEntry->next = pNode->next; - pNode = pNext; + FREE_HASH_NODE(pHashObj, pNode); + } else { + break; + } + } + + // handle the following node + if (pNode != NULL) { + assert(pNode == pEntry->next); + SHashNode *pNext = NULL; + + while ((pNext = pNode->next) != NULL) { + // not qualified, remove it + if (fp && (!fp(param, pNext->data))) { + pNode->next = pNext->next; + pEntry->num -= 1; + + FREE_HASH_NODE(pHashObj, pNext); + } else { + pNode = pNext; + } + } } if (pHashObj->type == HASH_ENTRY_LOCK) { @@ -437,18 +465,15 @@ void taosHashCleanup(SHashObj *pHashObj) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { SHashEntry *pEntry = pHashObj->hashList[i]; 
if (pEntry->num == 0) { - assert(pEntry->head.next == 0); + assert(pEntry->next == 0); continue; } - pNode = pEntry->head.next; + pNode = pEntry->next; while (pNode) { pNext = pNode->next; - if (pHashObj->freeFp) { - pHashObj->freeFp(pNode->data); - } + FREE_HASH_NODE(pHashObj, pNode); - FREE_HASH_NODE(pNode); pNode = pNext; } } @@ -501,6 +526,8 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { while (1) { SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; if (pEntry->num == 0) { + assert(pEntry->next == NULL); + pIter->entryIndex++; continue; } @@ -509,7 +536,7 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { taosRLockLatch(&pEntry->latch); } - pIter->pCur = pEntry->head.next; + pIter->pCur = pEntry->next; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; @@ -595,7 +622,7 @@ void taosHashTableResize(SHashObj *pHashObj) { return; } - void *pNewEntryList = realloc(pHashObj->hashList, sizeof(SHashEntry) * newSize); + void *pNewEntryList = realloc(pHashObj->hashList, sizeof(void*) * newSize); if (pNewEntryList == NULL) { // todo handle error // uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); return; @@ -616,33 +643,39 @@ void taosHashTableResize(SHashObj *pHashObj) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { SHashEntry *pe = pHashObj->hashList[i]; if (pe->num == 0) { - assert(pe->head.next == NULL); + assert(pe->next == NULL); continue; } - pNode = pe->head.next; - while (pNode) { + while ((pNode = pe->next) != NULL) { int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity); - if (j == i) { // this key locates in the same slot, no need to relocate it - pNode = pNode->next; - } else { - pNext = pNode->next; - assert(pNode != pNext && (pNext == NULL || pNext->prev == pNode) && pNode->prev->next == pNode); + if (j != i) { + pe->num -= 1; + pe->next = pNode->next; - doPopFromEntryList(pe, pNode); - - // clear pointer - pNode->next = NULL; - pNode->prev = NULL; - - // added into new slot SHashEntry *pNewEntry = pHashObj->hashList[j]; - pushfrontNode(pNewEntry, pNode); - - // continue - pNode = pNext; + pushfrontNodeInEntryList(pNewEntry, pNode); + } else { + break; } } + + if (pNode != NULL) { + while ((pNext = pNode->next) != NULL) { + int32_t j = HASH_INDEX(pNext->hashVal, pHashObj->capacity); + if (j != i) { + pNode->next = pNext->next; + pNext->next = NULL; + + // added into new slot + SHashEntry *pNewEntry = pHashObj->hashList[j]; + pushfrontNodeInEntryList(pNewEntry, pNext); + } else { + pNode = pNext; + } + } + } + } // uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity, @@ -668,17 +701,11 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s return pNewNode; } -void pushfrontNode(SHashEntry *pEntry, SHashNode *pNode) { +void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode) { assert(pNode != NULL && pEntry != NULL); - SHashNode *pNext = pEntry->head.next; - if (pNext != NULL) { - pNext->prev = pNode; - } - - pNode->next = pNext; - pNode->prev = &pEntry->head; - pEntry->head.next = pNode; + pNode->next = pEntry->next; + pEntry->next = pNode; pEntry->num += 1; } @@ -700,7 +727,7 @@ SHashNode *getNextHashNode(SHashMutableIterator *pIter) { taosRLockLatch(&pEntry->latch); } - p = pEntry->head.next; + p = pEntry->next; if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { taosRUnLockLatch(&pEntry->latch); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3c642b5098..3cf5c1382f 100644 
--- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -74,6 +74,8 @@ static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void *qhandle) { pRead->rpcMsg.handle = NULL; atomic_add_fetch_32(&pVnode->refCount, 1); + + vDebug("QInfo:%p add to query task queue for exec, msg:%p", qhandle, pRead); taosWriteQitem(pVnode->rqueue, TAOS_QTYPE_QUERY, pRead); } @@ -83,7 +85,6 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void* handle, b int32_t code = TSDB_CODE_SUCCESS; if ((code = qDumpRetrieveResult(handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { if (continueExec) { - vDebug("QInfo:%p add to query task queue for exec", handle); vnodePutItemIntoReadQueue(pVnode, handle); pRet->qhandle = handle; *freeHandle = false; From 6f6896293b6336d7809ce0b2350c36f88bbfe4ec Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 18:57:48 +0800 Subject: [PATCH 033/190] [td-225] add some logs. --- src/util/src/hash.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 8c74db0082..8e4aa48127 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -352,6 +352,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe SHashNode *pNode = pe->next; SHashNode *pRes = NULL; + // remove it if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { pe->next = pNode->next; @@ -622,6 +623,7 @@ void taosHashTableResize(SHashObj *pHashObj) { return; } + int64_t st = taosGetTimestampUs(); void *pNewEntryList = realloc(pHashObj->hashList, sizeof(void*) * newSize); if (pNewEntryList == NULL) { // todo handle error // uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); @@ -678,8 +680,10 @@ void taosHashTableResize(SHashObj *pHashObj) { } - // uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity, - // ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); + int64_t et = taosGetTimestampUs(); + + uDebug("hash table resize completed, new capacity:%"PRId64", load factor:%f, elapsed time:%fms", pHashObj->capacity, + ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { From bd2b9b2b3563b0c40836ad2514dfd1baceeede7a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 19:12:04 +0800 Subject: [PATCH 034/190] [td-225] add some logs. 
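The change below does more than add logs: it asserts in taosHashGetCB, taosHashRemoveWithData and taosHashTableResize that an entry's cached element count and its head pointer agree, asserts a non-NULL head in taosHashCleanup, and makes the early-return paths of the get/remove functions release the table's read lock instead of returning while still holding it. A standalone sketch of the invariant the new asserts enforce (simplified stand-in types, not the real SHashEntry/SHashNode):

    #include <assert.h>
    #include <stddef.h>

    /* simplified stand-ins for SHashNode / SHashEntry, illustration only */
    typedef struct Node  { struct Node *next; } Node;
    typedef struct Entry { int num; Node *next; } Entry;

    /* the invariant asserted throughout this patch: the cached element
       count of an entry and its head pointer must always agree */
    static void checkEntryInvariant(const Entry *pe) {
      if (pe->num == 0) {
        assert(pe->next == NULL);
      } else {
        assert(pe->next != NULL);
      }
    }

    int main(void) {
      Node  n     = { NULL };
      Entry empty = { 0, NULL };
      Entry one   = { 1, &n };
      checkEntryInvariant(&empty);
      checkEntryInvariant(&one);
      return 0;
    }
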
--- src/util/src/hash.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 8e4aa48127..19f22766d6 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -284,6 +284,7 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f // no data, return directly if (atomic_load_32(&pe->num) == 0) { + __rd_unlock(&pHashObj->lock, pHashObj->type); return NULL; } @@ -295,6 +296,12 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f taosRLockLatch(&pe->latch); } + if (pe->num > 0) { + assert(pe->next != NULL); + } else { + assert(pe->next == NULL); + } + SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); if (pNode != NULL) { if (fp != NULL) { @@ -342,6 +349,8 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe // no data, return directly if (pe->num == 0) { + assert(pe->next == NULL); + __rd_unlock(&pHashObj->lock, pHashObj->type); return -1; } @@ -374,6 +383,12 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe } } + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } @@ -471,6 +486,8 @@ void taosHashCleanup(SHashObj *pHashObj) { } pNode = pEntry->next; + assert(pNode != NULL); + while (pNode) { pNext = pNode->next; FREE_HASH_NODE(pHashObj, pNode); @@ -655,6 +672,12 @@ void taosHashTableResize(SHashObj *pHashObj) { pe->num -= 1; pe->next = pNode->next; + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + SHashEntry *pNewEntry = pHashObj->hashList[j]; pushfrontNodeInEntryList(pNewEntry, pNode); } else { @@ -671,11 +694,25 @@ void taosHashTableResize(SHashObj *pHashObj) { // added into new slot SHashEntry *pNewEntry = pHashObj->hashList[j]; + + if (pNewEntry->num == 0) { + assert(pNewEntry->next == NULL); + } else { + assert(pNewEntry->next != NULL); + } + pushfrontNodeInEntryList(pNewEntry, pNext); } else { pNode = pNext; } } + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + } } From d6c19bbb5e609ca2293c3f070add384bba07889a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 19:23:24 +0800 Subject: [PATCH 035/190] [td-225] add some logs. 
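This follow-up tightens the checking in taosHashRemoveWithData: the entry invariant is now asserted right after the slot is located and again on both the success and the failure path, rather than only just before the latch is released. These checks guard the singly linked per-entry list that removal has to patch by hand now that the prev pointers are gone; a standalone sketch of that removal logic (simplified types and a hypothetical removeFromEntry helper, not the real implementation):

    #include <stddef.h>

    /* illustration only: a singly linked per-entry list, as used since the
       prev pointers were dropped from the hash nodes */
    typedef struct Node  { int key; struct Node *next; } Node;
    typedef struct Entry { int num; Node *next; } Entry;

    /* unlink the node carrying `key`; the head needs its own branch because
       there is no prev pointer left to repair */
    static Node *removeFromEntry(Entry *pe, int key) {
      Node *removed = NULL;

      if (pe->next == NULL) {            /* empty entry, nothing to remove */
        return NULL;
      }

      if (pe->next->key == key) {        /* the head matches */
        removed  = pe->next;
        pe->next = removed->next;
      } else {                           /* walk the remainder of the list */
        Node *p = pe->next;
        while (p->next != NULL && p->next->key != key) {
          p = p->next;
        }
        if (p->next != NULL) {
          removed = p->next;
          p->next = removed->next;
        }
      }

      if (removed != NULL) {
        pe->num -= 1;                    /* keep the counter in sync */
      }
      return removed;
    }

    int main(void) {
      Node  b = { 2, NULL };
      Node  a = { 1, &b };
      Entry e = { 2, &a };
      Node *r1 = removeFromEntry(&e, 2);
      Node *r2 = removeFromEntry(&e, 1);
      return (r1 == &b && r2 == &a && e.num == 0 && e.next == NULL) ? 0 : 1;
    }
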
--- src/util/src/hash.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 19f22766d6..4c98e59ef0 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -347,6 +347,12 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + // no data, return directly if (pe->num == 0) { assert(pe->next == NULL); @@ -383,12 +389,6 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe } } - if (pe->num == 0) { - assert(pe->next == NULL); - } else { - assert(pe->next != NULL); - } - if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } @@ -403,8 +403,22 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe pe->num -= 1; atomic_sub_fetch_64(&pHashObj->size, 1); FREE_HASH_NODE(pHashObj, pRes); + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + return 0; } else { + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + return -1; } } From 6eca95b6ce48c77fbd4376ec04bed75534521f19 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 19:36:01 +0800 Subject: [PATCH 036/190] [td-225] add some logs. --- src/util/src/hash.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 4c98e59ef0..e7e46cd328 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -238,6 +238,12 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da // no data in hash table with the specified key, add it into hash table pushfrontNodeInEntryList(pe, pNewNode); + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + if (pHashObj->type == HASH_ENTRY_LOCK) { taosWUnLockLatch(&pe->latch); } @@ -449,6 +455,12 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi pEntry->num -= 1; pEntry->next = pNode->next; + if (pEntry->num == 0) { + assert(pEntry->next == NULL); + } else { + assert(pEntry->next != NULL); + } + FREE_HASH_NODE(pHashObj, pNode); } else { break; @@ -466,6 +478,12 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi pNode->next = pNext->next; pEntry->num -= 1; + if (pEntry->num == 0) { + assert(pEntry->next == NULL); + } else { + assert(pEntry->next != NULL); + } + FREE_HASH_NODE(pHashObj, pNext); } else { pNode = pNext; @@ -675,6 +693,13 @@ void taosHashTableResize(SHashObj *pHashObj) { pHashObj->capacity = newSize; for (int32_t i = 0; i < pHashObj->capacity; ++i) { SHashEntry *pe = pHashObj->hashList[i]; + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + if (pe->num == 0) { assert(pe->next == NULL); continue; From 28f2f102aff40f131ab454f2c4dca2e30c0b90b1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 4 Aug 2020 19:57:16 +0800 Subject: [PATCH 037/190] [td-225] add some logs. 
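The fix below closes a counter leak in taosHashTableResize: when an interior node is relocated to a different slot, pushfrontNodeInEntryList bumps the destination entry's num, but the source entry's num was never decremented (the branch that relocates the head node already did so). A sketch of the relocation step with both counters kept in sync (simplified stand-in types and helpers, not the real code):

    #include <stddef.h>

    /* simplified stand-ins, illustration only */
    typedef struct Node  { unsigned hashVal; struct Node *next; } Node;
    typedef struct Entry { int num; Node *next; } Entry;

    /* push a detached node onto the front of an entry; mirrors what
       pushfrontNodeInEntryList does, including the counter bump */
    static void pushFront(Entry *dst, Node *n) {
      n->next   = dst->next;
      dst->next = n;
      dst->num += 1;
    }

    /* relocate prev's successor from entry `src` to entry `dst`; both
       counters have to move, which is what this change fixes */
    static void relocateNext(Entry *src, Node *prev, Entry *dst) {
      Node *moved = prev->next;
      prev->next  = moved->next;   /* unlink from the source list        */
      moved->next = NULL;
      src->num   -= 1;             /* the decrement this patch adds      */
      pushFront(dst, moved);       /* destination counter is bumped here */
    }

    int main(void) {
      Node  b   = { 7u, NULL };
      Node  a   = { 3u, &b };
      Entry src = { 2, &a };
      Entry dst = { 0, NULL };
      relocateNext(&src, &a, &dst);   /* move b over to dst */
      return (src.num == 1 && dst.num == 1 && dst.next == &b) ? 0 : 1;
    }
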
--- src/util/src/hash.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index e7e46cd328..298d81251c 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -728,6 +728,8 @@ void taosHashTableResize(SHashObj *pHashObj) { while ((pNext = pNode->next) != NULL) { int32_t j = HASH_INDEX(pNext->hashVal, pHashObj->capacity); if (j != i) { + pe->num -= 1; + pNode->next = pNext->next; pNext->next = NULL; From d08e9972b6f05f7b6e36d9d138ef5d4d6664f580 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 00:48:19 +0800 Subject: [PATCH 038/190] [td-225] prevent qinfo from being released while it is in queue. --- src/query/src/qExecutor.c | 3 ++- src/vnode/src/vnodeRead.c | 26 +++++++++++++++++--------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ac90ca9595..b5fc67e4f1 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6408,6 +6408,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex return TSDB_CODE_QRY_INVALID_QHANDLE; } + *buildRes = false; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code); @@ -6751,7 +6752,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2; + const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2 * 1000; SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3cf5c1382f..896b27e03f 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -159,7 +159,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); - if (handle == NULL) { // failed to register qhandle + if (handle == NULL) { // failed to register qhandle, todo add error test case vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo, tstrerror(pRsp->code)); pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; @@ -180,12 +180,9 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } if (handle != NULL) { - vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, register qhandle and return to app", vgId, *handle); - + vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); vnodePutItemIntoReadQueue(pVnode, *handle); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } - } else { assert(pCont != NULL); @@ -194,18 +191,19 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); code = TSDB_CODE_QRY_INVALID_QHANDLE; } else { - vDebug("vgId:%d, QInfo:%p, dnode continue exec query", pVnode->vgId, (void*) pCont); + vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, (void*) pCont); bool freehandle = false; bool buildRes = qTableQuery(*handle); // do execute query - // build query rsp + // build query rsp, the retrieve request has reached here already if (buildRes) { // update the connection info according to the retrieve connection pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*handle); assert(pReadMsg->rpcMsg.handle != NULL); - 
vDebug("vgId:%d, QInfo:%p, start to build result rsp after query paused, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); + vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *handle, + pReadMsg->rpcMsg.handle); code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, *handle, &freehandle); // todo test the error code case @@ -214,7 +212,17 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freehandle); + // If retrieval request has not arrived, release the qhandle and decrease the reference count to allow + // the queryMgmt to free it when expired + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + + // NOTE: + // if the qhandle is put into query vread queue and wait to be executed by worker in read queue, + // the reference count of qhandle can not be decreased. Otherwise, qhandle may be released before or in the + // procedure of query execution + if (freehandle) { + qReleaseQInfo(pVnode->qMgmt, (void **)&handle, freehandle); + } } } From 7bb42b5f57c076a67b494b91a94aaf531d568d71 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 11:42:36 +0800 Subject: [PATCH 039/190] [td-225] fix mem leak --- src/vnode/src/vnodeRead.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 896b27e03f..5c7bc869ba 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -85,12 +85,12 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void* handle, b int32_t code = TSDB_CODE_SUCCESS; if ((code = qDumpRetrieveResult(handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { if (continueExec) { + *freeHandle = false; vnodePutItemIntoReadQueue(pVnode, handle); pRet->qhandle = handle; - *freeHandle = false; } else { - vDebug("QInfo:%p exec completed", handle); *freeHandle = true; + vDebug("QInfo:%p exec completed, free handle:%d", handle, *freeHandle); } } else { pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -214,6 +214,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // If retrieval request has not arrived, release the qhandle and decrease the reference count to allow // the queryMgmt to free it when expired + void** dup = handle; qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); // NOTE: @@ -221,7 +222,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // the reference count of qhandle can not be decreased. 
Otherwise, qhandle may be released before or in the // procedure of query execution if (freehandle) { - qReleaseQInfo(pVnode->qMgmt, (void **)&handle, freehandle); + qReleaseQInfo(pVnode->qMgmt, (void **)&dup, freehandle); } } } @@ -268,16 +269,23 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); } else { // result is not ready, return immediately if (!buildRes) { qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); return TSDB_CODE_QRY_NOT_READY; } + void** dup = handle; code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + + // not added into task queue, free it immediate + if (freeHandle) { + qReleaseQInfo(pVnode->qMgmt, (void**) &dup, freeHandle); + } } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); return code; } From 4222cfcda663152409b1fb04594d4104a7f6df3c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 15:48:35 +0800 Subject: [PATCH 040/190] [td-225] fix bug in hash --- src/util/src/hash.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 298d81251c..06326e037e 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -376,8 +376,9 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe // remove it if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { - pe->next = pNode->next; + pe->num -= 1; pRes = pNode; + pe->next = pNode->next; } else { while (pNode->next != NULL) { if (((pNode->next)->keyLen == keyLen) && (memcmp((pNode->next)->key, key, keyLen) == 0)) { @@ -390,6 +391,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe if (pNode->next != NULL) { + pe->num -= 1; pRes = pNode->next; pNode->next = pNode->next->next; } @@ -406,7 +408,6 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe } if (pRes != NULL) { - pe->num -= 1; atomic_sub_fetch_64(&pHashObj->size, 1); FREE_HASH_NODE(pHashObj, pRes); From 0ce566550d061a3dcdde5da6e862b2034ace4997 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 16:24:42 +0800 Subject: [PATCH 041/190] [td-225] fix bug in hash and queryhandle management. 
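The ownership of a qhandle is reworked here: vnodeRead passes the registered handle around as a void** and releases it only when it is not re-queued, qHasMoreResultsToRetrieve is replaced by qQueryCompleted, and taosCacheRelease now removes the node from the hash table before dropping its reference count so that another thread cannot free it first. On the hash side, taosHashRemoveWithData re-checks the entry after the per-entry write latch has been acquired, because another thread may have emptied it between the unlocked check and the lock. A standalone sketch of that check/lock/re-check pattern (plain pthread mutex and simplified types, illustration only, not the real latch code):

    #include <pthread.h>
    #include <stddef.h>

    /* simplified stand-ins, illustration only; a plain mutex replaces the
       latch/rwlock pair used by the real hash table */
    typedef struct Node  { struct Node *next; } Node;
    typedef struct Entry {
      int             num;
      Node           *next;
      pthread_mutex_t latch;
    } Entry;

    /* returns 0 on success, -1 if the entry is (or has just become) empty */
    static int removeHead(Entry *pe, Node **removed) {
      if (pe->num == 0) {                 /* cheap check without the latch */
        return -1;
      }

      pthread_mutex_lock(&pe->latch);
      if (pe->num == 0) {                 /* double check after locking:
                                             another thread may have emptied
                                             the entry in the meantime */
        pthread_mutex_unlock(&pe->latch);
        return -1;
      }

      *removed  = pe->next;               /* unlink the head under the latch */
      pe->next  = (*removed)->next;
      pe->num  -= 1;
      pthread_mutex_unlock(&pe->latch);
      return 0;
    }

    int main(void) {
      Node  n = { NULL };
      Entry e = { 1, &n };
      pthread_mutex_init(&e.latch, NULL);
      Node *removed = NULL;
      int   rc = removeHead(&e, &removed);
      pthread_mutex_destroy(&e.latch);
      return (rc == 0 && removed == &n && e.num == 0) ? 0 : 1;
    }
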
--- src/inc/query.h | 3 ++ src/query/src/qExecutor.c | 58 ++++++++++++------------------------- src/util/src/hash.c | 22 +++++++++----- src/util/src/tcache.c | 9 ++++-- src/vnode/src/vnodeRead.c | 61 ++++++++++++++++++--------------------- 5 files changed, 72 insertions(+), 81 deletions(-) diff --git a/src/inc/query.h b/src/inc/query.h index ec1e458b62..0c18f85dc3 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -76,6 +76,9 @@ void* qGetResultRetrieveMsg(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +int32_t qQueryCompleted(qinfo_t qinfo); + + /** * destroy query info structure * @param qHandle diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index b5fc67e4f1..0ffb1a4cde 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6432,34 +6432,6 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex return code; } -bool qHasMoreResultsToRetrieve(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (!isValidQInfo(pQInfo) || pQInfo->code != TSDB_CODE_SUCCESS) { - qDebug("QInfo:%p invalid qhandle or error occurs, abort query, code:%x", pQInfo, pQInfo->code); - return false; - } - - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - bool ret = false; - if (Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - ret = false; - } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { - ret = true; - } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - ret = true; - } else { - assert(0); - } - - if (ret) { - qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); - } - - return ret; -} - int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { SQInfo *pQInfo = (SQInfo *)qinfo; @@ -6487,11 +6459,11 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co int32_t code = pQInfo->code; if (code == TSDB_CODE_SUCCESS) { - (*pRsp)->offset = htobe64(pQuery->limit.offset); + (*pRsp)->offset = htobe64(pQuery->limit.offset); (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); } else { - (*pRsp)->useconds = 0; - (*pRsp)->offset = 0; + (*pRsp)->offset = 0; + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); } (*pRsp)->precision = htons(pQuery->precision); @@ -6503,22 +6475,30 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co } pQInfo->rspContext = NULL; - pQInfo->dataReady = QUERY_RESULT_NOT_READY; + pQInfo->dataReady = QUERY_RESULT_NOT_READY; if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - (*pRsp)->completed = 1; // notify no more result to client - } - - if (qHasMoreResultsToRetrieve(pQInfo)) { - *continueExec = true; - } else { // failed to dump result, free qhandle immediately *continueExec = false; - qKillQuery(pQInfo); + (*pRsp)->completed = 1; // notify no more result to client + } else { + *continueExec = true; + qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); } return code; } +int32_t qQueryCompleted(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + return IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); +} + int32_t qKillQuery(qinfo_t qinfo) { SQInfo *pQInfo = (SQInfo *)qinfo; diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 06326e037e..96b4e9cd28 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -353,16 +353,9 @@ 
int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; - if (pe->num == 0) { - assert(pe->next == NULL); - } else { - assert(pe->next != NULL); - } - // no data, return directly if (pe->num == 0) { assert(pe->next == NULL); - __rd_unlock(&pHashObj->lock, pHashObj->type); return -1; } @@ -371,6 +364,21 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe taosWLockLatch(&pe->latch); } + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + + // double check after locked + if (pe->num == 0) { + assert(pe->next == NULL); + taosWUnLockLatch(&pe->latch); + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return -1; + } + SHashNode *pNode = pe->next; SHashNode *pRes = NULL; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index b5f8515f78..8a4145f2f8 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -438,8 +438,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { char* key = pNode->key; char* d = pNode->data; - int32_t ref = T_REF_DEC(pNode); - uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref); + int32_t ref = T_REF_VAL_GET(pNode); + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref - 1); /* * If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users @@ -449,6 +449,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * that tries to do the same thing. */ if (inTrashCan) { + ref = T_REF_DEC(pNode); + if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); @@ -459,7 +461,10 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader); } } else { + // NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread + // when reaches here. int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + ref = T_REF_DEC(pNode); // successfully remove from hash table, if failed, this node must have been move to trash already, do nothing. // note that the remove operation can be executed only once. 
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 5c7bc869ba..0d30be7662 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -66,7 +66,7 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg); } -static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void *qhandle) { +static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle) { SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg)); pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY; pRead->pCont = qhandle; @@ -75,22 +75,22 @@ static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void *qhandle) { atomic_add_fetch_32(&pVnode->refCount, 1); - vDebug("QInfo:%p add to query task queue for exec, msg:%p", qhandle, pRead); + vDebug("QInfo:%p add to vread queue for exec query, msg:%p", *qhandle, pRead); taosWriteQitem(pVnode->rqueue, TAOS_QTYPE_QUERY, pRead); } -static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void* handle, bool* freeHandle) { +static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle, bool* freeHandle) { bool continueExec = false; int32_t code = TSDB_CODE_SUCCESS; - if ((code = qDumpRetrieveResult(handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { + if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { if (continueExec) { *freeHandle = false; vnodePutItemIntoReadQueue(pVnode, handle); - pRet->qhandle = handle; + pRet->qhandle = *handle; } else { *freeHandle = true; - vDebug("QInfo:%p exec completed, free handle:%d", handle, *freeHandle); + vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -181,50 +181,45 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle != NULL) { vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); - vnodePutItemIntoReadQueue(pVnode, *handle); + vnodePutItemIntoReadQueue(pVnode, handle); } } else { assert(pCont != NULL); + void** qhandle = (void**) pCont; +// *handle = /*(void*) */pCont; - handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); - if (handle == NULL) { - vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); - code = TSDB_CODE_QRY_INVALID_QHANDLE; - } else { - vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, (void*) pCont); +// handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); +// if (handle == NULL) { +// vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); +// code = TSDB_CODE_QRY_INVALID_QHANDLE; +// } else { + vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); bool freehandle = false; - bool buildRes = qTableQuery(*handle); // do execute query + bool buildRes = qTableQuery(*qhandle); // do execute query // build query rsp, the retrieve request has reached here already if (buildRes) { // update the connection info according to the retrieve connection - pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*handle); + pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*qhandle); assert(pReadMsg->rpcMsg.handle != NULL); - vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *handle, + vDebug("vgId:%d, 
QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, pReadMsg->rpcMsg.handle); - code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, *handle, &freehandle); + code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); // todo test the error code case if (code == TSDB_CODE_SUCCESS) { code = TSDB_CODE_QRY_HAS_RSP; } + } else { + freehandle = qQueryCompleted(*qhandle); } - // If retrieval request has not arrived, release the qhandle and decrease the reference count to allow - // the queryMgmt to free it when expired - void** dup = handle; - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); - - // NOTE: - // if the qhandle is put into query vread queue and wait to be executed by worker in read queue, - // the reference count of qhandle can not be decreased. Otherwise, qhandle may be released before or in the - // procedure of query execution - if (freehandle) { - qReleaseQInfo(pVnode->qMgmt, (void **)&dup, freehandle); + // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle. + if (freehandle || (!buildRes)) { + qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); } - } } return code; @@ -269,7 +264,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); } else { // result is not ready, return immediately if (!buildRes) { qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); @@ -277,12 +272,12 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } void** dup = handle; - code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); + code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); - // not added into task queue, free it immediate + // not added into task queue, the query must be completed already, free qhandle immediate if (freeHandle) { - qReleaseQInfo(pVnode->qMgmt, (void**) &dup, freeHandle); + qReleaseQInfo(pVnode->qMgmt, (void**) &dup, true); } } From 175dfb56a02e3691441a30124bf334fd6b8b8932 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 17:03:59 +0800 Subject: [PATCH 042/190] [td-225] fix bug in hash --- src/util/src/tcache.c | 2 ++ src/vnode/src/vnodeRead.c | 52 +++++++++++++++++---------------------- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 8a4145f2f8..51991974ff 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -318,6 +318,8 @@ static void incRefFn(void* ptNode) { assert(ptNode != NULL); SCacheDataNode** p = (SCacheDataNode**) ptNode; + + assert(T_REF_VAL_GET(*p) >= 0); int32_t ret = T_REF_INC(*p); assert(ret > 0); } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 0d30be7662..e391fc5eb8 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -186,40 +186,35 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } else { assert(pCont != NULL); void** qhandle = (void**) pCont; -// *handle = /*(void*) */pCont; -// handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); -// if (handle == NULL) { -// vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); -// code = 
TSDB_CODE_QRY_INVALID_QHANDLE; -// } else { - vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); + vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); - bool freehandle = false; - bool buildRes = qTableQuery(*qhandle); // do execute query + bool freehandle = false; + bool buildRes = qTableQuery(*qhandle); // do execute query - // build query rsp, the retrieve request has reached here already - if (buildRes) { - // update the connection info according to the retrieve connection - pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*qhandle); - assert(pReadMsg->rpcMsg.handle != NULL); + // build query rsp, the retrieve request has reached here already + if (buildRes) { + // update the connection info according to the retrieve connection + pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*qhandle); + assert(pReadMsg->rpcMsg.handle != NULL); - vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, - pReadMsg->rpcMsg.handle); - code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); + vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, + pReadMsg->rpcMsg.handle); + code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); - // todo test the error code case - if (code == TSDB_CODE_SUCCESS) { - code = TSDB_CODE_QRY_HAS_RSP; - } - } else { - freehandle = qQueryCompleted(*qhandle); + // todo test the error code case + if (code == TSDB_CODE_SUCCESS) { + code = TSDB_CODE_QRY_HAS_RSP; } + } else { + freehandle = qQueryCompleted(*qhandle); + } - // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle. - if (freehandle || (!buildRes)) { - qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); - } + // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle. + // if not build result, free it not by forced. 
+ if (freehandle || (!buildRes)) { + qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); + } } return code; @@ -273,9 +268,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { void** dup = handle; code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); - // not added into task queue, the query must be completed already, free qhandle immediate + // not added into task queue, the query must be completed already, free qhandle immediately if (freeHandle) { qReleaseQInfo(pVnode->qMgmt, (void**) &dup, true); } From 321f7b276bebaef6c485361ccee469ba8737d7ec Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 5 Aug 2020 23:07:34 +0800 Subject: [PATCH 043/190] [td-225] fix bug in hash --- src/util/src/hash.c | 13 +----- src/util/src/tcache.c | 86 +-------------------------------------- src/vnode/src/vnodeRead.c | 12 +++--- 3 files changed, 8 insertions(+), 103 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 96b4e9cd28..a70256666f 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -329,17 +329,6 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { return taosHashRemoveWithData(pHashObj, key, keyLen, NULL, 0); } -static FORCE_INLINE void doPopNextFromEntryList(SHashEntry *pe, SHashNode *pNode) { - SHashNode *pNext = pNode->next; - if (pNext != NULL) { - pNode->next = pNext->next; - } else { - pNode->next = NULL; - } - - pe->num -= 1; -} - int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) { if (pHashObj == NULL || pHashObj->size <= 0) { return -1; @@ -457,7 +446,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi taosWLockLatch(&pEntry->latch); } - // todo remove first node + // todo remove the first node SHashNode *pNode = NULL; while((pNode = pEntry->next) != NULL) { if (fp && (!fp(param, pNode->data))) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 51991974ff..b6df852876 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -116,16 +116,6 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo free(pNode); } -/** - * move the old node into trash - * @param pCacheObj - * @param pNode - */ -static FORCE_INLINE void taosCacheMoveToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { - taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - taosAddToTrash(pCacheObj, pNode); -} - static FORCE_INLINE void doRemoveElemInTrashcan(SCacheObj* pCacheObj, STrashElem *pElem) { if (pElem->pData->signature != (uint64_t) pElem->pData) { uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); @@ -152,52 +142,6 @@ static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem free(pElem->pData); free(pElem); } -/** - * update data in cache - * @param pCacheObj - * @param pNode - * @param key - * @param keyLen - * @param pData - * @param dataSize - * @return - */ -static UNUSED_FUNC SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode* pNode, SCacheDataNode* pNewNode, - const char *key, int32_t keyLen) { - - // only a node is not referenced by any other object, in-place update it - if (T_REF_VAL_GET(pNode) > 0) { - taosCacheMoveToTrash(pCacheObj, pNode); - } - - T_REF_INC(pNewNode); - - // addedTime new element to hashtable - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); - 
return pNewNode; -} - -/** - * addedTime data into hash table - * @param key - * @param pData - * @param size - * @param pCacheObj - * @param keyLen - * @param pNode - * @return - */ -static FORCE_INLINE SCacheDataNode *taosAddToCacheImpl(SCacheObj *pCacheObj, const char *key, size_t keyLen, const void *pData, - size_t dataSize, uint64_t duration) { - SCacheDataNode *pNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); - if (pNode == NULL) { - return NULL; - } - - T_REF_INC(pNode); - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode, sizeof(void *)); - return pNode; -} /** * do cleanup the taos cache @@ -318,8 +262,8 @@ static void incRefFn(void* ptNode) { assert(ptNode != NULL); SCacheDataNode** p = (SCacheDataNode**) ptNode; - assert(T_REF_VAL_GET(*p) >= 0); + int32_t ret = T_REF_INC(*p); assert(ret > 0); } @@ -344,34 +288,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen return pData; } -//void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) { -// if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { -// return NULL; -// } -// -// __cache_rd_lock(pCacheObj); -// -// SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); -// if (ptNode != NULL) { -// T_REF_INC(*ptNode); -// (*ptNode)->expireTime = expireTime; // taosGetTimestampMs() + (*ptNode)->lifespan; -// } -// -// __cache_unlock(pCacheObj); -// -// if (ptNode != NULL) { -// atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); -// uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key, -// (*ptNode)->data, T_REF_VAL_GET(*ptNode)); -// } else { -// atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); -// uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); -// } -// -// atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); -// return (ptNode != NULL) ? 
(*ptNode)->data : NULL; -//} - void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) { if (pCacheObj == NULL || data == NULL) return NULL; diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index e391fc5eb8..26c1062479 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -259,20 +259,20 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + freeHandle = true; } else { // result is not ready, return immediately if (!buildRes) { qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); return TSDB_CODE_QRY_NOT_READY; } - void** dup = handle; code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); + } - // not added into task queue, the query must be completed already, free qhandle immediately - if (freeHandle) { - qReleaseQInfo(pVnode->qMgmt, (void**) &dup, true); - } + // if qhandle is not added into task queue, the query must be completed already or paused with error , + // free qhandle immediately + if (freeHandle) { + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); } return code; From 846704de87b6dab093180ce946f91407a137cd05 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 10:36:41 +0800 Subject: [PATCH 044/190] [td-473] --- src/client/inc/tsclient.h | 9 +++--- src/client/src/tscSQLParser.c | 56 ++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 28 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index e3e1d44514..4fd9549e75 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -80,8 +80,9 @@ typedef struct STableMetaInfo { * 2. keep the vgroup index for multi-vnode insertion */ int32_t vgroupIndex; - char name[TSDB_TABLE_ID_LEN]; // (super) table name - SArray* tagColList; // SArray, involved tag columns + char name[TSDB_TABLE_FNAME_LEN]; // (super) table name + char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql + SArray* tagColList; // SArray, involved tag columns } STableMetaInfo; /* the structure for sql function in select clause */ @@ -128,7 +129,7 @@ typedef struct SCond { } SCond; typedef struct SJoinNode { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; uint64_t uid; int16_t tagColId; } SJoinNode; @@ -162,7 +163,7 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int8_t tsSource; // where does the UNIX timestamp come from, server or client bool ordered; // if current rows are ordered or not int64_t vgId; // virtual group id diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4b70e54265..e5d8269ff9 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1087,11 +1087,11 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL *xlen = totalLen; } - if (totalLen < TSDB_TABLE_ID_LEN) { + if (totalLen < TSDB_TABLE_FNAME_LEN) { fullName[totalLen] = 0; } - return (totalLen < TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; + return (totalLen < TSDB_TABLE_FNAME_LEN) ? 
TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; } static void extractColumnNameFromString(tSQLExprItem* pItem) { @@ -2136,13 +2136,10 @@ int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColum } pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL; - char tableName[TSDB_TABLE_ID_LEN] = {0}; - for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); - extractTableName(pTableMetaInfo->name, tableName); - - if (strncasecmp(tableName, pTableToken->z, pTableToken->n) == 0 && strlen(tableName) == pTableToken->n) { + char* name = pTableMetaInfo->aliasName; + if (strncasecmp(name, pTableToken->z, pTableToken->n) == 0 && strlen(name) == pTableToken->n) { pIndex->tableIndex = i; break; } @@ -3658,7 +3655,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1)); taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - char db[TSDB_TABLE_ID_LEN] = {0}; + char db[TSDB_TABLE_FNAME_LEN] = {0}; // remove the duplicated input table names int32_t num = 0; @@ -3683,7 +3680,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); } - char idBuf[TSDB_TABLE_ID_LEN] = {0}; + char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; int32_t xlen = strlen(segments[i]); SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; @@ -5915,15 +5912,16 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); - const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; - const char* msg2 = "point interpolation query needs timestamp"; - const char* msg5 = "fill only available for interval query"; - const char* msg6 = "start(end) time of query range required or time range too large"; - const char* msg7 = "illegal number of tables in from clause"; - const char* msg8 = "too many columns in selection clause"; - const char* msg9 = "TWA query requires both the start and end time"; - const char* msg10= "too many tables in from clause"; + const char* msg0 = "invalid table name"; + const char* msg1 = "table name too long"; + const char* msg2 = "point interpolation query needs timestamp"; + const char* msg5 = "fill only available for interval query"; + const char* msg6 = "start(end) time of query range required or time range too large"; + const char* msg7 = "illegal number of tables in from clause"; + const char* msg8 = "too many columns in selection clause"; + const char* msg9 = "TWA query requires both the start and end time"; + const char* msg10 = "too many tables in from clause"; + const char* msg11 = "invalid table alias name"; int32_t code = TSDB_CODE_SUCCESS; @@ -5966,7 +5964,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } // set all query tables, which are maybe more than one. 
- for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) { + for (int32_t i = 0; i < pQuerySql->from->nExpr; ) { tVariant* pTableItem = &pQuerySql->from->a[i].pVar; if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { @@ -5980,24 +5978,34 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); } - if (pQueryInfo->numOfTables <= i) { // more than one table + if (pQueryInfo->numOfTables <= i/2) { // more than one table tscAddEmptyMetaInfo(pQueryInfo); } - STableMetaInfo* pMeterInfo1 = tscGetMetaInfo(pQueryInfo, i); + STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i); SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; - if (tscSetTableFullName(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { + if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - code = tscGetTableMeta(pSql, pMeterInfo1); + tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; + SSQLToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING}; + if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); + } + + tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); + + code = tscGetTableMeta(pSql, pTableMetaInfo1); if (code != TSDB_CODE_SUCCESS) { return code; } + + i += 2; } - assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr); + assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr / 2); bool isSTable = false; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { From cd163fc0d4aef5cee099f4c0306e6d68cb3a5259 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 10:37:20 +0800 Subject: [PATCH 045/190] [td-473] --- src/query/inc/sql.y | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 15cfeee6b2..2b2a967edf 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -26,7 +26,12 @@ #include #include #include +#include "qSqlparser.h" +#include "tcmdtype.h" +#include "tstoken.h" +#include "ttokendef.h" #include "tutil.h" +#include "tvariant.h" } %syntax_error { @@ -254,7 +259,7 @@ alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; } alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } -alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtod(X.z, NULL, 10); } +alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } %type typename {TAOS_FIELD} typename(A) ::= ids(X). { @@ -422,8 +427,35 @@ as(X) ::= . { X.n = 0; } from(A) ::= FROM tablelist(X). {A = X;} %type tablelist {tVariantList*} -tablelist(A) ::= ids(X) cpxName(Y). { toTSDBType(X.type); X.n += Y.n; A = tVariantListAppendToken(NULL, &X, -1);} -tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { toTSDBType(X.type); X.n += Z.n; A = tVariantListAppendToken(Y, &X, -1); } +tablelist(A) ::= ids(X) cpxName(Y). { + toTSDBType(X.type); + X.n += Y.n; + A = tVariantListAppendToken(NULL, &X, -1); + A = tVariantListAppendToken(A, &X, -1); // table alias name +} + +tablelist(A) ::= ids(X) cpxName(Y) ids(Z). 
{ + toTSDBType(X.type); + toTSDBType(Z.type); + X.n += Y.n; + A = tVariantListAppendToken(NULL, &X, -1); + A = tVariantListAppendToken(A, &Z, -1); +} + +tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { + toTSDBType(X.type); + X.n += Z.n; + A = tVariantListAppendToken(Y, &X, -1); + A = tVariantListAppendToken(A, &X, -1); +} + +tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). { + toTSDBType(X.type); + toTSDBType(F.type); + X.n += Z.n; + A = tVariantListAppendToken(Y, &X, -1); + A = tVariantListAppendToken(A, &F, -1); +} // The value of interval should be the form of "number+[a,s,m,h,d,n,y]" or "now" %type tmvar {SSQLToken} From 49f476c3a6a631cb1d1dd5fd836c09ae7b34afbb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 10:38:46 +0800 Subject: [PATCH 046/190] [td-473] --- src/query/src/sql.c | 783 +++++++++++++++++++++++--------------------- 1 file changed, 409 insertions(+), 374 deletions(-) diff --git a/src/query/src/sql.c b/src/query/src/sql.c index fe26f61725..75ef2f3218 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -126,17 +126,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 243 -#define YYNRULE 226 +#define YYNSTATE 245 +#define YYNRULE 228 #define YYNTOKEN 207 -#define YY_MAX_SHIFT 242 -#define YY_MIN_SHIFTREDUCE 405 -#define YY_MAX_SHIFTREDUCE 630 -#define YY_ERROR_ACTION 631 -#define YY_ACCEPT_ACTION 632 -#define YY_NO_ACTION 633 -#define YY_MIN_REDUCE 634 -#define YY_MAX_REDUCE 859 +#define YY_MAX_SHIFT 244 +#define YY_MIN_SHIFTREDUCE 407 +#define YY_MAX_SHIFTREDUCE 634 +#define YY_ERROR_ACTION 635 +#define YY_ACCEPT_ACTION 636 +#define YY_NO_ACTION 637 +#define YY_MIN_REDUCE 638 +#define YY_MAX_REDUCE 865 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -202,64 +202,64 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (552) +#define YY_ACTTAB_COUNT (554) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 103, 446, 135, 673, 632, 242, 126, 515, 135, 447, - /* 10 */ 135, 158, 847, 41, 43, 11, 35, 36, 846, 157, - /* 20 */ 847, 29, 134, 446, 197, 39, 37, 40, 38, 155, - /* 30 */ 103, 447, 139, 34, 33, 217, 216, 32, 31, 30, - /* 40 */ 41, 43, 767, 35, 36, 32, 31, 30, 29, 756, - /* 50 */ 446, 197, 39, 37, 40, 38, 182, 802, 447, 192, - /* 60 */ 34, 33, 21, 21, 32, 31, 30, 406, 407, 408, - /* 70 */ 409, 410, 411, 412, 413, 414, 415, 416, 417, 241, - /* 80 */ 41, 43, 228, 35, 36, 194, 843, 58, 29, 21, - /* 90 */ 842, 197, 39, 37, 40, 38, 166, 167, 753, 753, - /* 100 */ 34, 33, 168, 56, 32, 31, 30, 778, 841, 16, - /* 110 */ 235, 208, 234, 233, 207, 206, 205, 232, 204, 231, - /* 120 */ 230, 229, 203, 215, 151, 753, 732, 586, 719, 720, - /* 130 */ 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, - /* 140 */ 731, 43, 8, 35, 36, 61, 113, 21, 29, 153, - /* 150 */ 240, 197, 39, 37, 40, 38, 239, 238, 95, 775, - /* 160 */ 34, 33, 165, 99, 32, 31, 30, 169, 35, 36, - /* 170 */ 214, 213, 592, 29, 595, 103, 197, 39, 37, 40, - /* 180 */ 38, 220, 756, 753, 236, 34, 33, 175, 12, 32, - /* 190 */ 31, 30, 162, 599, 179, 178, 590, 767, 593, 103, - /* 200 */ 596, 161, 162, 599, 756, 17, 590, 148, 593, 152, - /* 210 */ 596, 154, 26, 88, 87, 142, 185, 567, 568, 16, - /* 220 */ 235, 147, 234, 233, 159, 160, 219, 232, 196, 231, - /* 230 */ 230, 229, 801, 76, 159, 160, 162, 599, 547, 228, - /* 240 */ 590, 3, 593, 17, 596, 74, 78, 83, 86, 77, - /* 250 */ 26, 39, 37, 40, 38, 80, 59, 754, 21, 34, - /* 260 */ 33, 544, 60, 32, 31, 30, 18, 140, 159, 160, - /* 270 */ 181, 737, 539, 736, 27, 734, 735, 150, 682, 184, - /* 280 */ 738, 126, 740, 741, 739, 674, 531, 141, 126, 528, - /* 290 */ 42, 529, 558, 530, 752, 591, 46, 594, 34, 33, - /* 300 */ 42, 598, 32, 31, 30, 116, 117, 68, 64, 67, - /* 310 */ 588, 598, 143, 50, 73, 72, 597, 170, 171, 130, - /* 320 */ 128, 91, 90, 89, 98, 47, 597, 144, 559, 616, - /* 330 */ 51, 26, 14, 13, 42, 145, 600, 521, 520, 201, - /* 340 */ 13, 46, 22, 22, 48, 598, 589, 10, 9, 535, - /* 350 */ 533, 536, 534, 85, 84, 146, 137, 133, 856, 138, - /* 360 */ 597, 136, 755, 812, 811, 163, 808, 807, 164, 777, - /* 370 */ 747, 218, 794, 100, 793, 769, 114, 115, 26, 684, - /* 380 */ 112, 202, 131, 183, 24, 211, 681, 212, 855, 532, - /* 390 */ 70, 854, 93, 852, 118, 702, 554, 25, 23, 132, - /* 400 */ 671, 79, 52, 186, 669, 81, 82, 667, 666, 172, - /* 410 */ 127, 664, 190, 663, 662, 661, 660, 652, 129, 658, - /* 420 */ 49, 656, 654, 766, 781, 782, 795, 104, 195, 44, - /* 430 */ 193, 191, 189, 187, 210, 105, 75, 28, 221, 199, - /* 440 */ 222, 223, 53, 225, 224, 149, 226, 62, 65, 703, - /* 450 */ 227, 237, 630, 173, 174, 629, 177, 665, 628, 119, - /* 460 */ 176, 92, 121, 125, 120, 751, 122, 123, 659, 124, - /* 470 */ 108, 106, 107, 109, 110, 111, 94, 1, 2, 621, - /* 480 */ 180, 184, 541, 55, 57, 555, 101, 156, 188, 198, - /* 490 */ 19, 63, 5, 560, 102, 4, 6, 601, 20, 15, - /* 500 */ 7, 488, 484, 200, 482, 481, 480, 477, 450, 209, - /* 510 */ 66, 45, 22, 69, 71, 517, 516, 514, 471, 54, - /* 520 */ 469, 461, 467, 463, 465, 459, 457, 487, 486, 485, - /* 530 */ 483, 479, 478, 476, 46, 448, 421, 419, 634, 633, - /* 540 */ 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, - /* 550 */ 96, 97, + /* 0 */ 105, 448, 137, 677, 636, 244, 128, 517, 137, 449, + /* 10 */ 137, 160, 853, 41, 43, 11, 35, 36, 852, 159, + /* 20 */ 853, 29, 
136, 448, 199, 39, 37, 40, 38, 157, + /* 30 */ 105, 449, 141, 34, 33, 219, 218, 32, 31, 30, + /* 40 */ 41, 43, 771, 35, 36, 32, 31, 30, 29, 760, + /* 50 */ 448, 199, 39, 37, 40, 38, 184, 808, 449, 194, + /* 60 */ 34, 33, 21, 21, 32, 31, 30, 408, 409, 410, + /* 70 */ 411, 412, 413, 414, 415, 416, 417, 418, 419, 243, + /* 80 */ 41, 43, 230, 35, 36, 196, 849, 60, 29, 21, + /* 90 */ 848, 199, 39, 37, 40, 38, 168, 169, 757, 757, + /* 100 */ 34, 33, 170, 56, 32, 31, 30, 782, 847, 16, + /* 110 */ 237, 210, 236, 235, 209, 208, 207, 234, 206, 233, + /* 120 */ 232, 231, 205, 217, 153, 757, 736, 590, 723, 724, + /* 130 */ 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, + /* 140 */ 735, 43, 8, 35, 36, 63, 115, 21, 29, 155, + /* 150 */ 242, 199, 39, 37, 40, 38, 241, 240, 97, 779, + /* 160 */ 34, 33, 167, 101, 32, 31, 30, 171, 35, 36, + /* 170 */ 216, 215, 596, 29, 599, 105, 199, 39, 37, 40, + /* 180 */ 38, 222, 760, 757, 238, 34, 33, 177, 12, 32, + /* 190 */ 31, 30, 164, 603, 181, 180, 594, 771, 597, 105, + /* 200 */ 600, 163, 164, 603, 760, 17, 594, 150, 597, 154, + /* 210 */ 600, 156, 26, 90, 89, 144, 187, 571, 572, 16, + /* 220 */ 237, 149, 236, 235, 161, 162, 221, 234, 198, 233, + /* 230 */ 232, 231, 807, 78, 161, 162, 164, 603, 549, 230, + /* 240 */ 594, 3, 597, 17, 600, 76, 80, 85, 88, 79, + /* 250 */ 26, 39, 37, 40, 38, 82, 61, 758, 21, 34, + /* 260 */ 33, 546, 62, 32, 31, 30, 18, 142, 161, 162, + /* 270 */ 183, 741, 541, 740, 27, 738, 739, 152, 686, 186, + /* 280 */ 742, 128, 744, 745, 743, 678, 533, 143, 128, 530, + /* 290 */ 42, 531, 562, 532, 756, 595, 46, 598, 34, 33, + /* 300 */ 42, 602, 32, 31, 30, 118, 119, 70, 66, 69, + /* 310 */ 592, 602, 145, 50, 75, 74, 601, 172, 173, 132, + /* 320 */ 130, 93, 92, 91, 100, 47, 601, 146, 563, 620, + /* 330 */ 51, 26, 14, 13, 42, 147, 604, 523, 522, 203, + /* 340 */ 13, 46, 22, 22, 48, 602, 593, 10, 9, 537, + /* 350 */ 535, 538, 536, 87, 86, 148, 139, 135, 862, 140, + /* 360 */ 601, 138, 759, 818, 817, 165, 814, 813, 166, 781, + /* 370 */ 751, 220, 800, 786, 788, 773, 102, 799, 26, 116, + /* 380 */ 114, 117, 688, 185, 204, 133, 24, 213, 685, 534, + /* 390 */ 214, 861, 95, 72, 860, 858, 558, 120, 706, 25, + /* 400 */ 23, 134, 52, 188, 675, 81, 673, 83, 84, 671, + /* 410 */ 670, 174, 192, 129, 668, 667, 666, 665, 664, 656, + /* 420 */ 49, 131, 662, 660, 658, 770, 57, 58, 801, 44, + /* 430 */ 197, 195, 193, 191, 189, 28, 106, 212, 77, 223, + /* 440 */ 224, 225, 226, 201, 53, 227, 228, 229, 239, 64, + /* 450 */ 67, 634, 151, 175, 176, 633, 178, 179, 632, 669, + /* 460 */ 186, 625, 94, 96, 123, 127, 2, 122, 707, 755, + /* 470 */ 121, 124, 125, 111, 107, 108, 126, 109, 110, 112, + /* 480 */ 663, 113, 182, 1, 543, 55, 59, 559, 103, 158, + /* 490 */ 564, 5, 190, 104, 6, 65, 490, 605, 4, 19, + /* 500 */ 20, 15, 200, 7, 202, 486, 484, 483, 482, 479, + /* 510 */ 452, 211, 68, 45, 22, 71, 73, 519, 518, 516, + /* 520 */ 54, 473, 471, 463, 469, 465, 467, 461, 459, 489, + /* 530 */ 488, 487, 485, 481, 480, 478, 46, 450, 423, 421, + /* 540 */ 638, 637, 637, 637, 637, 637, 637, 637, 637, 637, + /* 550 */ 637, 637, 98, 99, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 211, 1, 262, 215, 208, 209, 218, 5, 262, 9, @@ -299,25 +299,25 @@ static const YYCODETYPE yy_lookahead[] = { /* 340 */ 104, 104, 104, 104, 123, 110, 37, 129, 130, 5, /* 350 */ 5, 7, 7, 72, 73, 262, 262, 262, 248, 262, /* 360 */ 125, 262, 248, 243, 243, 243, 243, 243, 243, 211, - /* 370 */ 244, 243, 269, 211, 269, 246, 211, 211, 106, 211, + /* 370 */ 244, 243, 269, 211, 211, 246, 211, 269, 106, 
211, /* 380 */ 250, 211, 211, 246, 211, 211, 211, 211, 211, 105, /* 390 */ 211, 211, 59, 211, 211, 211, 110, 211, 211, 211, /* 400 */ 211, 211, 120, 265, 211, 211, 211, 211, 211, 211, /* 410 */ 211, 211, 265, 211, 211, 211, 211, 211, 211, 211, - /* 420 */ 122, 211, 211, 259, 212, 212, 212, 258, 114, 119, - /* 430 */ 118, 113, 112, 111, 75, 257, 84, 124, 83, 212, - /* 440 */ 49, 80, 212, 53, 82, 212, 81, 216, 216, 226, - /* 450 */ 79, 75, 5, 136, 5, 5, 5, 212, 5, 225, - /* 460 */ 136, 213, 220, 219, 224, 246, 223, 221, 212, 222, - /* 470 */ 254, 256, 255, 253, 252, 251, 213, 217, 214, 87, - /* 480 */ 127, 107, 100, 108, 104, 100, 99, 1, 99, 101, - /* 490 */ 104, 72, 115, 100, 99, 99, 115, 100, 104, 99, - /* 500 */ 99, 9, 5, 101, 5, 5, 5, 5, 76, 15, - /* 510 */ 72, 16, 104, 130, 130, 5, 5, 100, 5, 99, - /* 520 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 530 */ 5, 5, 5, 5, 104, 76, 59, 58, 0, 273, - /* 540 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 550 */ 21, 21, 273, 273, 273, 273, 273, 273, 273, 273, + /* 420 */ 122, 211, 211, 211, 211, 259, 212, 212, 212, 119, + /* 430 */ 114, 118, 113, 112, 111, 124, 258, 75, 84, 83, + /* 440 */ 49, 80, 82, 212, 212, 53, 81, 79, 75, 216, + /* 450 */ 216, 5, 212, 136, 5, 5, 136, 5, 5, 212, + /* 460 */ 107, 87, 213, 213, 220, 219, 214, 224, 226, 246, + /* 470 */ 225, 223, 221, 253, 257, 256, 222, 255, 254, 252, + /* 480 */ 212, 251, 127, 217, 100, 108, 104, 100, 99, 1, + /* 490 */ 100, 115, 99, 99, 115, 72, 9, 100, 99, 104, + /* 500 */ 104, 99, 101, 99, 101, 5, 5, 5, 5, 5, + /* 510 */ 76, 15, 72, 16, 104, 130, 130, 5, 5, 100, + /* 520 */ 99, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 530 */ 5, 5, 5, 5, 5, 5, 104, 76, 59, 58, + /* 540 */ 0, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 550 */ 273, 273, 21, 21, 273, 273, 273, 273, 273, 273, /* 560 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 570 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 580 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, @@ -337,83 +337,84 @@ static const YYCODETYPE yy_lookahead[] = { /* 720 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 730 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 740 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 760 */ 273, }; -#define YY_SHIFT_COUNT (242) +#define YY_SHIFT_COUNT (244) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (538) +#define YY_SHIFT_MAX (540) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 144, 24, 134, 191, 235, 49, 49, 49, 49, 49, /* 10 */ 49, 0, 22, 235, 284, 284, 284, 106, 49, 49, - /* 20 */ 49, 49, 49, 161, 4, 4, 552, 201, 235, 235, + /* 20 */ 49, 49, 49, 161, 4, 4, 554, 201, 235, 235, /* 30 */ 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, /* 40 */ 235, 235, 235, 235, 235, 284, 284, 2, 2, 2, - /* 50 */ 2, 2, 2, 43, 2, 225, 49, 49, 101, 101, - /* 60 */ 157, 49, 49, 49, 49, 49, 49, 49, 49, 49, + /* 50 */ 2, 2, 2, 43, 2, 225, 49, 49, 49, 49, + /* 60 */ 101, 101, 157, 49, 49, 49, 49, 49, 49, 49, /* 70 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, /* 80 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, - /* 90 */ 49, 49, 49, 49, 49, 49, 49, 49, 272, 333, - /* 100 */ 333, 286, 286, 333, 282, 298, 310, 314, 312, 318, - /* 110 */ 320, 322, 313, 272, 333, 333, 359, 359, 333, 352, - /* 120 */ 355, 391, 361, 362, 390, 365, 371, 333, 376, 333, - /* 130 */ 376, 552, 552, 27, 67, 67, 67, 127, 152, 226, - /* 140 */ 226, 226, 181, 265, 265, 265, 265, 241, 255, 39, - /* 150 
*/ 60, 8, 8, 96, 172, 192, 228, 229, 236, 167, - /* 160 */ 290, 309, 142, 221, 209, 237, 238, 239, 185, 218, - /* 170 */ 344, 345, 281, 447, 317, 449, 450, 324, 451, 453, - /* 180 */ 392, 353, 374, 382, 375, 380, 385, 387, 486, 389, - /* 190 */ 393, 395, 386, 377, 394, 381, 397, 396, 400, 388, - /* 200 */ 401, 402, 419, 492, 497, 499, 500, 501, 502, 432, - /* 210 */ 494, 438, 495, 383, 384, 408, 510, 511, 417, 420, - /* 220 */ 408, 513, 515, 516, 517, 518, 519, 520, 521, 522, - /* 230 */ 523, 524, 525, 526, 527, 528, 430, 459, 529, 530, - /* 240 */ 477, 479, 538, + /* 90 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, + /* 100 */ 272, 333, 333, 286, 286, 333, 282, 298, 310, 316, + /* 110 */ 313, 319, 321, 323, 311, 272, 333, 333, 362, 362, + /* 120 */ 333, 354, 356, 391, 361, 360, 392, 365, 368, 333, + /* 130 */ 373, 333, 373, 554, 554, 27, 67, 67, 67, 127, + /* 140 */ 152, 226, 226, 226, 181, 265, 265, 265, 265, 241, + /* 150 */ 255, 39, 60, 8, 8, 96, 172, 192, 228, 229, + /* 160 */ 236, 167, 290, 309, 142, 221, 209, 237, 238, 239, + /* 170 */ 185, 218, 344, 345, 281, 446, 317, 449, 450, 320, + /* 180 */ 452, 453, 374, 355, 353, 384, 377, 382, 387, 389, + /* 190 */ 488, 393, 390, 394, 395, 376, 396, 379, 397, 399, + /* 200 */ 402, 401, 404, 403, 423, 487, 500, 501, 502, 503, + /* 210 */ 504, 434, 496, 440, 497, 385, 386, 410, 512, 513, + /* 220 */ 419, 421, 410, 516, 517, 518, 519, 520, 521, 522, + /* 230 */ 523, 524, 525, 526, 527, 528, 529, 530, 432, 461, + /* 240 */ 531, 532, 479, 481, 540, }; -#define YY_REDUCE_COUNT (132) +#define YY_REDUCE_COUNT (134) #define YY_REDUCE_MIN (-260) -#define YY_REDUCE_MAX (264) +#define YY_REDUCE_MAX (268) static const short yy_reduce_ofst[] = { /* 0 */ -204, -101, 44, -260, -252, -211, -181, -149, -148, -122, /* 10 */ -64, -104, -61, -254, -199, -66, -44, -49, -48, -36, /* 20 */ -12, 15, 47, -212, 63, 70, 13, -247, -240, -230, /* 30 */ -176, -172, -154, -138, -53, 5, 25, 50, 65, 73, /* 40 */ 93, 94, 95, 97, 99, 110, 114, 120, 121, 122, - /* 50 */ 123, 124, 125, 126, 128, 129, 158, 162, 103, 105, - /* 60 */ 130, 165, 166, 168, 170, 171, 173, 174, 175, 176, + /* 50 */ 123, 124, 125, 126, 128, 129, 158, 162, 163, 165, + /* 60 */ 103, 108, 130, 168, 170, 171, 173, 174, 175, 176, /* 70 */ 177, 179, 180, 182, 183, 184, 186, 187, 188, 189, /* 80 */ 190, 193, 194, 195, 196, 197, 198, 199, 200, 202, - /* 90 */ 203, 204, 205, 206, 207, 208, 210, 211, 137, 212, - /* 100 */ 213, 138, 147, 214, 164, 169, 178, 215, 217, 216, - /* 110 */ 220, 222, 224, 219, 227, 230, 231, 232, 233, 223, - /* 120 */ 234, 240, 242, 243, 246, 247, 244, 245, 248, 256, - /* 130 */ 263, 260, 264, + /* 90 */ 203, 204, 205, 206, 207, 208, 210, 211, 212, 213, + /* 100 */ 137, 214, 215, 138, 147, 216, 166, 178, 217, 219, + /* 110 */ 222, 224, 220, 227, 230, 223, 231, 232, 233, 234, + /* 120 */ 240, 242, 245, 243, 244, 248, 251, 254, 246, 247, + /* 130 */ 249, 268, 250, 266, 252, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 631, 683, 672, 849, 849, 631, 631, 631, 631, 631, - /* 10 */ 631, 779, 649, 849, 631, 631, 631, 631, 631, 631, - /* 20 */ 631, 631, 631, 685, 685, 685, 774, 631, 631, 631, - /* 30 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 40 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 50 */ 631, 631, 631, 631, 631, 631, 631, 631, 798, 798, - /* 60 */ 772, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 70 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 670, - /* 80 */ 631, 668, 631, 631, 631, 631, 631, 631, 631, 631, - /* 90 */ 631, 631, 631, 631, 631, 
657, 631, 631, 631, 651, - /* 100 */ 651, 631, 631, 651, 805, 809, 803, 791, 799, 790, - /* 110 */ 786, 785, 813, 631, 651, 651, 680, 680, 651, 701, - /* 120 */ 699, 697, 689, 695, 691, 693, 687, 651, 678, 651, - /* 130 */ 678, 718, 733, 631, 814, 848, 804, 832, 831, 844, - /* 140 */ 838, 837, 631, 836, 835, 834, 833, 631, 631, 631, - /* 150 */ 631, 840, 839, 631, 631, 631, 631, 631, 631, 631, - /* 160 */ 631, 631, 816, 810, 806, 631, 631, 631, 631, 631, - /* 170 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 180 */ 631, 631, 771, 631, 631, 780, 631, 631, 631, 631, - /* 190 */ 631, 631, 800, 631, 792, 631, 631, 631, 631, 631, - /* 200 */ 631, 748, 631, 631, 631, 631, 631, 631, 631, 631, - /* 210 */ 631, 631, 631, 631, 631, 853, 631, 631, 631, 742, - /* 220 */ 851, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 230 */ 631, 631, 631, 631, 631, 631, 704, 631, 655, 653, - /* 240 */ 631, 647, 631, + /* 0 */ 635, 687, 676, 855, 855, 635, 635, 635, 635, 635, + /* 10 */ 635, 783, 653, 855, 635, 635, 635, 635, 635, 635, + /* 20 */ 635, 635, 635, 689, 689, 689, 778, 635, 635, 635, + /* 30 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 40 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 50 */ 635, 635, 635, 635, 635, 635, 635, 785, 787, 635, + /* 60 */ 804, 804, 776, 635, 635, 635, 635, 635, 635, 635, + /* 70 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 80 */ 635, 674, 635, 672, 635, 635, 635, 635, 635, 635, + /* 90 */ 635, 635, 635, 635, 635, 635, 635, 661, 635, 635, + /* 100 */ 635, 655, 655, 635, 635, 655, 811, 815, 809, 797, + /* 110 */ 805, 796, 792, 791, 819, 635, 655, 655, 684, 684, + /* 120 */ 655, 705, 703, 701, 693, 699, 695, 697, 691, 655, + /* 130 */ 682, 655, 682, 722, 737, 635, 820, 854, 810, 838, + /* 140 */ 837, 850, 844, 843, 635, 842, 841, 840, 839, 635, + /* 150 */ 635, 635, 635, 846, 845, 635, 635, 635, 635, 635, + /* 160 */ 635, 635, 635, 635, 822, 816, 812, 635, 635, 635, + /* 170 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 180 */ 635, 635, 635, 635, 775, 635, 635, 784, 635, 635, + /* 190 */ 635, 635, 635, 635, 806, 635, 798, 635, 635, 635, + /* 200 */ 635, 635, 635, 752, 635, 635, 635, 635, 635, 635, + /* 210 */ 635, 635, 635, 635, 635, 635, 635, 859, 635, 635, + /* 220 */ 635, 746, 857, 635, 635, 635, 635, 635, 635, 635, + /* 230 */ 635, 635, 635, 635, 635, 635, 635, 635, 708, 635, + /* 240 */ 659, 657, 635, 651, 635, }; /********** End of lemon-generated parsing tables *****************************/ @@ -1154,84 +1155,86 @@ static const char *const yyRuleName[] = { /* 145 */ "as ::=", /* 146 */ "from ::= FROM tablelist", /* 147 */ "tablelist ::= ids cpxName", - /* 148 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 149 */ "tmvar ::= VARIABLE", - /* 150 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 151 */ "interval_opt ::=", - /* 152 */ "fill_opt ::=", - /* 153 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 154 */ "fill_opt ::= FILL LP ID RP", - /* 155 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 156 */ "sliding_opt ::=", - /* 157 */ "orderby_opt ::=", - /* 158 */ "orderby_opt ::= ORDER BY sortlist", - /* 159 */ "sortlist ::= sortlist COMMA item sortorder", - /* 160 */ "sortlist ::= item sortorder", - /* 161 */ "item ::= ids cpxName", - /* 162 */ "sortorder ::= ASC", - /* 163 */ "sortorder ::= DESC", - /* 164 */ "sortorder ::=", - /* 165 */ "groupby_opt ::=", - /* 166 */ "groupby_opt ::= GROUP BY grouplist", - /* 167 */ "grouplist ::= grouplist COMMA item", - /* 168 */ "grouplist ::= item", - /* 169 
*/ "having_opt ::=", - /* 170 */ "having_opt ::= HAVING expr", - /* 171 */ "limit_opt ::=", - /* 172 */ "limit_opt ::= LIMIT signed", - /* 173 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 174 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 175 */ "slimit_opt ::=", - /* 176 */ "slimit_opt ::= SLIMIT signed", - /* 177 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 178 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 179 */ "where_opt ::=", - /* 180 */ "where_opt ::= WHERE expr", - /* 181 */ "expr ::= LP expr RP", - /* 182 */ "expr ::= ID", - /* 183 */ "expr ::= ID DOT ID", - /* 184 */ "expr ::= ID DOT STAR", - /* 185 */ "expr ::= INTEGER", - /* 186 */ "expr ::= MINUS INTEGER", - /* 187 */ "expr ::= PLUS INTEGER", - /* 188 */ "expr ::= FLOAT", - /* 189 */ "expr ::= MINUS FLOAT", - /* 190 */ "expr ::= PLUS FLOAT", - /* 191 */ "expr ::= STRING", - /* 192 */ "expr ::= NOW", - /* 193 */ "expr ::= VARIABLE", - /* 194 */ "expr ::= BOOL", - /* 195 */ "expr ::= ID LP exprlist RP", - /* 196 */ "expr ::= ID LP STAR RP", - /* 197 */ "expr ::= expr AND expr", - /* 198 */ "expr ::= expr OR expr", - /* 199 */ "expr ::= expr LT expr", - /* 200 */ "expr ::= expr GT expr", - /* 201 */ "expr ::= expr LE expr", - /* 202 */ "expr ::= expr GE expr", - /* 203 */ "expr ::= expr NE expr", - /* 204 */ "expr ::= expr EQ expr", - /* 205 */ "expr ::= expr PLUS expr", - /* 206 */ "expr ::= expr MINUS expr", - /* 207 */ "expr ::= expr STAR expr", - /* 208 */ "expr ::= expr SLASH expr", - /* 209 */ "expr ::= expr REM expr", - /* 210 */ "expr ::= expr LIKE expr", - /* 211 */ "expr ::= expr IN LP exprlist RP", - /* 212 */ "exprlist ::= exprlist COMMA expritem", - /* 213 */ "exprlist ::= expritem", - /* 214 */ "expritem ::= expr", - /* 215 */ "expritem ::=", - /* 216 */ "cmd ::= RESET QUERY CACHE", - /* 217 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 218 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 219 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 220 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 221 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 222 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 223 */ "cmd ::= KILL CONNECTION INTEGER", - /* 224 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 225 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 148 */ "tablelist ::= ids cpxName ids", + /* 149 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 150 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 151 */ "tmvar ::= VARIABLE", + /* 152 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 153 */ "interval_opt ::=", + /* 154 */ "fill_opt ::=", + /* 155 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 156 */ "fill_opt ::= FILL LP ID RP", + /* 157 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 158 */ "sliding_opt ::=", + /* 159 */ "orderby_opt ::=", + /* 160 */ "orderby_opt ::= ORDER BY sortlist", + /* 161 */ "sortlist ::= sortlist COMMA item sortorder", + /* 162 */ "sortlist ::= item sortorder", + /* 163 */ "item ::= ids cpxName", + /* 164 */ "sortorder ::= ASC", + /* 165 */ "sortorder ::= DESC", + /* 166 */ "sortorder ::=", + /* 167 */ "groupby_opt ::=", + /* 168 */ "groupby_opt ::= GROUP BY grouplist", + /* 169 */ "grouplist ::= grouplist COMMA item", + /* 170 */ "grouplist ::= item", + /* 171 */ "having_opt ::=", + /* 172 */ "having_opt ::= HAVING expr", + /* 173 */ "limit_opt ::=", + /* 174 */ "limit_opt ::= LIMIT signed", + /* 175 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 176 */ 
"limit_opt ::= LIMIT signed COMMA signed", + /* 177 */ "slimit_opt ::=", + /* 178 */ "slimit_opt ::= SLIMIT signed", + /* 179 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 180 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 181 */ "where_opt ::=", + /* 182 */ "where_opt ::= WHERE expr", + /* 183 */ "expr ::= LP expr RP", + /* 184 */ "expr ::= ID", + /* 185 */ "expr ::= ID DOT ID", + /* 186 */ "expr ::= ID DOT STAR", + /* 187 */ "expr ::= INTEGER", + /* 188 */ "expr ::= MINUS INTEGER", + /* 189 */ "expr ::= PLUS INTEGER", + /* 190 */ "expr ::= FLOAT", + /* 191 */ "expr ::= MINUS FLOAT", + /* 192 */ "expr ::= PLUS FLOAT", + /* 193 */ "expr ::= STRING", + /* 194 */ "expr ::= NOW", + /* 195 */ "expr ::= VARIABLE", + /* 196 */ "expr ::= BOOL", + /* 197 */ "expr ::= ID LP exprlist RP", + /* 198 */ "expr ::= ID LP STAR RP", + /* 199 */ "expr ::= expr AND expr", + /* 200 */ "expr ::= expr OR expr", + /* 201 */ "expr ::= expr LT expr", + /* 202 */ "expr ::= expr GT expr", + /* 203 */ "expr ::= expr LE expr", + /* 204 */ "expr ::= expr GE expr", + /* 205 */ "expr ::= expr NE expr", + /* 206 */ "expr ::= expr EQ expr", + /* 207 */ "expr ::= expr PLUS expr", + /* 208 */ "expr ::= expr MINUS expr", + /* 209 */ "expr ::= expr STAR expr", + /* 210 */ "expr ::= expr SLASH expr", + /* 211 */ "expr ::= expr REM expr", + /* 212 */ "expr ::= expr LIKE expr", + /* 213 */ "expr ::= expr IN LP exprlist RP", + /* 214 */ "exprlist ::= exprlist COMMA expritem", + /* 215 */ "exprlist ::= expritem", + /* 216 */ "expritem ::= expr", + /* 217 */ "expritem ::=", + /* 218 */ "cmd ::= RESET QUERY CACHE", + /* 219 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 220 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 221 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 222 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 223 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 224 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 225 */ "cmd ::= KILL CONNECTION INTEGER", + /* 226 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 227 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1837,84 +1840,86 @@ static const struct { { 263, 0 }, /* (145) as ::= */ { 250, -2 }, /* (146) from ::= FROM tablelist */ { 264, -2 }, /* (147) tablelist ::= ids cpxName */ - { 264, -4 }, /* (148) tablelist ::= tablelist COMMA ids cpxName */ - { 265, -1 }, /* (149) tmvar ::= VARIABLE */ - { 252, -4 }, /* (150) interval_opt ::= INTERVAL LP tmvar RP */ - { 252, 0 }, /* (151) interval_opt ::= */ - { 253, 0 }, /* (152) fill_opt ::= */ - { 253, -6 }, /* (153) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 253, -4 }, /* (154) fill_opt ::= FILL LP ID RP */ - { 254, -4 }, /* (155) sliding_opt ::= SLIDING LP tmvar RP */ - { 254, 0 }, /* (156) sliding_opt ::= */ - { 256, 0 }, /* (157) orderby_opt ::= */ - { 256, -3 }, /* (158) orderby_opt ::= ORDER BY sortlist */ - { 266, -4 }, /* (159) sortlist ::= sortlist COMMA item sortorder */ - { 266, -2 }, /* (160) sortlist ::= item sortorder */ - { 268, -2 }, /* (161) item ::= ids cpxName */ - { 269, -1 }, /* (162) sortorder ::= ASC */ - { 269, -1 }, /* (163) sortorder ::= DESC */ - { 269, 0 }, /* (164) sortorder ::= */ - { 255, 0 }, /* (165) groupby_opt ::= */ - { 255, -3 }, /* (166) groupby_opt ::= GROUP BY grouplist */ - { 270, -3 }, /* (167) grouplist ::= grouplist COMMA item */ - { 270, -1 }, /* (168) grouplist ::= item */ - { 257, 0 }, /* (169) having_opt ::= */ - { 257, -2 }, /* (170) having_opt 
::= HAVING expr */ - { 259, 0 }, /* (171) limit_opt ::= */ - { 259, -2 }, /* (172) limit_opt ::= LIMIT signed */ - { 259, -4 }, /* (173) limit_opt ::= LIMIT signed OFFSET signed */ - { 259, -4 }, /* (174) limit_opt ::= LIMIT signed COMMA signed */ - { 258, 0 }, /* (175) slimit_opt ::= */ - { 258, -2 }, /* (176) slimit_opt ::= SLIMIT signed */ - { 258, -4 }, /* (177) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 258, -4 }, /* (178) slimit_opt ::= SLIMIT signed COMMA signed */ - { 251, 0 }, /* (179) where_opt ::= */ - { 251, -2 }, /* (180) where_opt ::= WHERE expr */ - { 262, -3 }, /* (181) expr ::= LP expr RP */ - { 262, -1 }, /* (182) expr ::= ID */ - { 262, -3 }, /* (183) expr ::= ID DOT ID */ - { 262, -3 }, /* (184) expr ::= ID DOT STAR */ - { 262, -1 }, /* (185) expr ::= INTEGER */ - { 262, -2 }, /* (186) expr ::= MINUS INTEGER */ - { 262, -2 }, /* (187) expr ::= PLUS INTEGER */ - { 262, -1 }, /* (188) expr ::= FLOAT */ - { 262, -2 }, /* (189) expr ::= MINUS FLOAT */ - { 262, -2 }, /* (190) expr ::= PLUS FLOAT */ - { 262, -1 }, /* (191) expr ::= STRING */ - { 262, -1 }, /* (192) expr ::= NOW */ - { 262, -1 }, /* (193) expr ::= VARIABLE */ - { 262, -1 }, /* (194) expr ::= BOOL */ - { 262, -4 }, /* (195) expr ::= ID LP exprlist RP */ - { 262, -4 }, /* (196) expr ::= ID LP STAR RP */ - { 262, -3 }, /* (197) expr ::= expr AND expr */ - { 262, -3 }, /* (198) expr ::= expr OR expr */ - { 262, -3 }, /* (199) expr ::= expr LT expr */ - { 262, -3 }, /* (200) expr ::= expr GT expr */ - { 262, -3 }, /* (201) expr ::= expr LE expr */ - { 262, -3 }, /* (202) expr ::= expr GE expr */ - { 262, -3 }, /* (203) expr ::= expr NE expr */ - { 262, -3 }, /* (204) expr ::= expr EQ expr */ - { 262, -3 }, /* (205) expr ::= expr PLUS expr */ - { 262, -3 }, /* (206) expr ::= expr MINUS expr */ - { 262, -3 }, /* (207) expr ::= expr STAR expr */ - { 262, -3 }, /* (208) expr ::= expr SLASH expr */ - { 262, -3 }, /* (209) expr ::= expr REM expr */ - { 262, -3 }, /* (210) expr ::= expr LIKE expr */ - { 262, -5 }, /* (211) expr ::= expr IN LP exprlist RP */ - { 271, -3 }, /* (212) exprlist ::= exprlist COMMA expritem */ - { 271, -1 }, /* (213) exprlist ::= expritem */ - { 272, -1 }, /* (214) expritem ::= expr */ - { 272, 0 }, /* (215) expritem ::= */ - { 209, -3 }, /* (216) cmd ::= RESET QUERY CACHE */ - { 209, -7 }, /* (217) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 209, -7 }, /* (218) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 209, -7 }, /* (219) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 209, -7 }, /* (220) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 209, -8 }, /* (221) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 209, -9 }, /* (222) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 209, -3 }, /* (223) cmd ::= KILL CONNECTION INTEGER */ - { 209, -5 }, /* (224) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 209, -5 }, /* (225) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 264, -3 }, /* (148) tablelist ::= ids cpxName ids */ + { 264, -4 }, /* (149) tablelist ::= tablelist COMMA ids cpxName */ + { 264, -5 }, /* (150) tablelist ::= tablelist COMMA ids cpxName ids */ + { 265, -1 }, /* (151) tmvar ::= VARIABLE */ + { 252, -4 }, /* (152) interval_opt ::= INTERVAL LP tmvar RP */ + { 252, 0 }, /* (153) interval_opt ::= */ + { 253, 0 }, /* (154) fill_opt ::= */ + { 253, -6 }, /* (155) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 253, -4 }, /* (156) fill_opt ::= FILL LP ID RP */ + { 254, -4 }, /* (157) sliding_opt ::= SLIDING LP 
tmvar RP */ + { 254, 0 }, /* (158) sliding_opt ::= */ + { 256, 0 }, /* (159) orderby_opt ::= */ + { 256, -3 }, /* (160) orderby_opt ::= ORDER BY sortlist */ + { 266, -4 }, /* (161) sortlist ::= sortlist COMMA item sortorder */ + { 266, -2 }, /* (162) sortlist ::= item sortorder */ + { 268, -2 }, /* (163) item ::= ids cpxName */ + { 269, -1 }, /* (164) sortorder ::= ASC */ + { 269, -1 }, /* (165) sortorder ::= DESC */ + { 269, 0 }, /* (166) sortorder ::= */ + { 255, 0 }, /* (167) groupby_opt ::= */ + { 255, -3 }, /* (168) groupby_opt ::= GROUP BY grouplist */ + { 270, -3 }, /* (169) grouplist ::= grouplist COMMA item */ + { 270, -1 }, /* (170) grouplist ::= item */ + { 257, 0 }, /* (171) having_opt ::= */ + { 257, -2 }, /* (172) having_opt ::= HAVING expr */ + { 259, 0 }, /* (173) limit_opt ::= */ + { 259, -2 }, /* (174) limit_opt ::= LIMIT signed */ + { 259, -4 }, /* (175) limit_opt ::= LIMIT signed OFFSET signed */ + { 259, -4 }, /* (176) limit_opt ::= LIMIT signed COMMA signed */ + { 258, 0 }, /* (177) slimit_opt ::= */ + { 258, -2 }, /* (178) slimit_opt ::= SLIMIT signed */ + { 258, -4 }, /* (179) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 258, -4 }, /* (180) slimit_opt ::= SLIMIT signed COMMA signed */ + { 251, 0 }, /* (181) where_opt ::= */ + { 251, -2 }, /* (182) where_opt ::= WHERE expr */ + { 262, -3 }, /* (183) expr ::= LP expr RP */ + { 262, -1 }, /* (184) expr ::= ID */ + { 262, -3 }, /* (185) expr ::= ID DOT ID */ + { 262, -3 }, /* (186) expr ::= ID DOT STAR */ + { 262, -1 }, /* (187) expr ::= INTEGER */ + { 262, -2 }, /* (188) expr ::= MINUS INTEGER */ + { 262, -2 }, /* (189) expr ::= PLUS INTEGER */ + { 262, -1 }, /* (190) expr ::= FLOAT */ + { 262, -2 }, /* (191) expr ::= MINUS FLOAT */ + { 262, -2 }, /* (192) expr ::= PLUS FLOAT */ + { 262, -1 }, /* (193) expr ::= STRING */ + { 262, -1 }, /* (194) expr ::= NOW */ + { 262, -1 }, /* (195) expr ::= VARIABLE */ + { 262, -1 }, /* (196) expr ::= BOOL */ + { 262, -4 }, /* (197) expr ::= ID LP exprlist RP */ + { 262, -4 }, /* (198) expr ::= ID LP STAR RP */ + { 262, -3 }, /* (199) expr ::= expr AND expr */ + { 262, -3 }, /* (200) expr ::= expr OR expr */ + { 262, -3 }, /* (201) expr ::= expr LT expr */ + { 262, -3 }, /* (202) expr ::= expr GT expr */ + { 262, -3 }, /* (203) expr ::= expr LE expr */ + { 262, -3 }, /* (204) expr ::= expr GE expr */ + { 262, -3 }, /* (205) expr ::= expr NE expr */ + { 262, -3 }, /* (206) expr ::= expr EQ expr */ + { 262, -3 }, /* (207) expr ::= expr PLUS expr */ + { 262, -3 }, /* (208) expr ::= expr MINUS expr */ + { 262, -3 }, /* (209) expr ::= expr STAR expr */ + { 262, -3 }, /* (210) expr ::= expr SLASH expr */ + { 262, -3 }, /* (211) expr ::= expr REM expr */ + { 262, -3 }, /* (212) expr ::= expr LIKE expr */ + { 262, -5 }, /* (213) expr ::= expr IN LP exprlist RP */ + { 271, -3 }, /* (214) exprlist ::= exprlist COMMA expritem */ + { 271, -1 }, /* (215) exprlist ::= expritem */ + { 272, -1 }, /* (216) expritem ::= expr */ + { 272, 0 }, /* (217) expritem ::= */ + { 209, -3 }, /* (218) cmd ::= RESET QUERY CACHE */ + { 209, -7 }, /* (219) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 209, -7 }, /* (220) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 209, -7 }, /* (221) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 209, -7 }, /* (222) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 209, -8 }, /* (223) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 209, -9 }, /* (224) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 209, -3 }, /* 
(225) cmd ::= KILL CONNECTION INTEGER */ + { 209, -5 }, /* (226) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 209, -5 }, /* (227) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2465,29 +2470,59 @@ static void yy_reduce( {yymsp[-1].minor.yy498 = yymsp[0].minor.yy498;} break; case 147: /* tablelist ::= ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} +{ + toTSDBType(yymsp[-1].minor.yy0.type); + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[-1].minor.yy0, -1); // table alias name +} yymsp[-1].minor.yy498 = yylhsminor.yy498; break; - case 148: /* tablelist ::= tablelist COMMA ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy498 = tVariantListAppendToken(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy0, -1); } + case 148: /* tablelist ::= ids cpxName ids */ +{ + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[0].minor.yy0, -1); +} + yymsp[-2].minor.yy498 = yylhsminor.yy498; + break; + case 149: /* tablelist ::= tablelist COMMA ids cpxName */ +{ + toTSDBType(yymsp[-1].minor.yy0.type); + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[-1].minor.yy0, -1); +} yymsp[-3].minor.yy498 = yylhsminor.yy498; break; - case 149: /* tmvar ::= VARIABLE */ + case 150: /* tablelist ::= tablelist COMMA ids cpxName ids */ +{ + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(yymsp[-4].minor.yy498, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[0].minor.yy0, -1); +} + yymsp[-4].minor.yy498 = yylhsminor.yy498; + break; + case 151: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 150: /* interval_opt ::= INTERVAL LP tmvar RP */ - case 155: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==155); + case 152: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 157: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==157); {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 151: /* interval_opt ::= */ - case 156: /* sliding_opt ::= */ yytestcase(yyruleno==156); + case 153: /* interval_opt ::= */ + case 158: /* sliding_opt ::= */ yytestcase(yyruleno==158); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 152: /* fill_opt ::= */ + case 154: /* fill_opt ::= */ {yymsp[1].minor.yy498 = 0; } break; - case 153: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 155: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -2497,33 +2532,33 @@ static void yy_reduce( yymsp[-5].minor.yy498 = yymsp[-1].minor.yy498; } break; - case 154: /* fill_opt ::= FILL LP ID RP */ + 
case 156: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 157: /* orderby_opt ::= */ - case 165: /* groupby_opt ::= */ yytestcase(yyruleno==165); + case 159: /* orderby_opt ::= */ + case 167: /* groupby_opt ::= */ yytestcase(yyruleno==167); {yymsp[1].minor.yy498 = 0;} break; - case 158: /* orderby_opt ::= ORDER BY sortlist */ - case 166: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==166); + case 160: /* orderby_opt ::= ORDER BY sortlist */ + case 168: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==168); {yymsp[-2].minor.yy498 = yymsp[0].minor.yy498;} break; - case 159: /* sortlist ::= sortlist COMMA item sortorder */ + case 161: /* sortlist ::= sortlist COMMA item sortorder */ { yylhsminor.yy498 = tVariantListAppend(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } yymsp[-3].minor.yy498 = yylhsminor.yy498; break; - case 160: /* sortlist ::= item sortorder */ + case 162: /* sortlist ::= item sortorder */ { yylhsminor.yy498 = tVariantListAppend(NULL, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } yymsp[-1].minor.yy498 = yylhsminor.yy498; break; - case 161: /* item ::= ids cpxName */ + case 163: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2532,196 +2567,196 @@ static void yy_reduce( } yymsp[-1].minor.yy134 = yylhsminor.yy134; break; - case 162: /* sortorder ::= ASC */ + case 164: /* sortorder ::= ASC */ {yymsp[0].minor.yy46 = TSDB_ORDER_ASC; } break; - case 163: /* sortorder ::= DESC */ + case 165: /* sortorder ::= DESC */ {yymsp[0].minor.yy46 = TSDB_ORDER_DESC;} break; - case 164: /* sortorder ::= */ + case 166: /* sortorder ::= */ {yymsp[1].minor.yy46 = TSDB_ORDER_ASC;} break; - case 167: /* grouplist ::= grouplist COMMA item */ + case 169: /* grouplist ::= grouplist COMMA item */ { yylhsminor.yy498 = tVariantListAppend(yymsp[-2].minor.yy498, &yymsp[0].minor.yy134, -1); } yymsp[-2].minor.yy498 = yylhsminor.yy498; break; - case 168: /* grouplist ::= item */ + case 170: /* grouplist ::= item */ { yylhsminor.yy498 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); } yymsp[0].minor.yy498 = yylhsminor.yy498; break; - case 169: /* having_opt ::= */ - case 179: /* where_opt ::= */ yytestcase(yyruleno==179); - case 215: /* expritem ::= */ yytestcase(yyruleno==215); + case 171: /* having_opt ::= */ + case 181: /* where_opt ::= */ yytestcase(yyruleno==181); + case 217: /* expritem ::= */ yytestcase(yyruleno==217); {yymsp[1].minor.yy64 = 0;} break; - case 170: /* having_opt ::= HAVING expr */ - case 180: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==180); + case 172: /* having_opt ::= HAVING expr */ + case 182: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==182); {yymsp[-1].minor.yy64 = yymsp[0].minor.yy64;} break; - case 171: /* limit_opt ::= */ - case 175: /* slimit_opt ::= */ yytestcase(yyruleno==175); + case 173: /* limit_opt ::= */ + case 177: /* slimit_opt ::= */ yytestcase(yyruleno==177); {yymsp[1].minor.yy216.limit = -1; yymsp[1].minor.yy216.offset = 0;} break; - case 172: /* limit_opt ::= LIMIT signed */ - case 176: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==176); + case 174: /* limit_opt ::= LIMIT signed */ + case 178: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==178); {yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} break; - case 173: /* limit_opt ::= LIMIT signed 
OFFSET signed */ - case 177: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==177); + case 175: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==179); {yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} break; - case 174: /* limit_opt ::= LIMIT signed COMMA signed */ - case 178: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==178); + case 176: /* limit_opt ::= LIMIT signed COMMA signed */ + case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==180); {yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} break; - case 181: /* expr ::= LP expr RP */ + case 183: /* expr ::= LP expr RP */ {yymsp[-2].minor.yy64 = yymsp[-1].minor.yy64; } break; - case 182: /* expr ::= ID */ + case 184: /* expr ::= ID */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 183: /* expr ::= ID DOT ID */ + case 185: /* expr ::= ID DOT ID */ {yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 184: /* expr ::= ID DOT STAR */ + case 186: /* expr ::= ID DOT STAR */ {yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 185: /* expr ::= INTEGER */ + case 187: /* expr ::= INTEGER */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 186: /* expr ::= MINUS INTEGER */ - case 187: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==187); + case 188: /* expr ::= MINUS INTEGER */ + case 189: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==189); {yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 188: /* expr ::= FLOAT */ + case 190: /* expr ::= FLOAT */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 189: /* expr ::= MINUS FLOAT */ - case 190: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==190); + case 191: /* expr ::= MINUS FLOAT */ + case 192: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==192); {yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 191: /* expr ::= STRING */ + case 193: /* expr ::= STRING */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 192: /* expr ::= NOW */ + case 194: /* expr ::= NOW */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 193: /* expr ::= VARIABLE */ + case 195: /* expr ::= VARIABLE */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 194: /* expr ::= BOOL */ + case 196: /* expr ::= BOOL */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 195: 
/* expr ::= ID LP exprlist RP */ + case 197: /* expr ::= ID LP exprlist RP */ { yylhsminor.yy64 = tSQLExprCreateFunction(yymsp[-1].minor.yy290, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 196: /* expr ::= ID LP STAR RP */ + case 198: /* expr ::= ID LP STAR RP */ { yylhsminor.yy64 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 197: /* expr ::= expr AND expr */ + case 199: /* expr ::= expr AND expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_AND);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 198: /* expr ::= expr OR expr */ + case 200: /* expr ::= expr OR expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_OR); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 199: /* expr ::= expr LT expr */ + case 201: /* expr ::= expr LT expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LT);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 200: /* expr ::= expr GT expr */ + case 202: /* expr ::= expr GT expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GT);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 201: /* expr ::= expr LE expr */ + case 203: /* expr ::= expr LE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 202: /* expr ::= expr GE expr */ + case 204: /* expr ::= expr GE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 203: /* expr ::= expr NE expr */ + case 205: /* expr ::= expr NE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_NE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 204: /* expr ::= expr EQ expr */ + case 206: /* expr ::= expr EQ expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_EQ);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 205: /* expr ::= expr PLUS expr */ + case 207: /* expr ::= expr PLUS expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_PLUS); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 206: /* expr ::= expr MINUS expr */ + case 208: /* expr ::= expr MINUS expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_MINUS); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 207: /* expr ::= expr STAR expr */ + case 209: /* expr ::= expr STAR expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_STAR); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 208: /* expr ::= expr SLASH expr */ + case 210: /* expr ::= expr SLASH expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_DIVIDE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 209: /* expr ::= expr REM expr */ + case 211: /* expr ::= expr REM expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_REM); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 210: /* expr ::= expr LIKE expr */ + case 212: /* expr ::= expr LIKE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LIKE); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 
211: /* expr ::= expr IN LP exprlist RP */ + case 213: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-4].minor.yy64, (tSQLExpr*)yymsp[-1].minor.yy290, TK_IN); } yymsp[-4].minor.yy64 = yylhsminor.yy64; break; - case 212: /* exprlist ::= exprlist COMMA expritem */ + case 214: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290,yymsp[0].minor.yy64,0);} yymsp[-2].minor.yy290 = yylhsminor.yy290; break; - case 213: /* exprlist ::= expritem */ + case 215: /* exprlist ::= expritem */ {yylhsminor.yy290 = tSQLExprListAppend(0,yymsp[0].minor.yy64,0);} yymsp[0].minor.yy290 = yylhsminor.yy290; break; - case 214: /* expritem ::= expr */ + case 216: /* expritem ::= expr */ {yylhsminor.yy64 = yymsp[0].minor.yy64;} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 216: /* cmd ::= RESET QUERY CACHE */ + case 218: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 217: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 219: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy523, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 218: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 220: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2732,14 +2767,14 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 219: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 221: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy523, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 220: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 222: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2750,7 +2785,7 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 221: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 223: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2764,7 +2799,7 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 222: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 224: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -2776,13 +2811,13 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 223: /* cmd ::= KILL CONNECTION INTEGER */ + case 225: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 224: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 226: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 225: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 227: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += 
(yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: From 80d44b2ff0b12b2bdb65823335588796d98ef594 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 10:39:25 +0800 Subject: [PATCH 047/190] [td-225] refactor codes --- src/client/src/tscParseInsert.c | 4 ++-- src/client/src/tscServer.c | 6 +++--- src/client/src/tscSql.c | 2 +- src/inc/taosdef.h | 2 +- src/inc/taosmsg.h | 26 +++++++++++++------------- src/mnode/src/mnodeDb.c | 2 +- src/mnode/src/mnodeShow.c | 2 +- src/mnode/src/mnodeTable.c | 22 +++++++++++----------- src/plugins/monitor/src/monitorMain.c | 2 +- 9 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index c23f40b4ed..af875a28c5 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -989,7 +989,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { } int validateTableName(char *tblName, int len, SSQLToken* psTblToken) { - tstrncpy(psTblToken->z, tblName, TSDB_TABLE_ID_LEN); + tstrncpy(psTblToken->z, tblName, TSDB_TABLE_FNAME_LEN); psTblToken->n = len; psTblToken->type = TK_ID; @@ -1077,7 +1077,7 @@ int tsParseInsertSql(SSqlObj *pSql) { } pCmd->curSql = sToken.z; - char buf[TSDB_TABLE_ID_LEN]; + char buf[TSDB_TABLE_FNAME_LEN]; SSQLToken sTblToken; sTblToken.z = buf; // Check if the table name available or not diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b1c69dfdc0..a9dee643b2 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1551,7 +1551,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // fill head info SMgmtHead *pMgmt = (SMgmtHead *)(pCmd->payload + tsRpcHeadSize); - memset(pMgmt->db, 0, TSDB_TABLE_ID_LEN); // server don't need the db + memset(pMgmt->db, 0, TSDB_TABLE_FNAME_LEN); // server don't need the db SCMMultiTableInfoMsg *pInfoMsg = (SCMMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); pInfoMsg->numOfTables = htonl((int32_t)pCmd->count); @@ -1592,7 +1592,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { //// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; //// } //// -//// int32_t joinCondLen = (TSDB_TABLE_ID_LEN + sizeof(int16_t)) * 2; +//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2; //// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables; //// //// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex); @@ -1954,7 +1954,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { } int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_TABLE_ID_LEN * 2]; + char temp[TSDB_TABLE_FNAME_LEN * 2]; STscObj *pObj = pSql->pTscObj; SSqlRes *pRes = &pSql->res; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 10720874e2..af8e913563 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -812,7 +812,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t } char *nextStr; - char tblName[TSDB_TABLE_ID_LEN]; + char tblName[TSDB_TABLE_FNAME_LEN]; int payloadLen = 0; char *pMsg = pCmd->payload; while (1) { diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index c3a808b765..ea445ee543 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -232,7 +232,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_NODE_NAME_LEN 64 #define TSDB_TABLE_NAME_LEN 193 // it is a 
null-terminated string #define TSDB_DB_NAME_LEN 33 -#define TSDB_TABLE_ID_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN) +#define TSDB_TABLE_FNAME_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN) #define TSDB_COL_NAME_LEN 65 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 57e0b46f06..0b47971353 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -246,13 +246,13 @@ typedef struct { uint64_t uid; uint64_t superTableUid; uint64_t createdTime; - char tableId[TSDB_TABLE_ID_LEN]; - char superTableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; + char superTableId[TSDB_TABLE_FNAME_LEN]; char data[]; } SMDCreateTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; int8_t igExists; int8_t getMeta; @@ -265,12 +265,12 @@ typedef struct { } SCMCreateTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int8_t igNotExists; } SCMDropTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; int16_t type; /* operation type */ int16_t numOfCols; /* number of schema */ @@ -297,7 +297,7 @@ typedef struct { typedef struct { char clientVersion[TSDB_VERSION_LEN]; char msgVersion[TSDB_VERSION_LEN]; - char db[TSDB_TABLE_ID_LEN]; + char db[TSDB_TABLE_FNAME_LEN]; } SCMConnectMsg; typedef struct { @@ -347,14 +347,14 @@ typedef struct { int32_t vgId; int32_t sid; uint64_t uid; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDDropTableMsg; typedef struct { int32_t contLen; int32_t vgId; uint64_t uid; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDDropSTableMsg; typedef struct { @@ -527,7 +527,7 @@ typedef struct { } SCMCreateDbMsg, SCMAlterDbMsg; typedef struct { - char db[TSDB_TABLE_ID_LEN]; + char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; } SCMDropDbMsg, SCMUseDbMsg; @@ -637,7 +637,7 @@ typedef struct { } SMDCreateVnodeMsg, SMDAlterVnodeMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int16_t createFlag; char tags[]; } SCMTableInfoMsg; @@ -664,7 +664,7 @@ typedef struct { typedef struct STableMetaMsg { int32_t contLen; - char tableId[TSDB_TABLE_ID_LEN]; // table id + char tableId[TSDB_TABLE_FNAME_LEN]; // table id uint8_t numOfTags; uint8_t precision; uint8_t tableType; @@ -685,7 +685,7 @@ typedef struct SMultiTableMeta { typedef struct { int32_t dataLen; - char name[TSDB_TABLE_ID_LEN]; + char name[TSDB_TABLE_FNAME_LEN]; char data[TSDB_MAX_TAGS_LEN + TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * TSDB_MAX_TAGS]; } STagData; @@ -771,7 +771,7 @@ typedef struct { uint64_t uid; uint64_t stime; // stream starting time int32_t status; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDAlterStreamMsg; typedef struct { diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 48acc6787c..11013a8ef9 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -189,7 +189,7 @@ void mnodeDecDbRef(SDbObj *pDb) { } SDbObj *mnodeGetDbByTableId(char *tableId) { - char db[TSDB_TABLE_ID_LEN], *pos; + char db[TSDB_TABLE_FNAME_LEN], *pos; // tableId format should be : acct.db.table pos = strstr(tableId, TS_PATH_DELIMITER); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 995bfbe840..9983c111f6 
100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -302,7 +302,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { SAcctObj *pAcct = pUser->pAcct; if (pConnectMsg->db[0]) { - char dbName[TSDB_TABLE_ID_LEN * 3] = {0}; + char dbName[TSDB_TABLE_FNAME_LEN * 3] = {0}; sprintf(dbName, "%x%s%s", pAcct->acctId, TS_PATH_DELIMITER, pConnectMsg->db); SDbObj *pDb = mnodeGetDb(dbName); if (pDb == NULL) { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index e599e1df19..73dc7dbd54 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -215,7 +215,7 @@ static int32_t mnodeChildTableActionEncode(SSdbOper *pOper) { assert(pTable != NULL && pOper->rowData != NULL); int32_t len = strlen(pTable->info.tableId); - if (len >= TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID; + if (len >= TSDB_TABLE_FNAME_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID; memcpy(pOper->rowData, pTable->info.tableId, len); memset(pOper->rowData + len, 0, 1); @@ -246,7 +246,7 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) { if (pTable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; int32_t len = strlen(pOper->rowData); - if (len >= TSDB_TABLE_ID_LEN) { + if (len >= TSDB_TABLE_FNAME_LEN) { free(pTable); return TSDB_CODE_MND_INVALID_TABLE_ID; } @@ -348,7 +348,7 @@ static int32_t mnodeInitChildTables() { .tableId = SDB_TABLE_CTABLE, .tableName = "ctables", .hashSessions = TSDB_DEFAULT_CTABLES_HASH_SIZE, - .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE, + .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN + TSDB_CQ_SQL_SIZE, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, .insertFp = mnodeChildTableActionInsert, @@ -471,7 +471,7 @@ static int32_t mnodeSuperTableActionEncode(SSdbOper *pOper) { assert(pOper->pObj != NULL && pOper->rowData != NULL); int32_t len = strlen(pStable->info.tableId); - if (len >= TSDB_TABLE_ID_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID; + if (len >= TSDB_TABLE_FNAME_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID; memcpy(pOper->rowData, pStable->info.tableId, len); memset(pOper->rowData + len, 0, 1); @@ -495,7 +495,7 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) { if (pStable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; int32_t len = strlen(pOper->rowData); - if (len >= TSDB_TABLE_ID_LEN){ + if (len >= TSDB_TABLE_FNAME_LEN){ free(pStable); return TSDB_CODE_MND_INVALID_TABLE_ID; } @@ -531,7 +531,7 @@ static int32_t mnodeInitSuperTables() { .tableId = SDB_TABLE_STABLE, .tableName = "stables", .hashSessions = TSDB_DEFAULT_STABLES_HASH_SIZE, - .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN, + .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, .insertFp = mnodeSuperTableActionInsert, @@ -1456,7 +1456,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { // reserve space int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo); for (int32_t i = 0; i < numOfTable; ++i) { - char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i; + char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + 
(TSDB_TABLE_FNAME_LEN) * i; SSuperTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable != NULL && pTable->vgHash != NULL) { contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo)); @@ -1473,7 +1473,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg); for (int32_t i = 0; i < numOfTable; ++i) { - char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN)*i; + char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; SSuperTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable == NULL) { mError("app:%p:%p, stable:%s, not exist while get stable vgroup info", pMsg->rpcMsg.ahandle, pMsg, stableName); @@ -1820,7 +1820,7 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { return TSDB_CODE_MND_OUT_OF_MEMORY; } - tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN); + tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); pDrop->vgId = htonl(pTable->vgId); pDrop->contLen = htonl(sizeof(SMDDropTableMsg)); pDrop->sid = htonl(pTable->sid); @@ -2071,7 +2071,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { pMeta->sid = htonl(pTable->sid); pMeta->precision = pDb->cfg.precision; pMeta->tableType = pTable->info.type; - tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN); + tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); if (pTable->info.type == TSDB_CHILD_TABLE) { pMeta->sversion = htons(pTable->superTable->sversion); @@ -2440,7 +2440,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { pMultiMeta->numOfTables = 0; for (int32_t t = 0; t < pInfo->numOfTables; ++t) { - char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN); + char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_FNAME_LEN); SChildTableObj *pTable = mnodeGetChildTable(tableId); if (pTable == NULL) continue; diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index b31fc368af..4dc79aef68 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -189,7 +189,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.slowquery(ts timestamp, username " "binary(%d), created_time timestamp, time bigint, sql binary(%d))", - tsMonitorDbName, TSDB_TABLE_ID_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); + tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); } else if (cmd == MONITOR_CMD_CREATE_TB_LOG) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.log(ts timestamp, level tinyint, " From 8b3ed9e50f6c5e6b94bd51fe7a3ec689cf496df6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 11:49:18 +0800 Subject: [PATCH 048/190] [td-225] update the page size --- src/query/inc/qExtbuffer.h | 2 +- src/query/src/qExtbuffer.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index 36fc0c9820..0bdcf5c45e 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -29,7 +29,7 @@ extern "C" { #define MAX_TMPFILE_PATH_LENGTH PATH_MAX #define INITIAL_ALLOCATION_BUFFER_SIZE 64 -#define DEFAULT_PAGE_SIZE (4096L) // 16k larger than the SHistoInfo +#define DEFAULT_PAGE_SIZE (1024L) // 16k larger than the SHistoInfo typedef enum EXT_BUFFER_FLUSH_MODEL { /* diff --git 
a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 21b5361acb..f11a16810c 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -12,15 +12,14 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ -#include "qExtbuffer.h" #include "os.h" +#include "qExtbuffer.h" #include "queryLog.h" #include "taos.h" #include "taosdef.h" #include "taosmsg.h" #include "tsqlfunction.h" #include "tulog.h" -#include "tutil.h" #define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes) From 9191700f1048124789bd4ca1049f291aeb7d6a02 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 13:24:13 +0800 Subject: [PATCH 049/190] [td-805] opt perf. --- src/query/src/qExecutor.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 0ffb1a4cde..e9d1ffa639 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -125,6 +125,9 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \ } while (0) +#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (_q)->tableqinfoGroupInfo.numOfTables) +#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (_q)->tableqinfoGroupInfo.numOfTables) + // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); @@ -2656,6 +2659,10 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) { qDebug("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1); } + if (pQInfo->groupIndex == numOfGroups) { + SET_STABLE_QUERY_OVER(pQInfo); + } + qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "ms", pQInfo, pQInfo->groupIndex - 1, numOfGroups, taosGetTimestampMs() - st); @@ -2674,7 +2681,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { // check if all results has been sent to client int32_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo); if (pQInfo->numOfGroupResultPages == 0 && pQInfo->groupIndex == numOfGroup) { - pQInfo->tableIndex = pQInfo->tableqinfoGroupInfo.numOfTables; // set query completed + SET_STABLE_QUERY_OVER(pQInfo); return; } } @@ -3821,7 +3828,7 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc } } -bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { +bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo; @@ -3830,8 +3837,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { } if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { - // There are results not returned to client yet, so filling operation applied to the remain result is required - // in the first place. + // There are results not returned to client yet, so filling applied to the remain result is required firstly. int32_t remain = taosNumOfRemainRows(pFillInfo); if (remain > 0) { return true; @@ -3885,14 +3891,14 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data data += sizeof(STableIdInfo); } - // all data returned, set query over + // Check if query is completed or not for stable query or normal table query respectively. 
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (pQInfo->runtimeEnv.stableQuery) { - if (pQInfo->tableIndex >= pQInfo->tableqinfoGroupInfo.numOfTables) { + if (IS_STASBLE_QUERY_OVER(pQInfo)) { setQueryStatus(pQuery, QUERY_OVER); } } else { - if (!queryHasRemainResults(&pQInfo->runtimeEnv)) { + if (!queryHasRemainResForTableQuery(&pQInfo->runtimeEnv)) { setQueryStatus(pQuery, QUERY_OVER); } } @@ -3938,7 +3944,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int ret = 0; } - if (!queryHasRemainResults(pRuntimeEnv)) { + if (!queryHasRemainResForTableQuery(pRuntimeEnv)) { return ret; } } @@ -4702,7 +4708,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { // the limitation of output result is reached, set the query completed if (limitResults(pRuntimeEnv)) { - pQInfo->tableIndex = pQInfo->tableqinfoGroupInfo.numOfTables; + SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -5097,7 +5103,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - if (queryHasRemainResults(pRuntimeEnv)) { + if (queryHasRemainResForTableQuery(pRuntimeEnv)) { if (pQuery->fillType != TSDB_FILL_NONE) { /* @@ -6598,7 +6604,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) { *(int64_t*) pQuery->sdata[0]->data = num; count = 1; - pQInfo->tableIndex = num; //set query completed + SET_STABLE_QUERY_OVER(pQInfo); qDebug("QInfo:%p create count(tbname) query, res:%d rows:1", pQInfo, count); } else { // return only the tags|table name etc. count = 0; From f14c76d2f6f17516ff493d1c3b380a3ccb222ec7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 6 Aug 2020 15:33:39 +0800 Subject: [PATCH 050/190] [td-805] decrease the memory consumption during interval query. --- src/query/inc/qExecutor.h | 3 ++- src/query/src/qExecutor.c | 24 +++++++++++++++++++----- src/vnode/src/vnodeRead.c | 4 ++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 44d5d26f71..c7026b45c6 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -52,10 +52,10 @@ typedef struct SWindowStatus { typedef struct SWindowResult { uint16_t numOfRows; // number of rows of current time window + SWindowStatus status; // this result status: closed or opened SPosInfo pos; // Position of current result in disk-based output buffer SResultInfo* resultInfo; // For each result column, there is a resultInfo STimeWindow window; // The time window that current result covers. 
- SWindowStatus status; // this result status: closed or opened } SWindowResult; /** @@ -122,6 +122,7 @@ typedef struct SQueryCostInfo { uint32_t discardBlocks; uint64_t elapsedTime; uint64_t computTime; + uint64_t internalSupSize; } SQueryCostInfo; typedef struct SQuery { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e9d1ffa639..6f4afcd6bd 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -398,8 +398,18 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin // more than the capacity, reallocate the resources if (pWindowResInfo->size >= pWindowResInfo->capacity) { - int64_t newCap = pWindowResInfo->capacity * 1.5; + int64_t newCap = 0; + if (pWindowResInfo->capacity > 10000) { + newCap = pWindowResInfo->capacity * 1.25; + } else { + newCap = pWindowResInfo->capacity * 1.5; + } + + printf("%ld\n", newCap); + char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); + pRuntimeEnv->summary.internalSupSize += (newCap - pWindowResInfo->capacity) * sizeof(SWindowResult); + if (t == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -2659,7 +2669,7 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) { qDebug("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1); } - if (pQInfo->groupIndex == numOfGroups) { + if (pQInfo->groupIndex == numOfGroups && pQInfo->offset == pQInfo->numOfGroupResultPages) { SET_STABLE_QUERY_OVER(pQInfo); } @@ -2705,7 +2715,6 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { memcpy(pDest + offset * bytes, pData->data + pRuntimeEnv->offset[i] * pData->num, bytes * pData->num); } -// rows += pData->num; offset += pData->num; } @@ -2796,6 +2805,11 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { int64_t startt = taosGetTimestampMs(); while (1) { + if (IS_QUERY_KILLED(pQInfo)) { + qDebug("QInfo:%p it is already killed, abort", pQInfo); + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); + } + int32_t pos = pTree->pNode[0].index; SWindowResInfo *pWindowResInfo = &pTableList[pos]->windowResInfo; @@ -3958,6 +3972,8 @@ static void queryCostStatis(SQInfo *pQInfo) { " load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->totalBlocks, pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); + + qDebug("QInfo:%p :cost summary: internal size:%"PRId64, pQInfo, pSummary->internalSupSize); } static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { @@ -6039,8 +6055,6 @@ static void freeQInfo(SQInfo *pQInfo) { } SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - setQueryKilled(pQInfo); - qDebug("QInfo:%p start to free QInfo", pQInfo); for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { taosTFree(pQuery->sdata[col]); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 26c1062479..a02413822e 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -251,6 +251,10 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { return code; } + // todo add more error check here + // register the qhandle to connect to quit query immediate if connection is broken + vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId); + bool freeHandle = true; bool buildRes = false; From 4545cfaa2976811282772b761f109a853d8297d4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 10:58:55 +0800 Subject: [PATCH 051/190] [td-805] 
add log for internal structure. --- src/query/src/qExecutor.c | 2 ++ src/query/src/qUtil.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6f4afcd6bd..68239c5c63 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -419,6 +419,8 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin int32_t inc = newCap - pWindowResInfo->capacity; memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc); + pRuntimeEnv->summary.internalSupSize += (pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * inc; + for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 007a25f772..a01eb33ec7 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -46,12 +46,17 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun pWindowResInfo->size = 0; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; + pRuntimeEnv->summary.internalSupSize += sizeof(SWindowResult) * threshold; + // use the pointer arraylist pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); if (pWindowResInfo->pResult == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } + pRuntimeEnv->summary.internalSupSize += sizeof(SWindowResult) * threshold; + pRuntimeEnv->summary.internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity; + for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); if (code != TSDB_CODE_SUCCESS) { From 39dc3c422c95101817e21f3d94818d1055ffc796 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 12:08:42 +0800 Subject: [PATCH 052/190] [td-1065] --- src/query/src/qResultbuf.c | 20 ++++++++++++-------- src/query/tests/resultBufferTest.cpp | 1 - 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 8235bd7b1f..777210bd11 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -7,6 +7,7 @@ #include "taoserror.h" #define GET_DATA_PAYLOAD(_p) ((_p)->pData + POINTER_BYTES) +#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize, int32_t inMemBufSize, const void* handle) { @@ -25,7 +26,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro pResBuf->comp = true; pResBuf->file = NULL; pResBuf->handle = handle; - pResBuf->fileSize = 0; + pResBuf->fileSize = 0; // at least more than 2 pages must be in memory assert(inMemBufSize >= pagesize * 2); @@ -186,8 +187,6 @@ static char* loadPageFromDisk(SDiskbasedResultBuf* pResultBuf, SPageInfo* pg) { return GET_DATA_PAYLOAD(pg); } -#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) - static SIDList addNewGroup(SDiskbasedResultBuf* pResultBuf, int32_t groupId) { assert(taosHashGet(pResultBuf->groupSet, (const char*) &groupId, sizeof(int32_t)) == NULL); @@ -211,11 +210,12 @@ static SPageInfo* registerPage(SDiskbasedResultBuf* pResultBuf, int32_t groupId, pResultBuf->numOfPages += 1; SPageInfo* ppi = 
malloc(sizeof(SPageInfo));//{ .info = PAGE_INFO_INITIALIZER, .pageId = pageId, .pn = NULL}; - ppi->info = PAGE_INFO_INITIALIZER; - ppi->pageId = pageId; - ppi->pData = NULL; - ppi->pn = NULL; - ppi->used = true; + + ppi->pageId = pageId; + ppi->pData = NULL; + ppi->info = PAGE_INFO_INITIALIZER; + ppi->used = true; + ppi->pn = NULL; return *(SPageInfo**) taosArrayPush(list, &ppi); } @@ -246,6 +246,8 @@ static char* evicOneDataPage(SDiskbasedResultBuf* pResultBuf) { // all pages are referenced by user, try to allocate new space if (pn == NULL) { int32_t prev = pResultBuf->inMemPages; + + // increase by 50% of previous mem pages pResultBuf->inMemPages = pResultBuf->inMemPages * 1.5; qWarn("%p in memory buf page not sufficient, expand from %d to %d, page size:%d", pResultBuf, prev, @@ -353,6 +355,8 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { ((void**)((*pi)->pData))[0] = (*pi); lruListPushFront(pResultBuf->lruList, *pi); + (*pi)->used = true; + loadPageFromDisk(pResultBuf, *pi); return GET_DATA_PAYLOAD(*pi); } diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp index e9611a3232..7b946d8589 100644 --- a/src/query/tests/resultBufferTest.cpp +++ b/src/query/tests/resultBufferTest.cpp @@ -130,7 +130,6 @@ void recyclePageTest() { ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); releaseResBufPage(pResultBuf, t4); - releaseResBufPage(pResultBuf, t4); tFilePage* pBufPage5 = getNewDataBuf(pResultBuf, groupId, &pageId); tFilePage* t5 = getResBufPage(pResultBuf, pageId); From 68ca5f522557380ff1f0e02252c8e5d6443d15ee Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 14:11:47 +0800 Subject: [PATCH 053/190] [td-225]fix memory leaks. --- src/query/src/qTsbuf.c | 1 + src/query/tests/CMakeLists.txt | 2 +- src/query/tests/tsBufTest.cpp | 10 ++++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index fe39fe4e4a..95581e1869 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -72,6 +72,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { // invalid file if (header.magic != TS_COMP_FILE_MAGIC) { + tsBufDestroy(pTSBuf); return NULL; } diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index 1856223391..bd7dcd4b89 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -10,6 +10,6 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - ADD_EXECUTABLE(queryTest ${SOURCE_LIST}) + ADD_EXECUTABLE(queryTest ./unitTest.cpp ./tsBufTest.cpp) TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread gcov) ENDIF() diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index 28b1d9cefe..f8738eec9c 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -47,6 +47,8 @@ void simpleTest() { EXPECT_EQ(pTSBuf->block.numOfElem, num); tsBufDestroy(pTSBuf); + + free(list); } // one large list of ts, the ts list need to be split into several small blocks @@ -71,6 +73,7 @@ void largeTSTest() { EXPECT_EQ(pTSBuf->block.numOfElem, num); tsBufDestroy(pTSBuf); + free(list); } void multiTagsTest() { @@ -208,6 +211,8 @@ void loadDataTest() { int64_t e = taosGetTimestampUs(); printf("end:%" PRIu64 ", elapsed:%" PRIu64 ", total obj:%d\n", e, e - s, x); + tsBufDestroy(pTSBuf); + tsBufDestroy(pNewBuf); } void randomIncTsTest() {} @@ -338,6 +343,8 @@ 
void TSTraverse() { } } } + + tsBufDestroy(pTSBuf); } void performanceTest() {} @@ -352,9 +359,12 @@ void invalidFileTest() { STSBuf* pNewBuf = tsBufCreateFromFile("/tmp/test", true); EXPECT_TRUE(pNewBuf == NULL); + tsBufDestroy(pNewBuf); pNewBuf = tsBufCreateFromFile("/tmp/911", true); EXPECT_TRUE(pNewBuf == NULL); + + tsBufDestroy(pNewBuf); } void mergeDiffVnodeBufferTest() { From 0ee12789ddb23eed26b7b15d622ffc81ed6956a9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 14:12:30 +0800 Subject: [PATCH 054/190] [td-225]update test cmake --- src/query/tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index bd7dcd4b89..1856223391 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -10,6 +10,6 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - ADD_EXECUTABLE(queryTest ./unitTest.cpp ./tsBufTest.cpp) + ADD_EXECUTABLE(queryTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread gcov) ENDIF() From b2c166213b4a6abf21934583cfdd95e4528f40d4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 14:37:14 +0800 Subject: [PATCH 055/190] [td-225] add check after killing query. --- src/query/src/qExecutor.c | 25 ++++++++++++++++--------- src/vnode/src/vnodeRead.c | 19 ++++++++++++------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 68239c5c63..c7e0a53502 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6361,8 +6361,8 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); // clear qhandle owner -// assert(pQInfo->owner == pthread_self()); -// pQInfo->owner = 0; + assert(pQInfo->owner == pthread_self()); + pQInfo->owner = 0; return buildRes; } @@ -6370,14 +6370,14 @@ static bool doBuildResCheck(SQInfo* pQInfo) { bool qTableQuery(qinfo_t qinfo) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); -// int64_t threadId = pthread_self(); + int64_t threadId = pthread_self(); -// int64_t curOwner = 0; -// if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { -// qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); -// pQInfo->code = TSDB_CODE_QRY_IN_EXEC; -// return false; -// } + int64_t curOwner = 0; + if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { + qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); + pQInfo->code = TSDB_CODE_QRY_IN_EXEC; + return false; + } if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); @@ -6529,6 +6529,13 @@ int32_t qKillQuery(qinfo_t qinfo) { } setQueryKilled(pQInfo); + + // Wait for the query executing thread being stopped/ + // Once the query is stopped, the owner of qHandle will be cleared immediately. 
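  // The executing thread observes the kill flag inside qTableQuery() and clears
  // pQInfo->owner on its way out, so a short poll-and-sleep loop is enough to
  // block the caller until the query thread has really left, with no extra lock
  // or condition variable required.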
+ while(pQInfo->owner != 0) { + taosMsleep(100); + } + return TSDB_CODE_SUCCESS; } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index a02413822e..8ca76ef22d 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -225,8 +225,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { SRspRet *pRet = &pReadMsg->rspRet; SRetrieveTableMsg *pRetrieve = pCont; - pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->free = htons(pRetrieve->free); + pRetrieve->qhandle = htobe64(pRetrieve->qhandle); vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void*) pRetrieve->qhandle, pRetrieve->free, pReadMsg->rpcMsg.handle); @@ -236,24 +236,29 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { void** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle); if (handle == NULL || (*handle) != (void*) pRetrieve->qhandle) { code = TSDB_CODE_QRY_INVALID_QHANDLE; - vDebug("vgId:%d, invalid qhandle in fetch result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); + vDebug("vgId:%d, invalid qhandle in retrieving result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } if (pRetrieve->free == 1) { - vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); + vWarn("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); vnodeBuildNoResultQueryRsp(pRet); + code = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - // todo add more error check here // register the qhandle to connect to quit query immediate if connection is broken - vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId); + if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%p, retrieve discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); + code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + return code; + } bool freeHandle = true; bool buildRes = false; @@ -273,8 +278,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); } - // if qhandle is not added into task queue, the query must be completed already or paused with error , - // free qhandle immediately + // If qhandle is not added into vread queue, the query should be completed already or paused with error. 
+ // Here free qhandle immediately if (freeHandle) { qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); } From 24aa1a357d1d1aab6093e49388a6add44e0456aa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 14:47:32 +0800 Subject: [PATCH 056/190] [td-979] --- src/client/src/tscSQLParser.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e5d8269ff9..5519dd69c2 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4385,7 +4385,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg16 = "only support one column"; const char* msg17 = "invalid column name"; const char* msg18 = "primary timestamp column cannot be dropped"; - + const char* msg19 = "invalid new tag name"; + SSqlCmd* pCmd = &pSql->cmd; SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); @@ -4486,12 +4487,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING}; if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17); } SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING}; if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19); } char name[TSDB_COL_NAME_LEN] = {0}; From 364e3f52b30a79afb3e54b8eb4f89415dfd679d7 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 7 Aug 2020 06:48:28 +0000 Subject: [PATCH 057/190] minor changes --- src/plugins/http/src/httpContext.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index e367911695..f7694ded97 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -167,8 +167,8 @@ bool httpInitContext(HttpContext *pContext) { memset(pParser, 0, sizeof(HttpParser)); pParser->pCur = pParser->pLast = pParser->buffer; - httpDebug("context:%p, fd:%d, ip:%s, thread:%s, accessTimes:%d, parsed:%d", - pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->accessTimes, pContext->parsed); + httpDebug("context:%p, fd:%d, ip:%s, accessTimes:%d, parsed:%d", pContext, pContext->fd, pContext->ipstr, + pContext->accessTimes, pContext->parsed); return true; } From 17e1c19b81f9f73b2d0c678b74a469743e475fcb Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 7 Aug 2020 09:30:52 +0000 Subject: [PATCH 058/190] remove Idx file --- src/tsdb/inc/tsdbMain.h | 15 ------------ src/tsdb/src/tsdbFile.c | 4 ---- src/tsdb/src/tsdbMemTable.c | 5 ---- src/tsdb/src/tsdbRWHelper.c | 48 ------------------------------------- 4 files changed, 72 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 6119f7086c..16a2e17f1e 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -151,18 +151,10 @@ typedef struct { // ------------------ tsdbFile.c extern const char* tsdbFileSuffix[]; typedef enum { -#ifdef TSDB_IDX - TSDB_FILE_TYPE_IDX = 0, - TSDB_FILE_TYPE_HEAD, -#else TSDB_FILE_TYPE_HEAD = 0, -#endif TSDB_FILE_TYPE_DATA, TSDB_FILE_TYPE_LAST, TSDB_FILE_TYPE_MAX, -#ifdef TSDB_IDX - TSDB_FILE_TYPE_NIDX, -#endif 
TSDB_FILE_TYPE_NHEAD, TSDB_FILE_TYPE_NLAST } TSDB_FILE_TYPE; @@ -281,9 +273,6 @@ typedef struct { TSKEY minKey; TSKEY maxKey; SFileGroup fGroup; -#ifdef TSDB_IDX - SFile nIdxF; -#endif SFile nHeadF; SFile nLastF; } SHelperFile; @@ -497,10 +486,6 @@ void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TS #define helperState(h) (h)->state #define TSDB_NLAST_FILE_OPENED(h) ((h)->files.nLastF.fd > 0) #define helperFileId(h) ((h)->files.fGroup.fileId) -#ifdef TSDB_IDX -#define helperIdxF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_IDX])) -#define helperNewIdxF(h) (&((h)->files.nIdxF)) -#endif #define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD])) #define helperDataF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA])) #define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST])) diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 8cec7c08e1..a5435ad872 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -20,11 +20,7 @@ #include "tutil.h" #define TAOS_RANDOM_FILE_FAIL_TEST -#ifdef TSDB_IDX -const char *tsdbFileSuffix[] = {".idx", ".head", ".data", ".last", "", ".i", ".h", ".l"}; -#else const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"}; -#endif static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); static void tsdbDestroyFile(SFile *pFile); diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 990db76b7e..0303c47146 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -683,11 +683,6 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe pthread_rwlock_wrlock(&(pFileH->fhlock)); -#ifdef TSDB_IDX - rename(helperNewIdxF(pHelper)->fname, helperIdxF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_IDX].info = helperNewIdxF(pHelper)->info; -#endif - rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index fb79747d30..4b5acd8fe4 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -110,31 +110,16 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { // Set the files pHelper->files.fGroup = *pGroup; if (helperType(pHelper) == TSDB_WRITE_HELPER) { -#ifdef TSDB_IDX - tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NIDX, helperNewIdxF(pHelper)->fname); -#endif tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, helperNewHeadF(pHelper)->fname); tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, helperNewLastF(pHelper)->fname); } // Open the files -#ifdef TSDB_IDX - if (tsdbOpenFile(helperIdxF(pHelper), O_RDONLY) < 0) return -1; -#endif if (tsdbOpenFile(helperHeadF(pHelper), O_RDONLY) < 0) return -1; if (helperType(pHelper) == TSDB_WRITE_HELPER) { if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) return -1; if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) return -1; -#ifdef TSDB_IDX - // Create and open .i file - pFile = helperNewIdxF(pHelper); - if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; - pFile->info.size = TSDB_FILE_HEAD_SIZE; - pFile->info.magic = TSDB_FILE_INIT_MAGIC; - if (tsdbUpdateFileHeader(pFile) < 0) return -1; -#endif - // Create and open .h pFile = helperNewHeadF(pHelper); if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; @@ -164,11 +149,6 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, 
SFileGroup *pGroup) { int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { SFile *pFile = NULL; -#ifdef TSDB_IDX - pFile = helperIdxF(pHelper); - tsdbCloseFile(pFile); -#endif - pFile = helperHeadF(pHelper); tsdbCloseFile(pFile); @@ -199,18 +179,6 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { } if (helperType(pHelper) == TSDB_WRITE_HELPER) { -#ifdef TSDB_IDX - pFile = helperNewIdxF(pHelper); - if (pFile->fd > 0) { - if (!hasError) { - tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - if (hasError) (void)remove(pFile->fname); - } -#endif - pFile = helperNewHeadF(pHelper); if (pFile->fd > 0) { if (!hasError) { @@ -412,10 +380,6 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { return -1; } -#ifdef TSDB_IDX - pFile = helperNewIdxF(pHelper); -#endif - if (taosTSizeof(pHelper->pWIdx) < pFile->info.len + sizeof(SCompIdx) + 12) { pHelper->pWIdx = taosTRealloc(pHelper->pWIdx, taosTSizeof(pHelper->pWIdx) == 0 ? 1024 : taosTSizeof(pHelper->pWIdx) * 2); if (pHelper->pWIdx == NULL) { @@ -435,11 +399,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER); off_t offset = 0; -#ifdef TSDB_IDX - SFile *pFile = helperNewIdxF(pHelper); -#else SFile *pFile = helperNewHeadF(pHelper); -#endif pFile->info.len += sizeof(TSCKSUM); if (taosTSizeof(pHelper->pWIdx) < pFile->info.len) { @@ -474,11 +434,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) { ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN); -#ifdef TSDB_IDX - SFile *pFile = helperIdxF(pHelper); -#else SFile *pFile = helperHeadF(pHelper); -#endif int fd = pFile->fd; if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) { @@ -1052,10 +1008,6 @@ static void tsdbResetHelperFileImpl(SRWHelper *pHelper) { helperLastF(pHelper)->fd = -1; helperNewHeadF(pHelper)->fd = -1; helperNewLastF(pHelper)->fd = -1; -#ifdef TSDB_IDX - helperIdxF(pHelper)->fd = -1; - helperNewIdxF(pHelper)->fd = -1; -#endif } static int tsdbInitHelperFile(SRWHelper *pHelper) { From 562e62e9330a959aa9e9134ea69e195c2476fa4c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 7 Aug 2020 17:44:32 +0800 Subject: [PATCH 059/190] [td-225] add check after killing query. 
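This change serializes access to a result set by recording the owning thread on the SSqlObj with an atomic compare-and-swap, mirroring the qhandle owner check re-enabled on the query node. A minimal standalone sketch of the pattern follows; the type and function names are illustrative, not the client's API, and pthread_t is assumed to fit in a 64-bit integer.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  _Atomic uint64_t owner;   /* 0 means "not owned" */
} SOwnedObj;

/* Try to claim the object for the calling thread; fail fast if it is busy. */
static bool ownerAcquire(SOwnedObj *p) {
  uint64_t expected = 0;
  uint64_t self = (uint64_t)pthread_self();
  return atomic_compare_exchange_strong(&p->owner, &expected, self);
}

/* Only the thread that successfully acquired the object should release it. */
static void ownerRelease(SOwnedObj *p) {
  atomic_store(&p->owner, 0);
}

A fetch call then becomes: acquire, return a query-in-progress error if the acquire fails, do the work, release.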
--- src/client/inc/tscUtil.h | 3 +++ src/client/inc/tsclient.h | 1 + src/client/src/tscSql.c | 54 +++++++++++++-------------------------- src/client/src/tscUtil.c | 18 +++++++++++++ 4 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 590f205e1d..72ca96891a 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -270,6 +270,9 @@ void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRo void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp); int tscSetMgmtEpSetFromCfg(const char *first, const char *second); +bool tscSetSqlOwner(SSqlObj* pSql); +void tscClearSqlOwner(SSqlObj* pSql); + void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); char* strdup_throw(const char* str); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 3ad4cd9455..49f7cec889 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -302,6 +302,7 @@ typedef struct STscObj { typedef struct SSqlObj { void *signature; + pthread_t owner; // owner of sql object, by which it is executed STscObj *pTscObj; void *pRpcCtx; void (*fp)(); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index af8e913563..f01f1aa384 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -233,35 +233,6 @@ static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) { sem_post(&pSql->rspSem); } -TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) { - STscObj *pObj = (STscObj *)taos; - if (pObj == NULL || pObj->signature != pObj) { - terrno = TSDB_CODE_TSC_DISCONNECTED; - return NULL; - } - - int32_t sqlLen = strlen(sqlstr); - if (sqlLen > tsMaxSQLStringLen) { - tscError("sql string exceeds max length:%d", tsMaxSQLStringLen); - terrno = TSDB_CODE_TSC_INVALID_SQL; - return NULL; - } - - taosNotePrintTsc(sqlstr); - - SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); - if (pSql == NULL) { - tscError("failed to malloc sqlObj"); - terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; - return NULL; - } - - doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen); - - // wait for the callback function to post the semaphore - tsem_wait(&pSql->rspSem); - return pSql; -} TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -274,7 +245,9 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { terrno = TSDB_CODE_TSC_INVALID_SQL; return NULL; } - + + taosNotePrintTsc(sqlstr); + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); if (pSql == NULL) { tscError("failed to malloc sqlObj"); @@ -287,6 +260,11 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { tsem_wait(&pSql->rspSem); return pSql; } + +TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) { + return taos_query_c(taos, sqlstr, strlen(sqlstr)); +} + int taos_result_precision(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; @@ -422,7 +400,10 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { pCmd->command == TSDB_SQL_INSERT) { return NULL; } - + + // set the sql object owner + tscSetSqlOwner(pSql); + // current data set are exhausted, fetch more data from node if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) && (pCmd->command == TSDB_SQL_RETRIEVE || @@ -441,7 +422,10 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { sem_wait(&pSql->rspSem); } - return 
doSetResultRowData(pSql, true); + void* data = doSetResultRowData(pSql, true); + + tscClearSqlOwner(pSql); + return data; } int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { @@ -509,7 +493,7 @@ int taos_select_db(TAOS *taos, const char *db) { } // send free message to vnode to free qhandle and corresponding resources in vnode -static bool tscFreeQhandleInVnode(SSqlObj* pSql) { +static bool tscKillQueryInVnode(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; @@ -557,16 +541,14 @@ void taos_free_result(TAOS_RES *res) { } pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; - if (!tscFreeQhandleInVnode(pSql)) { + if (!tscKillQueryInVnode(pSql)) { tscFreeSqlObj(pSql); tscDebug("%p sqlObj is freed by app", pSql); } } -// todo should not be used in async query int taos_errno(TAOS_RES *tres) { SSqlObj *pSql = (SSqlObj *) tres; - if (pSql == NULL || pSql->signature != pSql) { return terrno; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5409d90939..fdc019e97b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2223,3 +2223,21 @@ int tscSetMgmtEpSetFromCfg(const char *first, const char *second) { return 0; } + +bool tscSetSqlOwner(SSqlObj* pSql) { + SSqlRes* pRes = &pSql->res; + + // set the sql object owner + uint64_t threadId = taosGetPthreadId(); + if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) { + pRes->code = TSDB_CODE_QRY_IN_EXEC; + return false; + } + + return true; +} + +void tscClearSqlOwner(SSqlObj* pSql) { + assert(pSql->owner != 0); + atomic_store_64(&pSql->owner, 0); +} \ No newline at end of file From aea35d6b23248e7cfc950e6bf65297452e348c55 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 10:35:57 +0800 Subject: [PATCH 060/190] [td-225] fix bugs. 
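The new numOfRowsGroup counter tracks how many rows the current group has emitted, so the LIMIT clamp in the local merge is applied per group instead of against the running total of the whole clause, while numOfClauseTotal keeps accumulating across groups for the overall result count. A sketch of the accounting, using shortened names rather than the client structs:

/* after producing numOfRows rows for the current group */
if (limit >= 0 && rowsInGroup + numOfRows > limit) {
  int32_t overflow = (int32_t)(rowsInGroup + numOfRows - limit);
  numOfRows -= overflow;        /* trim the batch that crosses the limit */
}
rowsInGroup  += numOfRows;      /* reset to 0 whenever a new group starts */
rowsInClause += numOfRows;      /* never reset within the clause */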
--- src/client/inc/tsclient.h | 1 + src/client/src/tscLocalMerge.c | 73 +++++++++++++--------------------- src/client/src/tscSQLParser.c | 6 +-- 3 files changed, 32 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 49f7cec889..6d02bc7fbd 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -256,6 +256,7 @@ typedef struct SResRec { typedef struct { int64_t numOfRows; // num of results in current retrieved + int64_t numOfRowsGroup; // num of results of current group int64_t numOfTotal; // num of total results int64_t numOfClauseTotal; // num of total result in current subclause char * pRsp; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 95f45ad105..ff5fbb2699 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -856,24 +856,6 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1); } -// todo merge with following function -// static void reversedCopyResultToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage *pFinalDataPage) { -// -// for (int32_t i = 0; i < pQueryInfo->exprList.numOfExprs; ++i) { -// TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); -// -// int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); -// char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset; -// char * dst = pRes->data + pRes->numOfRows * offset; -// -// for (int32_t j = 0; j < pRes->numOfRows; ++j) { -// memcpy(dst, src, (size_t)pField->bytes); -// dst += pField->bytes; -// src -= pField->bytes; -// } -// } -//} - static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRes *pRes, tFilePage **pResPages, SLocalReducer *pLocalReducer) { assert(0); @@ -907,20 +889,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO tFilePage * pFinalDataPage = pLocalReducer->pResultBuf; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); -// if (pRes->pLocalReducer != pLocalReducer) { -// /* -// * Release the SSqlObj is called, and it is int destroying function invoked by other thread. -// * However, the other thread will WAIT until current process fully completes. 
-// * Since the flag of release struct is set by doLocalReduce function -// */ -// assert(pRes->pLocalReducer == NULL); -// } - // no interval query, no fill operation if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = pFinalDataPage->num; - pRes->numOfClauseTotal += pRes->numOfRows; if (pQueryInfo->limit.offset > 0) { if (pQueryInfo->limit.offset < pRes->numOfRows) { @@ -931,22 +903,22 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); pRes->numOfRows -= pQueryInfo->limit.offset; - pRes->numOfClauseTotal -= pQueryInfo->limit.offset; pQueryInfo->limit.offset = 0; } else { pQueryInfo->limit.offset -= pRes->numOfRows; pRes->numOfRows = 0; - pRes->numOfClauseTotal = 0; } } - if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) { + pRes->numOfRowsGroup += pRes->numOfRows; + + if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { /* impose the limitation of output rows on the final result */ int32_t prevSize = pFinalDataPage->num; - int32_t overflow = pRes->numOfClauseTotal - pQueryInfo->limit.limit; + int32_t overflow = pRes->numOfRowsGroup - pQueryInfo->limit.limit; assert(overflow < pRes->numOfRows); - pRes->numOfClauseTotal = pQueryInfo->limit.limit; + pRes->numOfRowsGroup = pQueryInfo->limit.limit; pRes->numOfRows -= overflow; pFinalDataPage->num -= overflow; @@ -957,6 +929,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize); + + pRes->numOfClauseTotal += pRes->numOfRows; pFinalDataPage->num = 0; return; } @@ -969,7 +943,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity); } - + while (1) { int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); @@ -986,7 +960,6 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = newRows; - pRes->numOfClauseTotal += newRows; pQueryInfo->limit.offset = 0; break; @@ -1010,15 +983,13 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } if (pRes->numOfRows > 0) { - if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) { - int32_t overflow = pRes->numOfClauseTotal - pQueryInfo->limit.limit; + if (pQueryInfo->limit.limit >= 0 && pRes->numOfRows > pQueryInfo->limit.limit) { + int32_t overflow = pRes->numOfRows - pQueryInfo->limit.limit; pRes->numOfRows -= overflow; - - assert(pRes->numOfRows >= 0); - - pRes->numOfClauseTotal = pQueryInfo->limit.limit; pFinalDataPage->num -= overflow; + assert(pRes->numOfRows >= 0 && pFinalDataPage->num > 0); + /* set remain data to be discarded, and reset the interpolation information */ savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pFillInfo); } @@ -1032,6 +1003,9 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } else { // todo bug?? 
reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer); } + + pRes->numOfRowsGroup += pRes->numOfRows; + pRes->numOfClauseTotal += pRes->numOfRows; } pFinalDataPage->num = 0; @@ -1227,7 +1201,10 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pRes->numOfGroups += 1; + + if (pRes->numOfRowsGroup > 0) { + pRes->numOfGroups += 1; + } // the output group is limited by the slimit clause if (reachGroupResultLimit(pQueryInfo, pRes)) { @@ -1266,7 +1243,12 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no pRes->numOfRows = 0; pQueryInfo->slimit.offset -= 1; pLocalReducer->discard = !noMoreCurrentGroupRes; - + + if (pLocalReducer->discard) { + SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel; + tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1); + } + return false; } @@ -1299,7 +1281,7 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // In handling data in other groups, we need to reset the interpolation information for a new group data pRes->numOfRows = 0; - pRes->numOfClauseTotal = 0; + pRes->numOfRowsGroup = 0; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); @@ -1363,7 +1345,8 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL || prevGroupCompleted) { // if fillType == TSDB_FILL_NONE, return directly - if (pQueryInfo->fillType != TSDB_FILL_NONE) { + if (pQueryInfo->fillType != TSDB_FILL_NONE && + ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? 
pQueryInfo->window.ekey : pQueryInfo->window.skey; assert(pFillInfo->numOfRows == 0); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 5519dd69c2..40be0efab6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5954,13 +5954,13 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql); } - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { + if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM * 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } pQueryInfo->command = TSDB_SQL_SELECT; - if (pQuerySql->from->nExpr > 2) { + if (pQuerySql->from->nExpr > 4) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); } @@ -5983,7 +5983,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { tscAddEmptyMetaInfo(pQueryInfo); } - STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i); + STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i/2); SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { From 3c2786021660ca9e1210411cad967ecec41f7b6f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 10:55:27 +0800 Subject: [PATCH 061/190] [td-225] --- src/query/src/qExecutor.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c7e0a53502..c64f9d5c48 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -405,8 +405,6 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin newCap = pWindowResInfo->capacity * 1.5; } - printf("%ld\n", newCap); - char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); pRuntimeEnv->summary.internalSupSize += (newCap - pWindowResInfo->capacity) * sizeof(SWindowResult); From a945922c67f60af866cea10bb82dd298279f2c7a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 13:44:26 +0800 Subject: [PATCH 062/190] [td-225] fix invalid read. --- src/query/src/qAst.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index 44051c7e3b..05df8d95ba 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -689,7 +689,8 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, } if (addToResult) { - taosArrayPush(res, pData); + STableKeyInfo info = {.pTable = *(void**)pData, .lastKey = -1}; + taosArrayPush(res, &info); } } From 4706b400d089065663d837ca30dc5eeaaaedf93b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 14:18:58 +0800 Subject: [PATCH 063/190] [td-225] fix invalid read. 
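The result array in these traversal routines holds STableKeyInfo elements, but the old code pushed the raw skip-list node payload directly, so taosArrayPush copied sizeof(STableKeyInfo) bytes out of node memory that was not laid out as an STableKeyInfo; that appears to be the source of the invalid read. The pattern of the fix, in short:

/* before: push the node payload itself; the array copies
 * sizeof(STableKeyInfo) bytes from a buffer holding just a pointer */
taosArrayPush(result, SL_GET_NODE_DATA(pNode));

/* after: build a value of the array's element type and push that */
STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
taosArrayPush(result, &info);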
--- src/query/inc/qAst.h | 2 +- src/query/src/qAst.c | 26 ++++++++++++++++---------- src/tsdb/src/tsdbRead.c | 2 +- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h index 01b4c16ac1..ec568a6cdb 100644 --- a/src/query/inc/qAst.h +++ b/src/query/inc/qAst.h @@ -48,7 +48,7 @@ typedef struct tQueryInfo { SSchema sch; // schema of tags char* q; __compar_fn_t compare; // filter function - void* param; // STSchema + bool indexed; // indexed columns } tQueryInfo; typedef struct SExprTraverseSupp { diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index 05df8d95ba..6d9e5856e2 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -427,8 +427,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret != 0) { break; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal bool comp = true; @@ -445,7 +446,8 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_GREATER) { continue; } else { - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); comp = false; } } @@ -458,8 +460,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (comp) { continue; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } tSkipListDestroyIter(iter); @@ -472,8 +475,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (comp) { continue; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } } else { @@ -496,12 +500,14 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_LESS) { continue; } else { - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); comp = false; // no need to compare anymore } } } } + free(cond.start); free(cond.end); tSkipListDestroyIter(iter); @@ -689,7 +695,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, } if (addToResult) { - STableKeyInfo info = {.pTable = *(void**)pData, .lastKey = -1}; + STableKeyInfo info = {.pTable = *(void**)pData, .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(res, &info); } } @@ -717,7 +723,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S } tQueryInfo *pQueryInfo = pExpr->_node.info; - if (pQueryInfo->sch.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pQueryInfo->optr != TSDB_RELATION_LIKE) { + if (pQueryInfo->indexed && pQueryInfo->optr != TSDB_RELATION_LIKE) { tQueryIndexColumn(pSkipList, pQueryInfo, result); } else { tQueryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index b086451dd1..e06bced896 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2102,7 
+2102,7 @@ void filterPrepare(void* expr, void* param) { pInfo->sch = *pSchema; pInfo->optr = pExpr->_node.optr; pInfo->compare = getComparFunc(pSchema->type, pInfo->optr); - pInfo->param = pTSSchema; + pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; if (pInfo->optr == TSDB_RELATION_IN) { pInfo->q = (char*) pCond->arr; From 8e30461cff375e9aa8013cbfc5a795854677a8ad Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 16:33:38 +0800 Subject: [PATCH 064/190] [td-225] fix memory leak --- src/tsdb/src/tsdbRead.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e06bced896..17cee360f3 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2488,11 +2488,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); destroyTableMemIterator(pTableCheckInfo); - if (pTableCheckInfo->pDataCols != NULL) { - taosTFree(pTableCheckInfo->pDataCols->buf); - } - - taosTFree(pTableCheckInfo->pDataCols); + tdFreeDataCols(pTableCheckInfo->pDataCols); taosTFree(pTableCheckInfo->pCompInfo); } taosArrayDestroy(pQueryHandle->pTableCheckInfo); From 2136789e857a0b1eba43d1e3b0f8b8016492fda3 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Sat, 8 Aug 2020 17:09:33 +0800 Subject: [PATCH 065/190] [TD-662] --- src/dnode/inc/dnodeCheck.h | 31 +++++ src/dnode/src/dnodeCheck.c | 275 +++++++++++++++++++++++++++++++++++++ src/dnode/src/dnodeMain.c | 3 + src/inc/taosdef.h | 13 ++ 4 files changed, 322 insertions(+) create mode 100644 src/dnode/inc/dnodeCheck.h create mode 100644 src/dnode/src/dnodeCheck.c diff --git a/src/dnode/inc/dnodeCheck.h b/src/dnode/inc/dnodeCheck.h new file mode 100644 index 0000000000..73f6423134 --- /dev/null +++ b/src/dnode/inc/dnodeCheck.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_DNODE_CHECK_H +#define TDENGINE_DNODE_CHECK_H + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t dnodeInitCheck(); +void dnodeStartCheck(); +void dnodeCleanupCheck(); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c new file mode 100644 index 0000000000..ce03fd093b --- /dev/null +++ b/src/dnode/src/dnodeCheck.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include +#include +#include +#include +#include + +#include "os.h" +#include "taosdef.h" +#include "tglobal.h" +#include "mnode.h" +#include "dnodeInt.h" +#include "dnodeCheck.h" + +typedef struct { + bool enable; + char * name; + int32_t (*initFp)(); + int32_t (*startFp)(); + void (*cleanUpFp)(); + void (*stopFp)(); +} SCheckItem; + +static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}}; +int64_t tsMinFreeMemSizeForStart = 0; + +static int bindTcpPort(int port) { + int serverSocket; + struct sockaddr_in server_addr; + + if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { + dError("socket() fail: %s", strerror(errno)); + return -1; + } + + bzero(&server_addr, sizeof(server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(port); + server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + + if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { + dError("port:%d tcp bind() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + + if (listen(serverSocket, 5) < 0) { + dError("port:%d listen() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + close(serverSocket); + return 0; +} + +static int bindUdpPort(int port) { + int serverSocket; + struct sockaddr_in server_addr; + + if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { + dError("socket() fail: %s", strerror(errno)); + return -1; + } + + bzero(&server_addr, sizeof(server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(port); + server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + + if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { + dError("port:%d udp bind() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + close(serverSocket); + return 0; +} + +static int dnodeCheckNetwork() { + int ret; + int startPort = tsServerPort; + + for (int port = startPort; port < startPort + 12; port++) { + ret = bindTcpPort(port); + if (0 != ret) { + return -1; + } + ret = bindUdpPort(port); + if (0 != ret) { + return -1; + } + } + + return 0; +} + +static int dnodeCheckMem() { + float memoryUsedMB; + float memoryAvailMB; + if (true != taosGetSysMemory(&memoryUsedMB)) { + dError("failed to get system mem infomation, errno:%u, reason:%s", errno, strerror(errno)); + return -1; + } + + memoryAvailMB = (float)tsTotalMemoryMB - memoryUsedMB; + + if (memoryAvailMB < tsMinFreeMemSizeForStart) { + dError("free mem %f too little, quit", memoryAvailMB); + return -1; + } + + return 0; +} + +static int dnodeCheckCpu() { + // TODO: + return 0; +} + +static int dnodeCheckDisk() { + if (tsAvailDataDirGB < tsMinimalDataDirGB) { + dError("free disk size: %f GB, too little, quit", tsAvailDataDirGB); + return -1; + } + + if (tsAvailLogDirGB < tsMinimalLogDirGB) { + dError("free disk size: %f GB, too little, quit", tsAvailLogDirGB); + return -1; + } + + if (tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) { + dError("free disk size: %f GB, too little, quit", tsAvailTmpDirectorySpace); + return -1; + } + + return 0; +} + +static int dnodeCheckOs() { + // TODO: + + return 0; +} +static int dnodeCheckAccess() { + // TODO: + + return 0; +} + +static int dnodeCheckVersion() { + // TODO: + + return 0; +} + +static int dnodeCheckDatafile() { + // TODO: + + return 0; +} + +static void dnodeAllocCheckItem() { + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].name = "network"; + 
tsCheckItem[TSDB_CHECK_ITEM_NETWORK].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].startFp = dnodeCheckNetwork; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_MEM].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_MEM].name = "mem"; + tsCheckItem[TSDB_CHECK_ITEM_MEM].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_MEM].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_MEM].startFp = dnodeCheckMem; + tsCheckItem[TSDB_CHECK_ITEM_MEM].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_CPU].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_CPU].name = "cpu"; + tsCheckItem[TSDB_CHECK_ITEM_CPU].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_CPU].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_CPU].startFp = dnodeCheckCpu; + tsCheckItem[TSDB_CHECK_ITEM_CPU].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_DISK].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_DISK].name = "disk"; + tsCheckItem[TSDB_CHECK_ITEM_DISK].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DISK].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DISK].startFp = dnodeCheckDisk; + tsCheckItem[TSDB_CHECK_ITEM_DISK].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_OS].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_OS].name = "os"; + tsCheckItem[TSDB_CHECK_ITEM_OS].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_OS].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_OS].startFp = dnodeCheckOs; + tsCheckItem[TSDB_CHECK_ITEM_OS].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].name = "access"; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].startFp = dnodeCheckAccess; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_VERSION].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].name = "version"; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].startFp = dnodeCheckVersion; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].name = "datafile"; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].startFp = dnodeCheckDatafile; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].stopFp = NULL; +} + +void dnodeCleanupCheck() { + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].enable && tsCheckItem[index].stopFp) { + (*tsCheckItem[index].stopFp)(); + } + if (tsCheckItem[index].cleanUpFp) { + (*tsCheckItem[index].cleanUpFp)(); + } + } +} + +int32_t dnodeInitCheck() { + dnodeAllocCheckItem(); + + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].initFp) { + if ((*tsCheckItem[index].initFp)() != 0) { + dError("failed to init check item:%s", tsCheckItem[index].name); + return -1; + } + } + } + + return 0; +} + +void dnodeStartCheck() { + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].enable && tsCheckItem[index].startFp) { + if ((*tsCheckItem[index].startFp)() != 0) { + dError("failed to check item:%s", tsCheckItem[index].name); + exit(-1); + } + } + } +} + + diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 96f8f43265..9d22493485 100644 --- 
a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -24,6 +24,7 @@ #include "dnodeMgmt.h" #include "dnodePeer.h" #include "dnodeModule.h" +#include "dnodeCheck.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" #include "dnodeMRead.h" @@ -61,6 +62,7 @@ static const SDnodeComponent tsDnodeComponents[] = { {"mgmt-tmr", dnodeInitMgmtTimer, dnodeCleanupMgmtTimer}, {"shell", dnodeInitShell, dnodeCleanupShell}, {"telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, + {"check", dnodeInitCheck, dnodeCleanupCheck}, }; static int dnodeCreateDir(const char *dir) { @@ -123,6 +125,7 @@ int32_t dnodeInitSystem() { return -1; } + dnodeStartCheck(); dnodeStartModules(); dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_RUNING); diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index ff05c840da..accdef0d47 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -416,6 +416,19 @@ typedef enum { TSDB_MOD_MAX } EModuleType; + typedef enum { + TSDB_CHECK_ITEM_NETWORK, + TSDB_CHECK_ITEM_MEM, + TSDB_CHECK_ITEM_CPU, + TSDB_CHECK_ITEM_DISK, + TSDB_CHECK_ITEM_OS, + TSDB_CHECK_ITEM_ACCESS, + TSDB_CHECK_ITEM_VERSION, + TSDB_CHECK_ITEM_DATAFILE, + TSDB_CHECK_ITEM_MAX + } ECheckItemType; + + #ifdef __cplusplus } #endif From 222ebe3602bf0ac9fd7766a9d38b5a268b336a1b Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sat, 8 Aug 2020 17:14:57 +0800 Subject: [PATCH 066/190] add file size statistics --- src/tsdb/inc/tsdbMain.h | 14 +++++++++++--- src/tsdb/src/tsdbFile.c | 2 +- src/tsdb/src/tsdbMemTable.c | 4 ++-- src/tsdb/src/tsdbRWHelper.c | 36 ++++++++++++++++++++++++++++-------- 4 files changed, 42 insertions(+), 14 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 16a2e17f1e..f42c00f171 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -154,11 +154,19 @@ typedef enum { TSDB_FILE_TYPE_HEAD = 0, TSDB_FILE_TYPE_DATA, TSDB_FILE_TYPE_LAST, - TSDB_FILE_TYPE_MAX, + TSDB_FILE_TYPE_STAT, TSDB_FILE_TYPE_NHEAD, - TSDB_FILE_TYPE_NLAST + TSDB_FILE_TYPE_NDATA, + TSDB_FILE_TYPE_NLAST, + TSDB_FILE_TYPE_NSTAT } TSDB_FILE_TYPE; +#ifndef TDINTERNAL +#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_LAST+1) +#else +#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_STAT+1) +#endif + typedef struct { uint32_t magic; uint32_t len; @@ -497,7 +505,7 @@ int tsdbInitWriteHelper(SRWHelper* pHelper, STsdbRepo* pRepo); void tsdbDestroyHelper(SRWHelper* pHelper); void tsdbResetHelper(SRWHelper* pHelper); int tsdbSetAndOpenHelperFile(SRWHelper* pHelper, SFileGroup* pGroup); -int tsdbCloseHelperFile(SRWHelper* pHelper, bool hasError); +int tsdbCloseHelperFile(SRWHelper* pHelper, bool hasError, SFileGroup* pGroup); int tsdbSetHelperTable(SRWHelper* pHelper, STable* pTable, STsdbRepo* pRepo); int tsdbCommitTableData(SRWHelper* pHelper, SCommitIter* pCommitIter, SDataCols* pDataCols, TSKEY maxKey); int tsdbMoveLastBlockIfNeccessary(SRWHelper* pHelper); diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index a5435ad872..d30377ad9f 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -20,7 +20,7 @@ #include "tutil.h" #define TAOS_RANDOM_FILE_FAIL_TEST -const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"}; +const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); static void tsdbDestroyFile(SFile *pFile); diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 0303c47146..04b8b64e06 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ 
b/src/tsdb/src/tsdbMemTable.c @@ -679,7 +679,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe } taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 0); + tsdbCloseHelperFile(pHelper, 0, pGroup); pthread_rwlock_wrlock(&(pFileH->fhlock)); @@ -701,7 +701,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe _err: taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 1); + tsdbCloseHelperFile(pHelper, 1, NULL); return -1; } diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index 4b5acd8fe4..a00ed8998a 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -91,7 +91,7 @@ void tsdbResetHelper(SRWHelper *pHelper) { tsdbResetHelperTableImpl(pHelper); // Reset the file part - tsdbCloseHelperFile(pHelper, false); + tsdbCloseHelperFile(pHelper, false, NULL); tsdbResetHelperFileImpl(pHelper); pHelper->state = TSDB_HELPER_CLEAR_STATE; @@ -120,6 +120,14 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) return -1; if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) return -1; + // NOTE: For data file compatibility + if (helperDataF(pHelper)->info.size == TSDB_FILE_HEAD_SIZE) { + helperDataF(pHelper)->info.size = (uint64_t)lseek(helperDataF(pHelper)->fd, 0, SEEK_END); + } + if (helperLastF(pHelper)->info.size == TSDB_FILE_HEAD_SIZE) { + helperLastF(pHelper)->info.size = (uint64_t)lseek(helperLastF(pHelper)->fd, 0, SEEK_END); + } + // Create and open .h pFile = helperNewHeadF(pHelper); if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; @@ -146,7 +154,7 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { return 0; } -int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { +int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError, SFileGroup *pGroup) { SFile *pFile = NULL; pFile = helperHeadF(pHelper); @@ -157,10 +165,11 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { if (helperType(pHelper) == TSDB_WRITE_HELPER) { if (!hasError) { tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); } else { - // TODO: shrink back to origin + ASSERT(pGroup != NULL); + taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_DATA].info.size); } + fsync(pFile->fd); } tsdbCloseFile(pFile); } @@ -170,10 +179,11 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { if (helperType(pHelper) == TSDB_WRITE_HELPER && !TSDB_NLAST_FILE_OPENED(pHelper)) { if (!hasError) { tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); } else { - // TODO: shrink back to origin + ASSERT(pGroup != NULL); + taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_LAST].info.size); } + fsync(pFile->fd); } tsdbCloseFile(pFile); } @@ -390,6 +400,9 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { void *pBuf = POINTER_SHIFT(pHelper->pWIdx, pFile->info.len); pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx)); + + pFile->info.size += pIdx->len; + ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); } return 0; @@ -420,7 +433,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { return -1; } - pFile->info.offset = offset; + ASSERT(offset == pFile->info.size); if (taosTWrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < pFile->info.len) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len, @@ -429,6 +442,10 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { return -1; } + pFile->info.offset = offset; + pFile->info.size += pFile->info.len; + 
ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + return 0; } @@ -803,6 +820,9 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa (int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst, pCompBlock->keyLast); + pFile->info.size += pCompBlock->len; + ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + return 0; _err: @@ -1016,7 +1036,7 @@ static int tsdbInitHelperFile(SRWHelper *pHelper) { } static void tsdbDestroyHelperFile(SRWHelper *pHelper) { - tsdbCloseHelperFile(pHelper, false); + tsdbCloseHelperFile(pHelper, false, NULL); tsdbResetHelperFileImpl(pHelper); taosTZfree(pHelper->idxH.pIdxArray); taosTZfree(pHelper->pWIdx); From e7bd0ca7f1bff52f26280053c998efb7e8b25521 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Sat, 8 Aug 2020 17:35:24 +0800 Subject: [PATCH 067/190] [TD-662] --- src/dnode/inc/dnodeCheck.h | 1 - src/dnode/src/dnodeCheck.c | 16 +++++----------- src/dnode/src/dnodeMain.c | 3 +-- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/dnode/inc/dnodeCheck.h b/src/dnode/inc/dnodeCheck.h index 73f6423134..a4880b3c11 100644 --- a/src/dnode/inc/dnodeCheck.h +++ b/src/dnode/inc/dnodeCheck.h @@ -21,7 +21,6 @@ extern "C" { #endif int32_t dnodeInitCheck(); -void dnodeStartCheck(); void dnodeCleanupCheck(); #ifdef __cplusplus diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c index ce03fd093b..a5b6fa09dd 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -14,12 +14,6 @@ */ #define _DEFAULT_SOURCE -#include -#include -#include -#include -#include - #include "os.h" #include "taosdef.h" #include "tglobal.h" @@ -59,7 +53,6 @@ static int bindTcpPort(int port) { return -1; } - if (listen(serverSocket, 5) < 0) { dError("port:%d listen() fail: %s", port, strerror(errno)); close(serverSocket); @@ -101,10 +94,12 @@ static int dnodeCheckNetwork() { for (int port = startPort; port < startPort + 12; port++) { ret = bindTcpPort(port); if (0 != ret) { + dError("failed to tcp bind port %d, quit", port); return -1; } ret = bindUdpPort(port); if (0 != ret) { + dError("failed to udp bind port %d, quit", port); return -1; } } @@ -258,10 +253,6 @@ int32_t dnodeInitCheck() { } } - return 0; -} - -void dnodeStartCheck() { for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { if (tsCheckItem[index].enable && tsCheckItem[index].startFp) { if ((*tsCheckItem[index].startFp)() != 0) { @@ -270,6 +261,9 @@ void dnodeStartCheck() { } } } + + return 0; } + diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 9d22493485..dded10bd1c 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -49,6 +49,7 @@ typedef struct { } SDnodeComponent; static const SDnodeComponent tsDnodeComponents[] = { + {"check", dnodeInitCheck, dnodeCleanupCheck}, // NOTES: dnodeInitCheck must be first component !!! 
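/*
 * Editor's sketch: the dnodeCheck.c changes above register startup checks in a
 * table indexed by ECheckItemType, each slot holding a name plus optional
 * init/start/cleanup callbacks that dnodeInitCheck() walks in order.  The
 * self-contained C sketch below shows that registry pattern in miniature; all
 * names here (ECheckDemo, SCheckItemDemo, demoCheck*, ...) are illustrative and
 * are not the actual TDengine symbols.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef enum { CHECK_DEMO_NETWORK, CHECK_DEMO_MEM, CHECK_DEMO_MAX } ECheckDemo;

typedef struct {
  bool        enable;
  const char *name;
  int32_t   (*initFp)(void);
  int32_t   (*startFp)(void);
  void      (*cleanUpFp)(void);
} SCheckItemDemo;

static int32_t demoCheckNetwork(void) { return 0; }  /* pretend the required ports are free */
static int32_t demoCheckMem(void)     { return 0; }  /* pretend enough memory is available  */

static SCheckItemDemo tsDemoItems[CHECK_DEMO_MAX] = {
  [CHECK_DEMO_NETWORK] = {true, "network", NULL, demoCheckNetwork, NULL},
  [CHECK_DEMO_MEM]     = {true, "mem",     NULL, demoCheckMem,     NULL},
};

/* Mirrors the shape of dnodeInitCheck(): init every item, then run the enabled checks. */
static int32_t demoInitCheck(void) {
  for (int index = 0; index < CHECK_DEMO_MAX; ++index) {
    if (tsDemoItems[index].initFp && (*tsDemoItems[index].initFp)() != 0) {
      printf("failed to init check item:%s\n", tsDemoItems[index].name);
      return -1;
    }
  }
  for (int index = 0; index < CHECK_DEMO_MAX; ++index) {
    if (tsDemoItems[index].enable && tsDemoItems[index].startFp &&
        (*tsDemoItems[index].startFp)() != 0) {
      printf("failed to check item:%s\n", tsDemoItems[index].name);
      return -1;
    }
  }
  return 0;
}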
{"storage", dnodeInitStorage, dnodeCleanupStorage}, {"vread", dnodeInitVnodeRead, dnodeCleanupVnodeRead}, {"vwrite", dnodeInitVnodeWrite, dnodeCleanupVnodeWrite}, @@ -62,7 +63,6 @@ static const SDnodeComponent tsDnodeComponents[] = { {"mgmt-tmr", dnodeInitMgmtTimer, dnodeCleanupMgmtTimer}, {"shell", dnodeInitShell, dnodeCleanupShell}, {"telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, - {"check", dnodeInitCheck, dnodeCleanupCheck}, }; static int dnodeCreateDir(const char *dir) { @@ -125,7 +125,6 @@ int32_t dnodeInitSystem() { return -1; } - dnodeStartCheck(); dnodeStartModules(); dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_RUNING); From c1b3156563520e9a4bdc5c13a230fb0d7f06c28b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 8 Aug 2020 17:48:08 +0800 Subject: [PATCH 068/190] [td-225] fix memory leak --- src/client/src/tscSystem.c | 3 ++- src/mnode/src/mnodeDb.c | 2 +- src/mnode/src/mnodeTable.c | 2 +- src/plugins/monitor/src/monitorMain.c | 24 +++++++++++++++++------- src/query/src/qTokenizer.c | 6 +++++- src/util/inc/tstoken.h | 4 ++-- 6 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index a252beec33..2cc237dd83 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -164,7 +164,8 @@ void taos_cleanup() { taosCleanUpScheduler(tscQhandle); tscQhandle = NULL; } - + + taosCleanupKeywordsTable(); taosCloseLog(); taosTmrCleanUp(tscTmr); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 803e44cea2..f6249986fd 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -1046,7 +1046,7 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) { if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pDrop->db); if (pMsg->pDb == NULL) { if (pDrop->ignoreNotExists) { - mDebug("db:%s, db is not exist, think drop success", pDrop->db); + mDebug("db:%s, db is not exist, treat as success", pDrop->db); return TSDB_CODE_SUCCESS; } else { mError("db:%s, failed to drop, invalid db", pDrop->db); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 0912a6f3ff..dbc1bffa5c 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -751,7 +751,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableId); if (pMsg->pTable == NULL) { if (pDrop->igNotExists) { - mDebug("app:%p:%p, table:%s, table is not exist, think drop success", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + mDebug("app:%p:%p, table:%s, table is not exist, treat as success", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); return TSDB_CODE_SUCCESS; } else { mError("app:%p:%p, table:%s, failed to drop table, table not exist", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 4dc79aef68..644835abc0 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -131,7 +131,10 @@ static void monitorInitConn(void *para, void *unused) { } static void monitorInitConnCb(void *param, TAOS_RES *result, int32_t code) { - if (code < 0) { + // free it firstly in any cases. 
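/*
 * Editor's sketch: the monitor hunks around this point change the async
 * callbacks so that every code path frees the TAOS_RES and reads status from
 * the result object itself rather than from the raw code argument.  A
 * stripped-down callback with the same shape is sketched below; it assumes the
 * public client calls taos_errno(), taos_affected_rows() and taos_free_result()
 * with their usual signatures, and prints to stdout instead of the monitor log.
 */
#include <stdio.h>
#include "taos.h"   /* TDengine client header, assumed to be on the include path */

static void demoWriteCallback(void *param, TAOS_RES *result, int code) {
  (void)code;                                  /* status is taken from the result object */
  int err = taos_errno(result);
  if (err != 0) {
    printf("save %s failed, code:%d\n", (const char *)param, err);
  } else {
    printf("save %s ok, rows:%d\n", (const char *)param, taos_affected_rows(result));
  }
  taos_free_result(result);                    /* freed on every path, success or error  */
}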
+ taos_free_result(result); + + if (code != TSDB_CODE_SUCCESS) { monitorError("monitor:%p, connect to database failed, reason:%s", tsMonitorConn.conn, tstrerror(code)); taos_close(tsMonitorConn.conn); tsMonitorConn.conn = NULL; @@ -214,7 +217,7 @@ static void monitorInitDatabase() { } static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) { - if (-code == TSDB_CODE_MND_TABLE_ALREADY_EXIST || -code == TSDB_CODE_MND_DB_ALREADY_EXIST || code >= 0) { + if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST || code == TSDB_CODE_MND_DB_ALREADY_EXIST || code >= 0) { monitorDebug("monitor:%p, sql success, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql); if (tsMonitorConn.cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { monitorInfo("dnode:%s is started", tsLocalEp); @@ -226,6 +229,8 @@ static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) { tsMonitorConn.state = MONITOR_STATE_UN_INIT; monitorStartSystemRetry(); } + + taos_free_result(result); } void monitorStopSystem() { @@ -238,6 +243,8 @@ void monitorStopSystem() { if (tsMonitorConn.timer != NULL) { taosTmrStopA(&(tsMonitorConn.timer)); } + + taos_close(tsMonitorConn.conn); } void monitorCleanUpSystem() { @@ -250,13 +257,16 @@ static void monitorStartTimer() { } static void dnodeMontiorLogCallback(void *param, TAOS_RES *result, int32_t code) { - if (code < 0) { - monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code)); - } else if (code == 0) { - monitorError("monitor:%p, save %s failed, affect rows:%d", tsMonitorConn.conn, (char *)param, code); + int32_t c = taos_errno(result); + + if (c != TSDB_CODE_SUCCESS) { + monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(c)); } else { - monitorDebug("monitor:%p, save %s info success, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code)); + int32_t rows = taos_affected_rows(result); + monitorDebug("monitor:%p, save %s succ, rows:%d", tsMonitorConn.conn, (char *)param, rows); } + + taos_free_result(result); } // unit is MB diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 7418bc6895..1fa565ca90 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -25,7 +25,7 @@ // All the keywords of the SQL language are stored in a hash table typedef struct SKeyword { const char* name; // The keyword name - uint16_t type; // type + uint16_t type; // type uint8_t len; // length } SKeyword; @@ -659,3 +659,7 @@ SSQLToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn } bool isKeyWord(const char* z, int32_t len) { return (tSQLKeywordCode((char*)z, len) != TK_ID); } + +void taosCleanupKeywordsTable() { + taosHashCleanup(KeywordHashTable); +} diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h index 258e62cf8a..c1c6f2de7a 100644 --- a/src/util/inc/tstoken.h +++ b/src/util/inc/tstoken.h @@ -24,8 +24,6 @@ extern "C" { #include "tutil.h" #include "ttokendef.h" - - #define TSQL_TBNAME "TBNAME" #define TSQL_TBNAME_L "tbname" @@ -182,6 +180,8 @@ static FORCE_INLINE int32_t isValidNumber(const SSQLToken* pToken) { return (i < pToken->n)? 
TK_ILLEGAL:type; } +void taosCleanupKeywordsTable(); + #ifdef __cplusplus } #endif From 448dbb0bd0494b85708ee82a061767019115629b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 8 Aug 2020 20:08:02 +0000 Subject: [PATCH 069/190] [TD-1041] bugfix --- src/client/src/tscSQLParser.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index bcf43a1a8b..817fe3d3f7 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1902,6 +1902,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } + tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + colIndex += 1; // the first column is ts pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); From 90173d94c814182c6b5f7470e001d68f5c2b458a Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 9 Aug 2020 05:33:08 +0000 Subject: [PATCH 070/190] TD-1102 --- src/vnode/src/vnodeMain.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 6abd255dc4..9695f90c30 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -251,10 +251,17 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { appH.cqCreateFunc = cqCreate; appH.cqDropFunc = cqDrop; sprintf(temp, "%s/tsdb", rootDir); + + terrno = 0; pVnode->tsdb = tsdbOpenRepo(temp, &appH); if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; + } else if (terrno != 0 && pVnode->syncCfg.replica <= 1) { + vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, + tstrerror(terrno)); + vnodeCleanUp(pVnode); + return terrno; } sprintf(temp, "%s/wal", rootDir); From 824158587034416837cb60aa1b79315b86ea85dd Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Sun, 9 Aug 2020 09:45:07 +0000 Subject: [PATCH 071/190] fix influxdbTest timeout error. 
[TD-1051] --- tests/comparisonTest/influxdb/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/comparisonTest/influxdb/main.go b/tests/comparisonTest/influxdb/main.go index a6550013e3..2fb16fad89 100644 --- a/tests/comparisonTest/influxdb/main.go +++ b/tests/comparisonTest/influxdb/main.go @@ -110,6 +110,7 @@ func writeDataImp(wInfo *WriteInfo, wg *sync.WaitGroup, arguments *ProArgs) { Addr: arguments.host, Username: arguments.username, Password: arguments.password, + Timeout: 300 * time.Second, }) if err != nil { @@ -220,6 +221,7 @@ func readData(arguments *ProArgs) { Addr: arguments.host, Username: arguments.username, Password: arguments.password, + Timeout: 300 * time.Second, }) if err != nil { From ceab359b914840df830f72832af9456c550ba69c Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 9 Aug 2020 20:59:23 +0800 Subject: [PATCH 072/190] TD-1057 --- src/os/src/windows/w64Env.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/os/src/windows/w64Env.c b/src/os/src/windows/w64Env.c index c6046ecae7..57d34d4003 100644 --- a/src/os/src/windows/w64Env.c +++ b/src/os/src/windows/w64Env.c @@ -22,7 +22,7 @@ extern void taosWinSocketInit(); void osInit() { if (configDir[0] == 0) { - strcpy(configDir, "~/TDengine/cfg"); + strcpy(configDir, "C:/TDengine/cfg"); } strcpy(tsVnodeDir, "C:/TDengine/data"); From c9ac6b03e0ca84e516ec47dbdfcb962c903ce0b8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 9 Aug 2020 14:23:08 +0000 Subject: [PATCH 073/190] set tag conditions limit --- src/inc/taosdef.h | 1 + src/query/src/qExecutor.c | 2 +- src/tsdb/src/tsdbRead.c | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index f8b9d0ee79..b4cecec8e4 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -242,6 +242,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_MAX_BYTES_PER_ROW 16384 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 +#define TSDB_MAX_TAG_CONDITIONS 1024 #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 34700a33f3..c208d61330 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5547,7 +5547,7 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); tExprNode* pExprNode = NULL; - TRY(TSDB_MAX_TAGS) { + TRY(TSDB_MAX_TAG_CONDITIONS) { pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes); } CATCH( code ) { CLEANUP_EXECUTE(); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 98153efe3e..fe32a0e1aa 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2373,7 +2373,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, co int32_t ret = TSDB_CODE_SUCCESS; tExprNode* expr = NULL; - TRY(TSDB_MAX_TAGS) { + TRY(TSDB_MAX_TAG_CONDITIONS) { expr = exprTreeFromTableName(tbnameCond); if (expr == NULL) { expr = exprTreeFromBinary(pTagCond, len); @@ -2567,4 +2567,4 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { ASSERT(false); return 0; } -} \ No newline at end of file +} From a9cabb54fa67d8081780f216f33c969a0c933b05 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 9 Aug 2020 22:59:42 +0800 Subject: [PATCH 074/190] TD-1057 --- src/client/src/tscSql.c | 1 + 1 file changed, 1 
insertion(+) diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 3e5280401f..6b3653ff63 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -256,6 +256,7 @@ TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) { return NULL; } + tsem_init(&pSql->rspSem, 0, 0); doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen); // wait for the callback function to post the semaphore From 41d97eeea7257ce966d5e9c0a69e19b52f1afcd2 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 9 Aug 2020 15:21:03 +0000 Subject: [PATCH 075/190] TD-1057 --- src/client/src/tscSql.c | 14 +++++++------- src/client/src/tscSub.c | 8 ++++---- src/client/src/tscSubquery.c | 4 ++-- src/client/src/tscUtil.c | 2 +- src/dnode/src/dnodeSystem.c | 8 ++++---- src/dnode/src/dnodeTelemetry.c | 8 ++++---- src/kit/taosdemo/taosdemo.c | 30 +++++++++++++++--------------- src/query/inc/qExecutor.h | 2 +- src/rpc/test/rclient.c | 10 +++++----- src/rpc/test/rsclient.c | 6 +++--- src/sync/src/tarbitrator.c | 8 ++++---- src/sync/test/syncClient.c | 10 +++++----- 12 files changed, 55 insertions(+), 55 deletions(-) diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6b3653ff63..7bb9be5d5c 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -141,7 +141,7 @@ static void syncConnCallback(void *param, TAOS_RES *tres, int code) { SSqlObj *pSql = (SSqlObj *) tres; assert(pSql != NULL); - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) { @@ -156,7 +156,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha pSql->param = pSql; tscProcessSql(pSql); - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); if (pSql->res.code != TSDB_CODE_SUCCESS) { terrno = pSql->res.code; @@ -225,12 +225,12 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { assert(tres != NULL); SSqlObj *pSql = (SSqlObj *) tres; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) { SSqlObj* pSql = (SSqlObj*) tres; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) { @@ -439,7 +439,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { pCmd->command == TSDB_SQL_CLI_VERSION || pCmd->command == TSDB_SQL_CURRENT_USER )) { taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj); - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); } return doSetResultRowData(pSql, true); @@ -729,7 +729,7 @@ static void asyncCallback(void *param, TAOS_RES *tres, int code) { assert(param != NULL); SSqlObj *pSql = ((SSqlObj *)param); pSql->res.code = code; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } int taos_validate_sql(TAOS *taos, const char *sql) { @@ -780,7 +780,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pSql->param = pSql; int code = tsParseSql(pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); code = pSql->res.code; } if (code != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index e9f2c1dc1d..608551c7f3 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -33,7 +33,7 @@ typedef struct SSubscriptionProgress { typedef struct SSub { void * signature; char topic[32]; - sem_t sem; + tsem_t sem; int64_t lastSyncTime; int64_t lastConsumeTime; TAOS * taos; @@ -85,7 +85,7 @@ static void asyncCallback(void 
*param, TAOS_RES *tres, int code) { assert(param != NULL); SSub *pSub = ((SSub *)param); pSub->pSql->res.code = code; - sem_post(&pSub->sem); + tsem_post(&pSub->sem); } @@ -154,7 +154,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* code = tsParseSql(pSql, false); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - sem_wait(&pSub->sem); + tsem_wait(&pSub->sem); code = pSql->res.code; } if (code != TSDB_CODE_SUCCESS) { @@ -451,7 +451,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { pSql->fetchFp = asyncCallback; pSql->param = pSub; tscDoQuery(pSql); - sem_wait(&pSub->sem); + tsem_wait(&pSub->sem); if (pRes->code != TSDB_CODE_SUCCESS) { continue; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8a596d8893..4e188d4fb6 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2057,7 +2057,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } doBuildResFromSubqueries(pSql); - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); return; @@ -2083,7 +2083,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { // free(pState); // // pRes->completed = true; // set query completed -// sem_post(&pSql->rspSem); +// tsem_post(&pSql->rspSem); // return; // } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 1b6d18be0c..582411fc0c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -387,7 +387,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { pCmd->allocSize = 0; taosTFree(pSql->sqlstr); - sem_destroy(&pSql->rspSem); + tsem_destroy(&pSql->rspSem); free(pSql); } diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 2519684878..543e1c9639 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -22,7 +22,7 @@ #include "dnodeMain.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); -static sem_t exitSem; +static tsem_t exitSem; int32_t main(int32_t argc, char *argv[]) { // Set global configuration file @@ -88,7 +88,7 @@ int32_t main(int32_t argc, char *argv[]) { #endif } - if (sem_init(&exitSem, 0, 0) != 0) { + if (tsem_init(&exitSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); } @@ -117,7 +117,7 @@ int32_t main(int32_t argc, char *argv[]) { syslog(LOG_INFO, "Started TDengine service successfully."); - for (int res = sem_wait(&exitSem); res != 0; res = sem_wait(&exitSem)) { + for (int res = tsem_wait(&exitSem); res != 0; res = tsem_wait(&exitSem)) { if (res != EINTR) { syslog(LOG_ERR, "failed to wait exit semphore: %d", res); break; @@ -157,5 +157,5 @@ static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context) { sigaction(SIGUSR2, &act, NULL); // inform main thread to exit - sem_post(&exitSem); + tsem_post(&exitSem); } diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 892fd1d903..8ed4a9518b 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -36,7 +36,7 @@ #include "dnodeInt.h" #include "dnodeTelemetry.h" -static sem_t tsExitSem; +static tsem_t tsExitSem; static pthread_t tsTelemetryThread; #define TELEMETRY_SERVER "telemetry.taosdata.com" @@ -266,7 +266,7 @@ int32_t dnodeInitTelemetry() { return 0; } - if (sem_init(&tsExitSem, 0, 0) == -1) { + if (tsem_init(&tsExitSem, 0, 0) == -1) { // just log the error, it is ok for telemetry to fail dTrace("failed to create semaphore for telemetry, reason:%s", strerror(errno)); return 0; @@ -291,8 +291,8 @@ void dnodeCleanupTelemetry() { } if (tsTelemetryThread) { - 
sem_post(&tsExitSem); + tsem_post(&tsExitSem); pthread_join(tsTelemetryThread, NULL); - sem_destroy(&tsExitSem); + tsem_destroy(&tsExitSem); } } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9d46ac5055..859e22a178 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -440,9 +440,9 @@ typedef struct { char* cols; bool use_metric; - sem_t mutex_sem; + tsem_t mutex_sem; int notFinished; - sem_t lock_sem; + tsem_t lock_sem; } info; typedef struct { @@ -459,9 +459,9 @@ typedef struct { int data_of_order; int data_of_rate; - sem_t *mutex_sem; - int *notFinished; - sem_t *lock_sem; + tsem_t *mutex_sem; + int *notFinished; + tsem_t *lock_sem; } sTable; /* ******************************* Global @@ -729,9 +729,9 @@ int main(int argc, char *argv[]) { t_info->end_table_id = i < b ? last + a : last + a - 1; last = t_info->end_table_id + 1; - sem_init(&(t_info->mutex_sem), 0, 1); + tsem_init(&(t_info->mutex_sem), 0, 1); t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; - sem_init(&(t_info->lock_sem), 0, 0); + tsem_init(&(t_info->lock_sem), 0, 0); if (query_mode == SYNC) { pthread_create(pids + i, NULL, syncWrite, t_info); @@ -762,8 +762,8 @@ int main(int argc, char *argv[]) { for (int i = 0; i < threads; i++) { info *t_info = infos + i; taos_close(t_info->taos); - sem_destroy(&(t_info->mutex_sem)); - sem_destroy(&(t_info->lock_sem)); + tsem_destroy(&(t_info->mutex_sem)); + tsem_destroy(&(t_info->lock_sem)); } free(pids); @@ -1021,8 +1021,8 @@ void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntable for (int i = 0; i < threads; i++) { info *t_info = infos + i; - sem_destroy(&(t_info->mutex_sem)); - sem_destroy(&(t_info->lock_sem)); + tsem_destroy(&(t_info->mutex_sem)); + tsem_destroy(&(t_info->lock_sem)); } free(pids); @@ -1272,7 +1272,7 @@ void *asyncWrite(void *sarg) { taos_query_a(winfo->taos, "show databases", callBack, tb_info); } - sem_wait(&(winfo->lock_sem)); + tsem_wait(&(winfo->lock_sem)); free(tb_infos); return NULL; @@ -1292,10 +1292,10 @@ void callBack(void *param, TAOS_RES *res, int code) { // If finished; if (tb_info->counter >= tb_info->target) { - sem_wait(tb_info->mutex_sem); + tsem_wait(tb_info->mutex_sem); (*(tb_info->notFinished))--; - if (*(tb_info->notFinished) == 0) sem_post(tb_info->lock_sem); - sem_post(tb_info->mutex_sem); + if (*(tb_info->notFinished) == 0) tsem_post(tb_info->lock_sem); + tsem_post(tb_info->mutex_sem); return; } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index bd2e0a4470..9757036783 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -186,7 +186,7 @@ typedef struct SQInfo { void* signature; int32_t pointsInterpo; int32_t code; // error code to returned to client -// sem_t dataReady; +//tsem_t dataReady; void* tsdb; int32_t vgId; diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 6ec2d82445..7a963e9ce4 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -26,8 +26,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -39,7 +39,7 @@ static void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pEpSet) pInfo->epSet = *pEpSet; rpcFreeCont(pMsg->pCont); - sem_post(&pInfo->rspSem); + tsem_post(&pInfo->rspSem); } static int tcount = 0; @@ -60,7 +60,7 @@ static void *sendRequest(void *param) { rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); if ( pInfo->num % 20000 
== 0 ) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - sem_wait(&pInfo->rspSem); + tsem_wait(&pInfo->rspSem); } tDebug("thread:%d, it is over", pInfo->index); @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, &thattr, sendRequest, pInfo); pInfo++; diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index 6e6961784b..a152d8e4a5 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -27,8 +27,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, &thattr, sendRequest, pInfo); pInfo++; diff --git a/src/sync/src/tarbitrator.c b/src/sync/src/tarbitrator.c index 3c6db88a9c..3538391a94 100644 --- a/src/sync/src/tarbitrator.c +++ b/src/sync/src/tarbitrator.c @@ -31,7 +31,7 @@ static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context); static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp); static void arbProcessBrokenLink(void *param); static int arbProcessPeerMsg(void *param, void *buffer); -static sem_t tsArbSem; +static tsem_t tsArbSem; static ttpool_h tsArbTcpPool; typedef struct { @@ -61,7 +61,7 @@ int main(int argc, char *argv[]) { } } - if (sem_init(&tsArbSem, 0, 0) != 0) { + if (tsem_init(&tsArbSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); } @@ -98,7 +98,7 @@ int main(int argc, char *argv[]) { sInfo("TAOS arbitrator: %s:%d is running", tsNodeFqdn, tsServerPort); - for (int res = sem_wait(&tsArbSem); res != 0; res = sem_wait(&tsArbSem)) { + for (int res = tsem_wait(&tsArbSem); res != 0; res = tsem_wait(&tsArbSem)) { if (res != EINTR) break; } @@ -185,6 +185,6 @@ static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context) sInfo("shut down signal is %d, sender PID:%d", signum, sigInfo->si_pid); // inform main thread to exit - sem_post(&tsArbSem); + tsem_post(&tsArbSem); } diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index cd873b758b..16053d1088 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -25,8 +25,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -38,7 +38,7 @@ void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pEpSet) pInfo->epSet = *pEpSet; rpcFreeCont(pMsg->pCont); - sem_post(&pInfo->rspSem); + tsem_post(&pInfo->rspSem); } int tcount = 0; @@ -59,7 +59,7 @@ void *sendRequest(void *param) { rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); if ( pInfo->num % 20000 == 0 ) uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - sem_wait(&pInfo->rspSem); + tsem_wait(&pInfo->rspSem); } uDebug("thread:%d, it is over", pInfo->index); @@ -169,7 +169,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, 
&thattr, sendRequest, pInfo); pInfo++; From 9ccdaec95d89d887e0ea35b04696ffa48a436bb7 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Sun, 9 Aug 2020 23:25:43 +0800 Subject: [PATCH 076/190] fix typo in taos sql md file --- documentation20/webdocs/markdowndocs/TAOS SQL-ch.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 3cf811ab66..6c6a3afb60 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -157,7 +157,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql DROP TABLE [IF EXISTS] stb_name; ``` - 删除STable会自动删除通过STable创建的字表。 + 删除STable会自动删除通过STable创建的子表。 - **显示当前数据库下的所有超级表信息** @@ -206,7 +206,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 修改超级表的标签名,从超级表修改某个标签名后,该超级表下的所有子表也会自动更新该标签名。 -- **修改字表标签值** +- **修改子表标签值** ```mysql ALTER TABLE tb_name SET TAG tag_name=new_tag_value; From 77cfb02ea0dcf9380ed0cd239e9d1ebd96d600d4 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 10 Aug 2020 09:59:15 +0800 Subject: [PATCH 077/190] add file size statistics --- src/tsdb/src/tsdbFile.c | 4 ++++ src/tsdb/src/tsdbRWHelper.c | 8 -------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index b913070e8a..b8173d41b3 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -409,6 +409,10 @@ static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { pBuf = taosDecodeFixedU32(pBuf, &version); pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info)); + if (pFile->info.size == TSDB_FILE_HEAD_SIZE) { + pFile->info.size = lseek(pFile->fd, 0, SEEK_END); + } + if (version != TSDB_FILE_VERSION) { tsdbError("vgId:%d file %s version %u is not the same as program version %u which may cause problem", REPO_ID(pRepo), pFile->fname, version, TSDB_FILE_VERSION); diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index d30a060c2b..5d0e9f16b2 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -120,14 +120,6 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) return -1; if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) return -1; - // NOTE: For data file compatibility - if (helperDataF(pHelper)->info.size == TSDB_FILE_HEAD_SIZE) { - helperDataF(pHelper)->info.size = (uint64_t)lseek(helperDataF(pHelper)->fd, 0, SEEK_END); - } - if (helperLastF(pHelper)->info.size == TSDB_FILE_HEAD_SIZE) { - helperLastF(pHelper)->info.size = (uint64_t)lseek(helperLastF(pHelper)->fd, 0, SEEK_END); - } - // Create and open .h pFile = helperNewHeadF(pHelper); if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; From 1f66342c4b12cd121ca637939576534a8fd6f753 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 10 Aug 2020 11:12:16 +0800 Subject: [PATCH 078/190] remove part of assert --- src/tsdb/src/tsdbRWHelper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index 5d0e9f16b2..1e082e39be 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -394,7 +394,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx)); pFile->info.size += pIdx->len; - ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); } 
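/*
 * Editor's sketch: the "add file size statistics" changes above keep an
 * in-memory counter (pFile->info.size) in step with every successful write and,
 * for a while, compared it against the kernel offset before those ASSERTs were
 * commented out.  A hypothetical helper with the same bookkeeping is sketched
 * here; SDemoFile and demoAppend are illustrative names, not TDengine symbols.
 */
#include <assert.h>
#include <stdint.h>
#include <unistd.h>

typedef struct {
  int      fd;
  uint64_t size;   /* bytes believed to be in the file, maintained by the writer */
} SDemoFile;

static int demoAppend(SDemoFile *pFile, const void *buf, size_t len) {
  ssize_t n = write(pFile->fd, buf, len);
  if (n < 0 || (size_t)n < len) return -1;     /* caller decides how to recover        */

  pFile->size += (uint64_t)len;                /* advance the statistic on success     */

  /* Consistency check in the spirit of the ASSERTs above: for append-style
   * writes the tracked size should equal the current kernel file offset. */
  assert(pFile->size == (uint64_t)lseek(pFile->fd, 0, SEEK_CUR));
  return 0;
}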
return 0; @@ -436,7 +436,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { pFile->info.offset = offset; pFile->info.size += pFile->info.len; - ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); return 0; } @@ -813,7 +813,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa pCompBlock->keyLast); pFile->info.size += pCompBlock->len; - ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); return 0; From 2cee0797b428bf7e7192ec58066bed70f7105ae5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 15:56:41 +0800 Subject: [PATCH 079/190] [td-1101]add some logs --- src/tsdb/src/tsdbRead.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ccc631fb58..2e9520c360 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -172,6 +172,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle == NULL) { goto out_of_memory; } + pQueryHandle->order = pCond->order; pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; @@ -183,6 +184,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab pQueryHandle->qinfo = qinfo; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->allocSize = 0; + pQueryHandle->locateStart = false; if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) { goto out_of_memory; @@ -193,6 +195,12 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList); assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); + if (ASCENDING_TRAVERSE(pCond->order)) { + assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); + } else { + assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); + } + // allocate buffer in order to load data blocks from file int32_t numOfCols = pCond->numOfCols; @@ -243,6 +251,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); taosArrayPush(pQueryHandle->pTableCheckInfo, &info); + tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid, + info.tableId.tid, info.lastKey, qinfo); } } @@ -645,7 +655,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo int16_t* colIds = pQueryHandle->defaultLoadColumn->pData; int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, QH_GET_NUM_OF_COLS(pQueryHandle)); - if (ret == TSDB_CODE_SUCCESS) { + if (ret == TSDB_CODE_SUCCESS) { SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; @@ -1071,6 +1081,14 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* TSKEY* tsArray = pCols->cols[0].pData; + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + TSKEY s = tsArray[cur->pos]; + assert(s >= pQueryHandle->window.skey && s <= pQueryHandle->window.ekey); + } else { + TSKEY s = tsArray[cur->pos]; + assert(s <= pQueryHandle->window.skey && s >= pQueryHandle->window.ekey); + } + // for search the endPos, so the order needs to reverse int32_t order = 
(pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC; @@ -1550,7 +1568,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo; // current block is done, try next - if (!cur->mixBlock || cur->blockCompleted) { + if ((!cur->mixBlock) || cur->blockCompleted) { if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) || (cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) { // all data blocks in current file has been checked already, try next file if exists @@ -1569,6 +1587,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists return TSDB_CODE_SUCCESS; } } else { + tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo); handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); *exists = pQueryHandle->realNumOfRows > 0; From 8aad94d7eb8d192801aa45abbe86d15b83e9aa6a Mon Sep 17 00:00:00 2001 From: eurake Date: Mon, 10 Aug 2020 17:31:12 +0800 Subject: [PATCH 080/190] Update cluster-ch.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 删除多余英文字符 --- documentation20/webdocs/markdowndocs/cluster-ch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md index 2df6d2cb0e..afe0272387 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/webdocs/markdowndocs/cluster-ch.md @@ -107,7 +107,7 @@ CREATE DATABASE demo replica 3; ``` 一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。 -一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的d读写操作。 +一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。 因为vnode的引入,无法简单的给出结论:“集群中过半dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。 From e00d0728872db857a7b4eb6ce5c92ae3d1e503c3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 17:38:09 +0800 Subject: [PATCH 081/190] [td-1101] add time range check, fix bugs for projection query. 
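/*
 * Editor's sketch: the [td-1101] hunks above assert that the query time window
 * agrees with the traversal order, i.e. skey <= ekey for an ascending scan and
 * skey >= ekey for a descending one.  The standalone check below expresses the
 * same invariant; the type and function names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int64_t skey;   /* first key the scan starts from */
  int64_t ekey;   /* last key the scan may reach    */
} SWindowDemo;

static bool demoWindowMatchesOrder(SWindowDemo w, bool ascending) {
  return ascending ? (w.skey <= w.ekey) : (w.skey >= w.ekey);
}

/* Usage: reject (or normalize) a request whose window contradicts its order. */
static int demoValidateWindow(SWindowDemo w, bool ascending) {
  return demoWindowMatchesOrder(w, ascending) ? 0 : -1;
}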
--- src/query/src/qExecutor.c | 5 +++++ src/tsdb/src/tsdbRead.c | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1277e7bfbb..a08a5476e3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1342,6 +1342,11 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl if ((pQuery->limit.limit >= 0) && (pQuery->limit.limit + pQuery->limit.offset) <= numOfRes) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + if (((pTableQInfo->lastKey > pTableQInfo->win.ekey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((pTableQInfo->lastKey < pTableQInfo->win.ekey) && (!QUERY_IS_ASC_QUERY(pQuery)))) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } } } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 2e9520c360..d40084be23 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -196,9 +196,9 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); if (ASCENDING_TRAVERSE(pCond->order)) { - assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); - } else { assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); + } else { + assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); } // allocate buffer in order to load data blocks from file From dcfab65c6a4498d64ac3568a6ff370683a67ddcd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 18:10:06 +0800 Subject: [PATCH 082/190] [td-255] fix errors. --- src/tsdb/src/tsdbRead.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0aae3b2ad9..07b50301d3 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -232,10 +232,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab } STsdbMeta* pMeta = tsdbGetMeta(tsdb); - assert(pMeta != NULL); - - size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList); - assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); + assert(pMeta != NULL && sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); for (int32_t i = 0; i < sizeOfGroup; ++i) { SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i); @@ -251,11 +248,13 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab .tableId = ((STable*)(pKeyInfo->pTable))->tableId, .pTableObj = pKeyInfo->pTable, }; - info.tableId = pTable->tableId; assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); + info.tableId.tid = info.pTableObj->tableId.tid; + info.tableId.uid = info.pTableObj->tableId.uid; + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { assert(info.lastKey >= pQueryHandle->window.skey); } else { From 4bd41a0fbf2d1fb400169a7df1ba9fccdd39f9dd Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 18:17:14 +0800 Subject: [PATCH 083/190] TD-1057 --- src/client/src/tscSub.c | 2 +- src/client/src/tscSubquery.c | 12 +++--- src/client/src/tscUtil.c | 2 +- src/os/inc/os.h | 1 + src/os/inc/osCommon.h | 31 +++++++++++++++ src/os/inc/osWindows.h | 3 ++ src/os/src/detail/osMemory.c | 8 ++-- src/plugins/http/src/httpContext.c | 2 +- src/plugins/http/src/httpSession.c | 2 +- src/query/src/qExecutor.c | 12 +++--- src/tsdb/src/tsdbRWHelper.c | 4 +- src/tsdb/src/tsdbRead.c | 8 ++-- src/util/src/tkvstore.c | 2 +- 
tests/script/test.bat | 60 ++++++++++++++++++++++++++++++ 14 files changed, 122 insertions(+), 27 deletions(-) create mode 100644 src/os/inc/osCommon.h create mode 100644 tests/script/test.bat diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index 608551c7f3..5d8e601882 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -339,7 +339,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) { fclose(fp); taosArraySort(progress, tscCompareSubscriptionProgress); - tscDebug("subscription progress loaded, %zu tables: %s", taosArrayGetSize(progress), pSub->topic); + tscDebug("subscription progress loaded, %%" PRIzu " tables: %s", taosArrayGetSize(progress), pSub->topic); return 1; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 4e188d4fb6..0718338237 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -362,7 +362,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { } size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); - tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s", + tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, taosArrayGetSize(pNewQueryInfo->exprList), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); } @@ -522,7 +522,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, " - "numOfExpr:%zu, colList:%zu, numOfOutputFields:%d, name:%s", + "numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s", pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type, tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); @@ -1225,7 +1225,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), " - "exprInfo:%zu, colList:%zu, fieldsInfo:%d, tagIndex:%d, name:%s", + "exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s", pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, index.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name); } else { @@ -1260,7 +1260,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, " - "exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s", + "exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name); } @@ -1915,7 +1915,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { pSql->pSubs = calloc(size, POINTER_BYTES); pSql->numOfSubs = (uint16_t)size; - tscDebug("%p submit data to %zu vnode(s)", pSql, size); + tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size); SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); pState->numOfTotal = 
pSql->numOfSubs; @@ -1949,7 +1949,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { tscDebug("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, numOfSub); numOfSub++; } else { - tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%zu, code:%s", pSql, numOfSub, + tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%" PRIzu ", code:%s", pSql, numOfSub, size, tstrerror(pRes->code)); goto _error; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 582411fc0c..3cd91c5ad1 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1864,7 +1864,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void size_t size = taosArrayGetSize(pNewQueryInfo->colList); tscDebug( - "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%zu, colList:%zu," + "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu "," "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey, diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 700b29ce98..11c423a500 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -41,6 +41,7 @@ extern "C" { #endif #include "osAtomic.h" +#include "osCommon.h" #include "osDef.h" #include "osDir.h" #include "osFile.h" diff --git a/src/os/inc/osCommon.h b/src/os/inc/osCommon.h new file mode 100644 index 0000000000..70d2b2c0c2 --- /dev/null +++ b/src/os/inc/osCommon.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_OS_COMMON_H +#define TDENGINE_OS_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef TAOS_OS_DEF_ZU + #define PRIzu "zu" +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index caab61536e..6665dcd920 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -97,6 +97,9 @@ typedef SOCKET eventfd_t; #define TAOS_OS_DEF_EPOLL #define TAOS_EPOLL_WAIT_TIME 100 +#define TAOS_OS_DEF_ZU + #define PRIzu "ld" + #define TAOS_OS_FUNC_STRING_WCHAR int twcslen(const wchar_t *wcs); #define TAOS_OS_FUNC_STRING_GETLINE diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index 3bbe806369..dfd320be89 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -42,7 +42,7 @@ static bool random_alloc_fail(size_t size, const char* file, uint32_t line) { } if (fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: memory allocation of %zu bytes will fail.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: memory allocation of %" PRIzu " bytes will fail.\n", file, line, size); } return true; @@ -159,7 +159,7 @@ static void* malloc_detect_leak(size_t size, const char* file, uint32_t line) { } if (size > UINT32_MAX && fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: size too large: %" PRIzu ".\n", file, line, size); } blk->file = file; @@ -207,7 +207,7 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3 } if (size > UINT32_MAX && fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: size too large: %" PRIzu ".\n", file, line, size); } blk = (SMemBlock*)p; @@ -295,7 +295,7 @@ static void dump_memory_leak() { atomic_store_ptr(&lock, 0); - fprintf(fpAllocLog, "\nnumber of blocks: %zu, total bytes: %zu\n", numOfBlk, totalSize); + fprintf(fpAllocLog, "\nnumber of blocks: %" PRIzu ", total bytes: %" PRIzu "\n", numOfBlk, totalSize); fflush(fpAllocLog); } diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index e367911695..7d6cca511e 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -70,7 +70,7 @@ bool httpInitContexts() { void httpCleanupContexts() { if (tsHttpServer.contextCache != NULL) { SCacheObj *cache = tsHttpServer.contextCache; - httpInfo("context cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable)); + httpInfo("context cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.contextCache); tsHttpServer.contextCache = NULL; } diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c index fce85df45e..ad57f0fc29 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -107,7 +107,7 @@ static void httpDestroySession(void *data) { void httpCleanUpSessions() { if (tsHttpServer.sessionCache != NULL) { SCacheObj *cache = tsHttpServer.sessionCache; - httpInfo("session cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable)); + httpInfo("session cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.sessionCache); tsHttpServer.sessionCache = NULL; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index be3d476be5..71bf0eaa5a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4478,7 +4478,7 @@ 
static void sequentialTableProcess(SQInfo *pQInfo) { while (pQInfo->groupIndex < numOfGroups) { SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); - qDebug("QInfo:%p last_row query on group:%d, total group:%zu, current group:%p", pQInfo, pQInfo->groupIndex, + qDebug("QInfo:%p last_row query on group:%d, total group:%" PRIzu ", current group:%p", pQInfo, pQInfo->groupIndex, numOfGroups, group); STsdbQueryCond cond = { @@ -4552,7 +4552,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { while (pQInfo->groupIndex < numOfGroups) { SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); - qDebug("QInfo:%p group by normal columns group:%d, total group:%zu", pQInfo, pQInfo->groupIndex, numOfGroups); + qDebug("QInfo:%p group by normal columns group:%d, total group:%" PRIzu "", pQInfo, pQInfo->groupIndex, numOfGroups); STsdbQueryCond cond = { .colList = pQuery->colList, @@ -4743,7 +4743,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } qDebug( - "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%zu, %"PRId64" points returned, total:%"PRId64", offset:%" PRId64, + "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%" PRIzu ", %"PRId64" points returned, total:%"PRId64", offset:%" PRId64, pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total, pQuery->limit.offset); } @@ -6241,11 +6241,11 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - qDebug("qmsg:%p query on %zu tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); + qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); } int64_t el = taosGetTimestampUs() - st; - qDebug("qmsg:%p tag filter completed, numOfTables:%zu, elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); + qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); } else { assert(0); } @@ -6356,7 +6356,7 @@ bool qTableQuery(qinfo_t qinfo) { if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is killed", pQInfo); } else if (pQuery->rec.rows == 0) { - qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); + qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); } else { qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index d63bf8ab78..41da1b2dc6 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -608,14 +608,14 @@ int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) { } if (taosTRead(pFile->fd, (void *)pHelper->pCompData, tsize) < tsize) { - tsdbError("vgId:%d failed to read %zu bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, + tsdbError("vgId:%d failed to read %" PRIzu " bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; } if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompData, (uint32_t)tsize)) { - tsdbError("vgId:%d file %s is broken, offset %" PRId64 " size %zu", REPO_ID(pHelper->pRepo), 
pFile->fname, + tsdbError("vgId:%d file %s is broken, offset %" PRId64 " size %" PRIzu "", REPO_ID(pHelper->pRepo), pFile->fname, (int64_t)pCompBlock->offset, tsize); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; return -1; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 17b0239e3b..051dcf7adf 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -250,7 +250,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab taosArraySort(pQueryHandle->pTableCheckInfo, tsdbCheckInfoCompar); pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); - tsdbDebug("%p total numOfTable:%zu in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); + tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo); @@ -2190,7 +2190,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC } taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %zu tables belong to one group", size); + tsdbDebug("all %" PRIzu " tables belong to one group", size); } else { STableGroupSupporter *pSupp = (STableGroupSupporter *) calloc(1, sizeof(STableGroupSupporter)); pSupp->numOfCols = numOfOrderCols; @@ -2309,7 +2309,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT pGroupInfo->numOfTables = taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); - tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%zu", tsdb, pGroupInfo->numOfTables); + tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%" PRIzu "", tsdb, pGroupInfo->numOfTables); taosArrayDestroy(res); if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error; @@ -2352,7 +2352,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT pGroupInfo->numOfTables = taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); - tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%zu, belong to %zu groups", tsdb, pTable->tableId.tid, + tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%" PRIzu ", belong to %" PRIzu " groups", tsdb, pTable->tableId.tid, pTable->tableId.uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); taosArrayDestroy(res); diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index 9657d82773..c10e882d57 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -509,7 +509,7 @@ static int tdRestoreKVStore(SKVStore *pStore) { ssize_t tsize = taosTRead(pStore->fd, tbuf, sizeof(SKVRecord)); if (tsize == 0) break; if (tsize < sizeof(SKVRecord)) { - uError("failed to read %zu bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, + uError("failed to read %" PRIzu " bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, pStore->info.size, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; diff --git a/tests/script/test.bat b/tests/script/test.bat new file mode 100644 index 0000000000..1574b5013e --- /dev/null +++ b/tests/script/test.bat @@ -0,0 +1,60 @@ +@echo off + +echo TDengine in windows +echo Start TDengine Testing Case ... 
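Stepping back to the tsdbLoadCompData and tdRestoreKVStore hunks a few lines up: beyond the format-string change, both call sites share one shape worth spelling out, namely read an exact number of bytes, verify a checksum over the whole buffer, and turn either failure into terrno before returning. Here is a stripped-down sketch of that shape; plain POSIX open/read and a placeholder verifier stand in for the project's taosTRead and taosCheckChecksumWhole, so treat those helpers as assumptions.

/* Sketch: exact-size read followed by whole-buffer checksum verification. */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* placeholder for taosCheckChecksumWhole(); the real helper validates a CRC
   stored at the tail of the buffer, here only the interface is modeled */
static int checksumWholeOk(const uint8_t *buf, uint32_t len) {
  (void)buf; (void)len;
  return 1;
}

static int loadBlock(int fd, void *buf, size_t tsize, const char *fname) {
  ssize_t nread = read(fd, buf, tsize);
  if (nread < 0 || (size_t)nread < tsize) {   /* I/O error or short read */
    fprintf(stderr, "failed to read %zu bytes from %s since %s\n", tsize, fname,
            nread < 0 ? strerror(errno) : "short read");
    return -1;   /* the real code sets terrno = TAOS_SYSTEM_ERROR(errno) here */
  }
  if (!checksumWholeOk((const uint8_t *)buf, (uint32_t)tsize)) {
    fprintf(stderr, "file %s is broken, size %zu\n", fname, tsize);
    return -1;   /* the real code sets terrno = TSDB_CODE_TDB_FILE_CORRUPTED here */
  }
  return 0;
}

int main(void) {
  uint8_t block[64];
  int fd = open("/dev/zero", O_RDONLY);   /* any readable descriptor works for the demo */
  if (fd < 0) return 1;
  int rc = loadBlock(fd, block, sizeof(block), "/dev/zero");
  close(fd);
  printf("loadBlock returned %d\n", rc);
  return 0;
}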
+ +set "SCRIPT_DIR=%~dp0" +echo SCRIPT_DIR: %SCRIPT_DIR% + +set "BUILD_DIR=%~dp0..\..\debug\build\bin" +set "TSIM=%~dp0..\..\debug\build\bin\tsim" +echo BUILD_DIR: %BUILD_DIR% + +set "SIM_DIR=%~dp0..\..\sim" +echo SIM_DIR: %SIM_DIR% + +set "TSIM_DIR=%~dp0..\..\sim\tsim" +echo TSIM_DIR: %TSIM_DIR% + +set "CFG_DIR=%~dp0..\..\sim\tsim\cfg" +echo CFG_DIR: %CFG_DIR% + +set "LOG_DIR=%~dp0..\..\sim\tsim\log" +echo LOG_DIR: %LOG_DIR% + +set "TAOS_CFG=%~dp0..\..\sim\tsim\cfg\taos.cfg" +echo TAOS_CFG: %TAOS_CFG% + +if not exist %SIM_DIR% mkdir %SIM_DIR% +if not exist %TSIM_DIR% mkdir %TSIM_DIR% +if exist %CFG_DIR% rmdir /s/q %CFG_DIR% +if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if not exist %CFG_DIR% mkdir %CFG_DIR% +if not exist %LOG_DIR% mkdir %LOG_DIR% + +echo firstEp %FIRSTEP% > %TAOS_CFG% +echo serverPort 6030 >> %TAOS_CFG% +echo wal 2 >> %TAOS_CFG% +echo asyncLog 0 >> %TAOS_CFG% +echo locale en_US.UTF-8 >> %TAOS_CFG% +echo logDir %LOG_DIR% >> %TAOS_CFG% +echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% +echo numOfLogLines 100000000 >> %TAOS_CFG% +echo tmrDebugFlag 131 >> %TAOS_CFG% +echo rpcDebugFlag 143 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% +echo qdebugFlag 143 >> %TAOS_CFG% +echo udebugFlag 143 >> %TAOS_CFG% + +set "FILE_NAME=windows\testSuite.sim" +set "FIRSTEP=localhost" +if "%1" == "-f" set "FILE_NAME=%2" +if "%1" == "-h" set "FIRSTEP=%2" +if "%3" == "-f" set "FILE_NAME=%4" +if "%3" == "-h" set "FIRSTEP=%4" + +echo FILE_NAME: %FILE_NAME% +echo FIRSTEP: %FIRSTEP% +echo ExcuteCmd: %tsim% -c %CFG_DIR% -f %FILE_NAME% + +%tsim% -c %CFG_DIR% -f %FILE_NAME% \ No newline at end of file From 98cec99dc16a2c46e28bbaeb4783682f80cf7482 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 18:30:16 +0800 Subject: [PATCH 084/190] [td-255] fix counting value error bug. --- src/util/src/hash.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 9e262916ce..93514f87cf 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -451,6 +451,8 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi while((pNode = pEntry->next) != NULL) { if (fp && (!fp(param, pNode->data))) { pEntry->num -= 1; + atomic_sub_fetch_64(&pHashObj->size, 1); + pEntry->next = pNode->next; if (pEntry->num == 0) { @@ -475,6 +477,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi if (fp && (!fp(param, pNext->data))) { pNode->next = pNext->next; pEntry->num -= 1; + atomic_sub_fetch_64(&pHashObj->size, 1); if (pEntry->num == 0) { assert(pEntry->next == NULL); From f6712d984ac70b1743151cc15d940c3976488b51 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 18:43:16 +0800 Subject: [PATCH 085/190] [td-255]remove unused functions. 
--- src/util/src/hash.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 93514f87cf..8df3e3d4d3 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -88,23 +88,6 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *k return pNode; } -static FORCE_INLINE SHashNode *doSerchPrevInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { - SHashNode *prev= NULL; - SHashNode *pNode = pe->next; - - while (pNode) { - if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { - assert(pNode->hashVal == hashVal); - break; - } - - prev = pNode; - pNode = pNode->next; - } - - return prev; -} - /** * Resize the hash list if the threshold is reached * From 5aae4b90786096e4438bd66b23d3f37a0943d1e6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 18:46:20 +0800 Subject: [PATCH 086/190] TD-1057 first compile version of windows 64 client --- src/client/src/tscSub.c | 2 +- tests/script/windows/testSuite.sim | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index 5d8e601882..0e1be926f4 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -339,7 +339,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) { fclose(fp); taosArraySort(progress, tscCompareSubscriptionProgress); - tscDebug("subscription progress loaded, %%" PRIzu " tables: %s", taosArrayGetSize(progress), pSub->topic); + tscDebug("subscription progress loaded, %" PRIzu " tables: %s", taosArrayGetSize(progress), pSub->topic); return 1; } diff --git a/tests/script/windows/testSuite.sim b/tests/script/windows/testSuite.sim index e372217b62..fc574ed9c4 100644 --- a/tests/script/windows/testSuite.sim +++ b/tests/script/windows/testSuite.sim @@ -1,14 +1,14 @@ run windows/alter/table.sim run windows/alter/metrics.sim -run windows/compute/avg.sim +#run windows/compute/avg.sim run windows/compute/bottom.sim run windows/compute/count.sim run windows/compute/diff.sim run windows/compute/first.sim run windows/compute/interval.sim run windows/compute/last.sim -run windows/compute/leastsquare.sim +##run windows/compute/leastsquare.sim run windows/compute/max.sim run windows/compute/min.sim run windows/compute/percentile.sim @@ -37,12 +37,12 @@ run windows/field/tinyint.sim run windows/import/basic.sim run windows/insert/basic.sim -run windows/insert/query_block1_file.sim -run windows/insert/query_block1_memory.sim -run windows/insert/query_block2_file.sim -run windows/insert/query_block2_memory.sim -run windows/insert/query_file_memory.sim -run windows/insert/query_multi_file.sim +#run windows/insert/query_block1_file.sim +#run windows/insert/query_block1_memory.sim +#run windows/insert/query_block2_file.sim +#run windows/insert/query_block2_memory.sim +#run windows/insert/query_file_memory.sim +#run windows/insert/query_multi_file.sim run windows/table/binary.sim run windows/table/bool.sim From bc73587498eecfdac33c80c7f8ca0fe2df6a456c Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 10 Aug 2020 18:47:44 +0800 Subject: [PATCH 087/190] udpate taos-jdbcdriver version for jdbc demos --- tests/examples/JDBC/JDBCDemo/pom.xml | 2 +- tests/examples/JDBC/SpringJdbcTemplate/pom.xml | 2 +- tests/examples/JDBC/springbootdemo/pom.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index d015c62772..50313a0a0c 100644 
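The one-character tscSub.c change above is worth a comment: "%%" PRIzu pastes together into "%%zu", which printf treats as a literal percent sign followed by the plain text "zu", so no conversion happens, the size_t argument is never consumed, and the trailing %s would then read that integer as a pointer. The corrected "%" PRIzu consumes the count as intended. A small, safe demonstration follows; the broken format is printed without arguments so nothing undefined runs, and PRIzu is assumed to be "zu" for the host platform.

/* Sketch: why "%%" PRIzu prints a literal instead of the value. */
#include <stdio.h>
#include <stddef.h>

#define PRIzu "zu"   /* assumption: building on a platform where %zu is available */

int main(void) {
  size_t count = 42;
  /* "%%" collapses to '%', so the whole directive becomes the literal text "%zu" */
  printf("broken: subscription progress loaded, " "%%" PRIzu " tables\n");
  /* one conversion, one argument: what the fixed line produces */
  printf("fixed : subscription progress loaded, " "%" PRIzu " tables\n", count);
  return 0;
}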
--- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 2.0.2 diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml index 45abc5354a..b796d52d28 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -41,7 +41,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.3 + 2.0.2 diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 74522979c0..5f31d36d6e 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.3 + 2.0.2 From 9eafba461fe0b45bf8136ea0217fc13ebbad13ea Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 10 Aug 2020 18:58:25 +0800 Subject: [PATCH 088/190] TD-1102 --- src/tsdb/inc/tsdbMain.h | 1 + src/tsdb/src/tsdbFile.c | 85 ++++++++++++++++++++++++++++++--------- src/tsdb/src/tsdbMain.c | 5 +++ src/vnode/src/vnodeMain.c | 2 +- 4 files changed, 73 insertions(+), 20 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index f42c00f171..678e167351 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -186,6 +186,7 @@ typedef struct { typedef struct { int fileId; + int state; // 0 for health, 1 for problem SFile files[TSDB_FILE_TYPE_MAX]; } SFileGroup; diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index b8173d41b3..71d88ff29e 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -13,6 +13,8 @@ * along with this program. If not, see . */ #define _DEFAULT_SOURCE +#include + #include "os.h" #include "talgo.h" #include "tchecksum.h" @@ -23,10 +25,11 @@ const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); -static void tsdbDestroyFile(SFile *pFile); -static int compFGroup(const void *arg1, const void *arg2); -static int keyFGroupCompFunc(const void *key, const void *fgroup); +static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); +static void tsdbDestroyFile(SFile *pFile); +static int compFGroup(const void *arg1, const void *arg2); +static int keyFGroupCompFunc(const void *key, const void *fgroup); +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); // ---------------- INTERNAL FUNCTIONS ---------------- STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { @@ -69,12 +72,14 @@ void tsdbFreeFileH(STsdbFileH *pFileH) { int tsdbOpenFileH(STsdbRepo *pRepo) { ASSERT(pRepo != NULL && pRepo->tsdbFileH != NULL); - char *tDataDir = NULL; - DIR * dir = NULL; - int fid = 0; - int vid = 0; + char * tDataDir = NULL; + DIR * dir = NULL; + int fid = 0; + int vid = 0; + regex_t regex1, regex2; + int code = 0; - SFileGroup fileGroup = {0}; + SFileGroup fileGroup = {0}; STsdbFileH *pFileH = pRepo->tsdbFileH; tDataDir = tsdbGetDataDirName(pRepo->rootDir); @@ -90,28 +95,56 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { goto _err; } + code = regcomp(®ex1, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED); + if (code != 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + + code = regcomp(®ex2, "^v[0-9]+f[0-9]+\\.(h|d|l|s)$", REG_EXTENDED); + if (code != 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + struct dirent *dp = NULL; while ((dp = readdir(dir)) != NULL) { - if (strncmp(dp->d_name, ".", 1) == 0 || 
strncmp(dp->d_name, "..", 2) == 0) continue; - sscanf(dp->d_name, "v%df%d", &vid, &fid); + if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; - if (tsdbSearchFGroup(pRepo->tsdbFileH, fid, TD_EQ) != NULL) continue; + code = regexec(®ex1, dp->d_name, 0, NULL, 0); + if (code == 0) { + sscanf(dp->d_name, "v%df%d", &vid, &fid); + if (vid != REPO_ID(pRepo)) { + tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); + continue; + } - memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); - fileGroup.fileId = fid; - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - if (tsdbInitFile(&fileGroup.files[type], pRepo, fid, type) < 0) { - tsdbError("vgId:%d failed to init file fid %d type %d", REPO_ID(pRepo), fid, type); + if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; + memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); + fileGroup.fileId = fid; + + tsdbInitFileGroup(&fileGroup, pRepo); + } else if (code == REG_NOMATCH) { + code = regexec(®ex2, dp->d_name, 0, NULL, 0); + if (code == 0) { + tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); + remove(dp->d_name); + } else if (code == REG_NOMATCH) { + tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); + continue; + } else { goto _err; } + } else { + goto _err; } - tsdbDebug("vgId:%d file group %d init", REPO_ID(pRepo), fid); - pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); } + regfree(®ex1); + regfree(®ex2); taosTFree(tDataDir); closedir(dir); return 0; @@ -119,6 +152,9 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { _err: for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]); + regfree(®ex1); + regfree(®ex2); + taosTFree(tDataDir); if (dir != NULL) closedir(dir); tsdbCloseFileH(pRepo); @@ -450,3 +486,14 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup) { return fid > pFGroup->fileId ? 
1 : -1; } } + +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { + for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { + if (tsdbInitFile(&pFGroup->files[type], pRepo, pFGroup->fileId, type) < 0) { + memset(&pFGroup->files[type].info, 0, sizeof(STsdbFileInfo)); + pFGroup->files[type].info.magic = TSDB_FILE_INIT_MAGIC; + pFGroup->state = 1; + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + } + } +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 3acad05504..628a8bac81 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -71,6 +71,8 @@ static void tsdbStopStream(STsdbRepo *pRepo); // Function declaration int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) { + taosRemoveDir(rootDir); + if (mkdir(rootDir, 0755) < 0) { tsdbError("vgId:%d failed to create rootDir %s since %s", pCfg->tsdbId, rootDir, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -95,6 +97,8 @@ TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH) { STsdbCfg config = {0}; STsdbRepo *pRepo = NULL; + terrno = TSDB_CODE_SUCCESS; + if (tsdbLoadConfig(rootDir, &config) < 0) { tsdbError("failed to open repo in rootDir %s since %s", rootDir, tstrerror(terrno)); return NULL; @@ -799,6 +803,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) { tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC); while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) { + if (pFGroup->state) continue; if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err; if (tsdbLoadCompIdx(&rhelper, NULL) < 0) goto _err; for (int i = 1; i < pMeta->maxTables; i++) { diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 9695f90c30..f191c6f1e5 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -257,7 +257,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; - } else if (terrno != 0 && pVnode->syncCfg.replica <= 1) { + } else if (terrno != TSDB_CODE_SUCCESS && pVnode->syncCfg.replica <= 1) { vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); vnodeCleanUp(pVnode); From 7e4cf6511a4cf01efa8d5635992e5fbb37c9d8ae Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 10 Aug 2020 19:05:00 +0800 Subject: [PATCH 089/190] [td-255]remove unused functions. 
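On the tsdbFile.c rework above: the old loop trusted any directory entry it could sscanf, while the new one compiles two POSIX patterns, accepts and parses committed files (v<vid>f<fid>.head/.data/.last/.stat), deletes leftover temporaries (.h/.d/.l/.s), and merely logs anything else; a fileset that cannot be initialised is now only flagged through the new state field, so tsdbRestoreInfo skips it and, judging from the vnodeMain.c hunk, the vnode open is aborted only when there is no replica to recover from. A condensed sketch of the name classification, with the pattern strings copied from the hunk and the error handling trimmed:

/* Sketch: classify tsdb data-file names with POSIX regex before parsing them. */
#include <regex.h>
#include <stdio.h>

/* 1 = committed data file (vid/fid filled in), -1 = leftover temp file, 0 = foreign */
static int classifyDataFile(const char *name, int *vid, int *fid) {
  regex_t committed, temporary;
  int kind = 0;

  /* same patterns as the hunk; regcomp error checks are trimmed in this sketch */
  regcomp(&committed, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED);
  regcomp(&temporary, "^v[0-9]+f[0-9]+\\.(h|d|l|s)$", REG_EXTENDED);

  if (regexec(&committed, name, 0, NULL, 0) == 0) {
    sscanf(name, "v%df%d", vid, fid);   /* safe: the pattern already fixed the shape */
    kind = 1;
  } else if (regexec(&temporary, name, 0, NULL, 0) == 0) {
    kind = -1;                          /* the caller remove()s these */
  }

  regfree(&committed);
  regfree(&temporary);
  return kind;
}

int main(void) {
  int vid = 0, fid = 0;
  printf("%d\n", classifyDataFile("v1f1820.head", &vid, &fid));   /* 1, vid=1 fid=1820 */
  printf("%d\n", classifyDataFile("v1f1820.l", &vid, &fid));      /* -1 */
  printf("%d\n", classifyDataFile("meta", &vid, &fid));           /* 0 */
  return 0;
}

Validating the name before the sscanf also pairs with the new vid check in the loop, which now rejects files that belong to a different vgId instead of silently grouping them.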
--- src/util/src/tcache.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index e1ba80c9d0..54a4b289b7 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -30,14 +30,6 @@ static FORCE_INLINE void __cache_wr_lock(SCacheObj *pCacheObj) { #endif } -static FORCE_INLINE void __cache_rd_lock(SCacheObj *pCacheObj) { -#if defined(LINUX) - pthread_rwlock_rdlock(&pCacheObj->lock); -#else - pthread_mutex_lock(&pCacheObj->lock); -#endif -} - static FORCE_INLINE void __cache_unlock(SCacheObj *pCacheObj) { #if defined(LINUX) pthread_rwlock_unlock(&pCacheObj->lock); From e57965f64ace0f9c08f8e6ad70dd99cee249b7e6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 13:48:59 +0000 Subject: [PATCH 090/190] TD-1057 --- tests/script/windows/alter/metrics.sim | 7 +++++++ tests/script/windows/alter/table.sim | 7 +++++++ tests/script/windows/compute/avg.sim | 7 +++++++ tests/script/windows/compute/bottom.sim | 7 +++++++ tests/script/windows/compute/count.sim | 7 +++++++ tests/script/windows/compute/diff.sim | 7 +++++++ tests/script/windows/compute/first.sim | 7 +++++++ tests/script/windows/compute/interval.sim | 7 +++++++ tests/script/windows/compute/last.sim | 7 +++++++ tests/script/windows/compute/leastsquare.sim | 7 +++++++ tests/script/windows/compute/max.sim | 7 +++++++ tests/script/windows/compute/min.sim | 7 +++++++ tests/script/windows/compute/percentile.sim | 7 +++++++ tests/script/windows/compute/stddev.sim | 7 +++++++ tests/script/windows/compute/sum.sim | 7 +++++++ tests/script/windows/compute/top.sim | 7 +++++++ tests/script/windows/db/basic.sim | 7 +++++++ tests/script/windows/db/len.sim | 7 +++++++ tests/script/windows/field/2.sim | 7 +++++++ tests/script/windows/field/3.sim | 7 +++++++ tests/script/windows/field/4.sim | 7 +++++++ tests/script/windows/field/5.sim | 7 +++++++ tests/script/windows/field/6.sim | 7 +++++++ tests/script/windows/field/bigint.sim | 7 +++++++ tests/script/windows/field/binary.sim | 7 +++++++ tests/script/windows/field/bool.sim | 7 +++++++ tests/script/windows/field/double.sim | 7 +++++++ tests/script/windows/field/float.sim | 7 +++++++ tests/script/windows/field/int.sim | 7 +++++++ tests/script/windows/field/single.sim | 7 +++++++ tests/script/windows/field/smallint.sim | 7 +++++++ tests/script/windows/field/tinyint.sim | 7 +++++++ tests/script/windows/import/basic.sim | 7 +++++++ tests/script/windows/insert/basic.sim | 7 +++++++ .../script/windows/insert/query_block1_file.sim | 9 ++++++++- .../windows/insert/query_block1_memory.sim | 7 +++++++ .../script/windows/insert/query_block2_file.sim | 7 +++++++ .../windows/insert/query_block2_memory.sim | 7 +++++++ .../script/windows/insert/query_file_memory.sim | 7 +++++++ tests/script/windows/insert/query_multi_file.sim | 7 +++++++ tests/script/windows/table/binary.sim | 7 +++++++ tests/script/windows/table/bool.sim | 7 +++++++ tests/script/windows/table/column_name.sim | 8 ++++++++ tests/script/windows/table/column_num.sim | 8 ++++++++ tests/script/windows/table/column_value.sim | 8 ++++++++ tests/script/windows/table/db.table.sim | 8 ++++++++ tests/script/windows/table/double.sim | 7 +++++++ tests/script/windows/table/float.sim | 7 +++++++ tests/script/windows/table/table.sim | 7 +++++++ tests/script/windows/table/table_len.sim | 7 +++++++ tests/script/windows/tag/3.sim | 7 +++++++ tests/script/windows/tag/4.sim | 7 +++++++ tests/script/windows/tag/5.sim | 7 +++++++ tests/script/windows/tag/6.sim | 7 +++++++ tests/script/windows/tag/add.sim | 
7 +++++++ tests/script/windows/tag/bigint.sim | 7 +++++++ tests/script/windows/tag/binary.sim | 7 +++++++ tests/script/windows/tag/binary_binary.sim | 7 +++++++ tests/script/windows/tag/bool.sim | 7 +++++++ tests/script/windows/tag/bool_binary.sim | 7 +++++++ tests/script/windows/tag/bool_int.sim | 7 +++++++ tests/script/windows/tag/change.sim | 7 +++++++ tests/script/windows/tag/column.sim | 7 +++++++ tests/script/windows/tag/create.sim | 7 +++++++ tests/script/windows/tag/delete.sim | 7 +++++++ tests/script/windows/tag/double.sim | 7 +++++++ tests/script/windows/tag/filter.sim | 7 +++++++ tests/script/windows/tag/float.sim | 7 +++++++ tests/script/windows/tag/int.sim | 7 +++++++ tests/script/windows/tag/int_binary.sim | 7 +++++++ tests/script/windows/tag/int_float.sim | 7 +++++++ tests/script/windows/tag/set.sim | 7 +++++++ tests/script/windows/tag/smallint.sim | 7 +++++++ tests/script/windows/tag/tinyint.sim | 7 +++++++ tests/script/windows/testSuite.sim | 16 ++++++++-------- tests/script/windows/vector/metrics_field.sim | 7 +++++++ tests/script/windows/vector/metrics_mix.sim | 7 +++++++ tests/script/windows/vector/metrics_query.sim | 7 +++++++ tests/script/windows/vector/metrics_tag.sim | 7 +++++++ tests/script/windows/vector/metrics_time.sim | 7 +++++++ tests/script/windows/vector/multi.sim | 7 +++++++ tests/script/windows/vector/single.sim | 7 +++++++ tests/script/windows/vector/table_field.sim | 7 +++++++ tests/script/windows/vector/table_mix.sim | 7 +++++++ tests/script/windows/vector/table_query.sim | 7 +++++++ tests/script/windows/vector/table_time.sim | 7 +++++++ 86 files changed, 608 insertions(+), 9 deletions(-) diff --git a/tests/script/windows/alter/metrics.sim b/tests/script/windows/alter/metrics.sim index 3717d8c1ed..7d5dc77949 100644 --- a/tests/script/windows/alter/metrics.sim +++ b/tests/script/windows/alter/metrics.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ======== step1 sql create database d2 sql use d2 diff --git a/tests/script/windows/alter/table.sim b/tests/script/windows/alter/table.sim index 3b811a065e..03182e613d 100644 --- a/tests/script/windows/alter/table.sim +++ b/tests/script/windows/alter/table.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ======== step1 sql create database d1 sql use d1 diff --git a/tests/script/windows/compute/avg.sim b/tests/script/windows/compute/avg.sim index 1374ca5a25..b655abf163 100644 --- a/tests/script/windows/compute/avg.sim +++ b/tests/script/windows/compute/avg.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_av_db $tbPrefix = m_av_tb $mtPrefix = m_av_mt diff --git a/tests/script/windows/compute/bottom.sim b/tests/script/windows/compute/bottom.sim index e908c774e4..dc104a8ebd 100644 --- a/tests/script/windows/compute/bottom.sim +++ b/tests/script/windows/compute/bottom.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_bo_db $tbPrefix = m_bo_tb $mtPrefix = m_bo_mt diff --git a/tests/script/windows/compute/count.sim b/tests/script/windows/compute/count.sim index 54544f0354..9c9d8821b0 100644 --- a/tests/script/windows/compute/count.sim 
+++ b/tests/script/windows/compute/count.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_co_db $tbPrefix = m_co_tb $mtPrefix = m_co_mt diff --git a/tests/script/windows/compute/diff.sim b/tests/script/windows/compute/diff.sim index 6c2829872a..667fcdbcff 100644 --- a/tests/script/windows/compute/diff.sim +++ b/tests/script/windows/compute/diff.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_di_db $tbPrefix = m_di_tb $mtPrefix = m_di_mt diff --git a/tests/script/windows/compute/first.sim b/tests/script/windows/compute/first.sim index 9a0c02fe4b..d6e1b1deea 100644 --- a/tests/script/windows/compute/first.sim +++ b/tests/script/windows/compute/first.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_fi_db $tbPrefix = m_fi_tb $mtPrefix = m_fi_mt diff --git a/tests/script/windows/compute/interval.sim b/tests/script/windows/compute/interval.sim index 365c6d9d31..4bf548ccf2 100644 --- a/tests/script/windows/compute/interval.sim +++ b/tests/script/windows/compute/interval.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_in_db $tbPrefix = m_in_tb $mtPrefix = m_in_mt diff --git a/tests/script/windows/compute/last.sim b/tests/script/windows/compute/last.sim index aa9699778f..63d4d3ecbd 100644 --- a/tests/script/windows/compute/last.sim +++ b/tests/script/windows/compute/last.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_la_db $tbPrefix = m_la_tb $mtPrefix = m_la_mt diff --git a/tests/script/windows/compute/leastsquare.sim b/tests/script/windows/compute/leastsquare.sim index bb7404edd0..4cd3ad1fb9 100644 --- a/tests/script/windows/compute/leastsquare.sim +++ b/tests/script/windows/compute/leastsquare.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_le_db $tbPrefix = m_le_tb $mtPrefix = m_le_mt diff --git a/tests/script/windows/compute/max.sim b/tests/script/windows/compute/max.sim index a19d122ecd..e480736550 100644 --- a/tests/script/windows/compute/max.sim +++ b/tests/script/windows/compute/max.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_ma_db $tbPrefix = m_ma_tb $mtPrefix = m_ma_mt diff --git a/tests/script/windows/compute/min.sim b/tests/script/windows/compute/min.sim index 216f2061d7..1ff637cecd 100644 --- a/tests/script/windows/compute/min.sim +++ b/tests/script/windows/compute/min.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mi_db $tbPrefix = m_mi_tb $mtPrefix = m_mi_mt diff --git a/tests/script/windows/compute/percentile.sim b/tests/script/windows/compute/percentile.sim index 20b2740d6e..5e327055a8 100644 --- a/tests/script/windows/compute/percentile.sim +++ 
b/tests/script/windows/compute/percentile.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_pe_db $tbPrefix = m_pe_tb $mtPrefix = m_pe_mt diff --git a/tests/script/windows/compute/stddev.sim b/tests/script/windows/compute/stddev.sim index c02b3e4ab3..2aa481248a 100644 --- a/tests/script/windows/compute/stddev.sim +++ b/tests/script/windows/compute/stddev.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_st_db $tbPrefix = m_st_tb $mtPrefix = m_st_mt diff --git a/tests/script/windows/compute/sum.sim b/tests/script/windows/compute/sum.sim index 04af1d457a..30e98a5b25 100644 --- a/tests/script/windows/compute/sum.sim +++ b/tests/script/windows/compute/sum.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_su_db $tbPrefix = m_su_tb $mtPrefix = m_su_mt diff --git a/tests/script/windows/compute/top.sim b/tests/script/windows/compute/top.sim index b3c698c064..9590997ef7 100644 --- a/tests/script/windows/compute/top.sim +++ b/tests/script/windows/compute/top.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_to_db $tbPrefix = m_to_tb $mtPrefix = m_to_mt diff --git a/tests/script/windows/db/basic.sim b/tests/script/windows/db/basic.sim index f1e18d15a5..fffde94d66 100644 --- a/tests/script/windows/db/basic.sim +++ b/tests/script/windows/db/basic.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ============================ dnode1 start $i = 0 diff --git a/tests/script/windows/db/len.sim b/tests/script/windows/db/len.sim index f922e7e05a..5afa2496dd 100644 --- a/tests/script/windows/db/len.sim +++ b/tests/script/windows/db/len.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print =============== step1 sql_error drop database dd diff --git a/tests/script/windows/field/2.sim b/tests/script/windows/field/2.sim index 3d4492083e..8ac6fa1a1b 100644 --- a/tests/script/windows/field/2.sim +++ b/tests/script/windows/field/2.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bt_db $tbPrefix = fi_bt_tb $mtPrefix = fi_bt_mt diff --git a/tests/script/windows/field/3.sim b/tests/script/windows/field/3.sim index fb7d60d12a..331e930b31 100644 --- a/tests/script/windows/field/3.sim +++ b/tests/script/windows/field/3.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_3_db $tbPrefix = fi_3_tb $mtPrefix = fi_3_mt diff --git a/tests/script/windows/field/4.sim b/tests/script/windows/field/4.sim index f7ffa9807c..c6224c46ee 100644 --- a/tests/script/windows/field/4.sim +++ b/tests/script/windows/field/4.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print 
======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_4_db $tbPrefix = fi_4_tb $mtPrefix = fi_4_mt diff --git a/tests/script/windows/field/5.sim b/tests/script/windows/field/5.sim index e408871693..d1f40059d0 100644 --- a/tests/script/windows/field/5.sim +++ b/tests/script/windows/field/5.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_5_db $tbPrefix = fi_5_tb $mtPrefix = fi_5_mt diff --git a/tests/script/windows/field/6.sim b/tests/script/windows/field/6.sim index d1551d63b5..98071f87df 100644 --- a/tests/script/windows/field/6.sim +++ b/tests/script/windows/field/6.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_6_db $tbPrefix = fi_6_tb $mtPrefix = fi_6_mt diff --git a/tests/script/windows/field/bigint.sim b/tests/script/windows/field/bigint.sim index 9ccaeb3723..bef571f445 100644 --- a/tests/script/windows/field/bigint.sim +++ b/tests/script/windows/field/bigint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bi_db $tbPrefix = fi_bi_tb $mtPrefix = fi_bi_mt diff --git a/tests/script/windows/field/binary.sim b/tests/script/windows/field/binary.sim index 8b86c4dbea..72a356e684 100644 --- a/tests/script/windows/field/binary.sim +++ b/tests/script/windows/field/binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_by_db $tbPrefix = fi_by_tb $mtPrefix = fi_by_mt diff --git a/tests/script/windows/field/bool.sim b/tests/script/windows/field/bool.sim index 5f2c61475c..abc970264d 100644 --- a/tests/script/windows/field/bool.sim +++ b/tests/script/windows/field/bool.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bo_db $tbPrefix = fi_bo_tb $mtPrefix = fi_bo_mt diff --git a/tests/script/windows/field/double.sim b/tests/script/windows/field/double.sim index ea7e075208..e805e0373b 100644 --- a/tests/script/windows/field/double.sim +++ b/tests/script/windows/field/double.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_do_db $tbPrefix = fi_do_tb $mtPrefix = fi_do_mt diff --git a/tests/script/windows/field/float.sim b/tests/script/windows/field/float.sim index 5be59bae3b..4178ab4e1e 100644 --- a/tests/script/windows/field/float.sim +++ b/tests/script/windows/field/float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_fl_db $tbPrefix = fi_fl_tb $mtPrefix = fi_fl_mt diff --git 
a/tests/script/windows/field/int.sim b/tests/script/windows/field/int.sim index d7d26b7341..05dc19094d 100644 --- a/tests/script/windows/field/int.sim +++ b/tests/script/windows/field/int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_in_db $tbPrefix = fi_in_tb $mtPrefix = fi_in_mt diff --git a/tests/script/windows/field/single.sim b/tests/script/windows/field/single.sim index 0199133ecd..6422b7f697 100644 --- a/tests/script/windows/field/single.sim +++ b/tests/script/windows/field/single.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_si_db $tbPrefix = fi_si_tb $mtPrefix = fi_si_mt diff --git a/tests/script/windows/field/smallint.sim b/tests/script/windows/field/smallint.sim index 8bee463292..8bf41f45a5 100644 --- a/tests/script/windows/field/smallint.sim +++ b/tests/script/windows/field/smallint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_sm_db $tbPrefix = fi_sm_tb $mtPrefix = fi_sm_mt diff --git a/tests/script/windows/field/tinyint.sim b/tests/script/windows/field/tinyint.sim index 65bffca095..16c19ba38d 100644 --- a/tests/script/windows/field/tinyint.sim +++ b/tests/script/windows/field/tinyint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_ti_db $tbPrefix = fi_ti_tb $mtPrefix = fi_ti_mt diff --git a/tests/script/windows/import/basic.sim b/tests/script/windows/import/basic.sim index c20378ee88..491b4f8b34 100644 --- a/tests/script/windows/import/basic.sim +++ b/tests/script/windows/import/basic.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + sql create database ibadb sql use ibadb sql create table tb(ts timestamp, i int) diff --git a/tests/script/windows/insert/basic.sim b/tests/script/windows/insert/basic.sim index be0980a2d4..54cbd3f4d9 100644 --- a/tests/script/windows/insert/basic.sim +++ b/tests/script/windows/insert/basic.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_in_db $tbPrefix = tb_in_tb diff --git a/tests/script/windows/insert/query_block1_file.sim b/tests/script/windows/insert/query_block1_file.sim index 3eb1d402e8..388ed061e5 100644 --- a/tests/script/windows/insert/query_block1_file.sim +++ b/tests/script/windows/insert/query_block1_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_1f_db $tbPrefix = tb_1f_tb @@ -24,7 +31,7 @@ $y = $N / 2 while $x > $y $ms = $x . m $xt = - . 
$x - sql insert into $tb values (now - $ms , - $x ) + sql insert into $tb values (now - $ms , $x ) $x = $x - 1 endw diff --git a/tests/script/windows/insert/query_block1_memory.sim b/tests/script/windows/insert/query_block1_memory.sim index 60d31e52d6..9e4fc68d09 100644 --- a/tests/script/windows/insert/query_block1_memory.sim +++ b/tests/script/windows/insert/query_block1_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_1m_db $tbPrefix = tb_1m_tb diff --git a/tests/script/windows/insert/query_block2_file.sim b/tests/script/windows/insert/query_block2_file.sim index e9f562a538..9fd4434476 100644 --- a/tests/script/windows/insert/query_block2_file.sim +++ b/tests/script/windows/insert/query_block2_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_2f_db $tbPrefix = tb_2f_tb diff --git a/tests/script/windows/insert/query_block2_memory.sim b/tests/script/windows/insert/query_block2_memory.sim index fd173f1356..ede7f3efc6 100644 --- a/tests/script/windows/insert/query_block2_memory.sim +++ b/tests/script/windows/insert/query_block2_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_2m_db $tbPrefix = tb_2m_tb diff --git a/tests/script/windows/insert/query_file_memory.sim b/tests/script/windows/insert/query_file_memory.sim index e9b0c69ea5..083beb4ac5 100644 --- a/tests/script/windows/insert/query_file_memory.sim +++ b/tests/script/windows/insert/query_file_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_fm_db $tbPrefix = tb_fm_tb diff --git a/tests/script/windows/insert/query_multi_file.sim b/tests/script/windows/insert/query_multi_file.sim index 84c091fb21..465970f942 100644 --- a/tests/script/windows/insert/query_multi_file.sim +++ b/tests/script/windows/insert/query_multi_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_mf_db $tbPrefix = tb_mf_tb diff --git a/tests/script/windows/table/binary.sim b/tests/script/windows/table/binary.sim index 69354ed5c8..64a081c72f 100644 --- a/tests/script/windows/table/binary.sim +++ b/tests/script/windows/table/binary.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_bn_db $tbPrefix = lm_bn_tb diff --git a/tests/script/windows/table/bool.sim b/tests/script/windows/table/bool.sim index 9e434d801a..9486c42221 100644 --- a/tests/script/windows/table/bool.sim +++ b/tests/script/windows/table/bool.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_bo_db $tbPrefix = lm_bo_tb diff --git a/tests/script/windows/table/column_name.sim b/tests/script/windows/table/column_name.sim index bbac293fed..fffb1334e5 100644 --- a/tests/script/windows/table/column_name.sim +++ b/tests/script/windows/table/column_name.sim @@ 
-1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cm_db $tbPrefix = lm_cm_tb diff --git a/tests/script/windows/table/column_num.sim b/tests/script/windows/table/column_num.sim index f7ead41437..d182696ce0 100644 --- a/tests/script/windows/table/column_num.sim +++ b/tests/script/windows/table/column_num.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cn_db $tbPrefix = lm_cn_tb diff --git a/tests/script/windows/table/column_value.sim b/tests/script/windows/table/column_value.sim index 9dbaf7ceab..c59e7af8ba 100644 --- a/tests/script/windows/table/column_value.sim +++ b/tests/script/windows/table/column_value.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cv_db $tbPrefix = lm_cv_tb diff --git a/tests/script/windows/table/db.table.sim b/tests/script/windows/table/db.table.sim index 8d244e011f..97a9e6fbe9 100644 --- a/tests/script/windows/table/db.table.sim +++ b/tests/script/windows/table/db.table.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_dt_db $tbPrefix = lm_dt_tb diff --git a/tests/script/windows/table/double.sim b/tests/script/windows/table/double.sim index 1402982c98..93bf3bb149 100644 --- a/tests/script/windows/table/double.sim +++ b/tests/script/windows/table/double.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_do_db $tbPrefix = lm_do_tb diff --git a/tests/script/windows/table/float.sim b/tests/script/windows/table/float.sim index 57b626f865..684f78a386 100644 --- a/tests/script/windows/table/float.sim +++ b/tests/script/windows/table/float.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_fl_db $tbPrefix = lm_fl_tb diff --git a/tests/script/windows/table/table.sim b/tests/script/windows/table/table.sim index 55be8af851..985620152a 100644 --- a/tests/script/windows/table/table.sim +++ b/tests/script/windows/table/table.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ============================ dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = ob_tb_db $tbPrefix = ob_tb_tb diff --git a/tests/script/windows/table/table_len.sim b/tests/script/windows/table/table_len.sim index cdd1f31731..367f1c6895 100644 --- a/tests/script/windows/table/table_len.sim +++ b/tests/script/windows/table/table_len.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_tb_db $tbPrefix = lm_tb_tb diff --git a/tests/script/windows/tag/3.sim b/tests/script/windows/tag/3.sim index 9ffa11b03f..63a8766727 100644 --- a/tests/script/windows/tag/3.sim +++ b/tests/script/windows/tag/3.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show 
databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_3_db $tbPrefix = ta_3_tb $mtPrefix = ta_3_mt diff --git a/tests/script/windows/tag/4.sim b/tests/script/windows/tag/4.sim index beabe1fd8f..7e9af7ece7 100644 --- a/tests/script/windows/tag/4.sim +++ b/tests/script/windows/tag/4.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_4_db $tbPrefix = ta_4_tb $mtPrefix = ta_4_mt diff --git a/tests/script/windows/tag/5.sim b/tests/script/windows/tag/5.sim index 161d98756c..5dc128a0e0 100644 --- a/tests/script/windows/tag/5.sim +++ b/tests/script/windows/tag/5.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_5_db $tbPrefix = ta_5_tb $mtPrefix = ta_5_mt diff --git a/tests/script/windows/tag/6.sim b/tests/script/windows/tag/6.sim index b8666305bd..12e9c597f0 100644 --- a/tests/script/windows/tag/6.sim +++ b/tests/script/windows/tag/6.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_6_db $tbPrefix = ta_6_tb $mtPrefix = ta_6_mt diff --git a/tests/script/windows/tag/add.sim b/tests/script/windows/tag/add.sim index 2c72d01955..0a1416b68c 100644 --- a/tests/script/windows/tag/add.sim +++ b/tests/script/windows/tag/add.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ad_db $tbPrefix = ta_ad_tb $mtPrefix = ta_ad_mt diff --git a/tests/script/windows/tag/bigint.sim b/tests/script/windows/tag/bigint.sim index 4406c7386d..d988ad1fdc 100644 --- a/tests/script/windows/tag/bigint.sim +++ b/tests/script/windows/tag/bigint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bi_db $tbPrefix = ta_bi_tb $mtPrefix = ta_bi_mt diff --git a/tests/script/windows/tag/binary.sim b/tests/script/windows/tag/binary.sim index deeae81117..9dc18cfa94 100644 --- a/tests/script/windows/tag/binary.sim +++ b/tests/script/windows/tag/binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_by_db $tbPrefix = ta_by_tb $mtPrefix = ta_by_mt diff --git a/tests/script/windows/tag/binary_binary.sim b/tests/script/windows/tag/binary_binary.sim index c1f93bc656..ba688aa80e 100644 --- a/tests/script/windows/tag/binary_binary.sim +++ b/tests/script/windows/tag/binary_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bib_db $tbPrefix = ta_bib_tb $mtPrefix = ta_bib_mt diff --git a/tests/script/windows/tag/bool.sim b/tests/script/windows/tag/bool.sim index 81ea20064b..a7e5d909c5 100644 --- 
a/tests/script/windows/tag/bool.sim +++ b/tests/script/windows/tag/bool.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bo_db $tbPrefix = ta_bo_tb $mtPrefix = ta_bo_mt diff --git a/tests/script/windows/tag/bool_binary.sim b/tests/script/windows/tag/bool_binary.sim index c3daf2c242..639f6c5f2f 100644 --- a/tests/script/windows/tag/bool_binary.sim +++ b/tests/script/windows/tag/bool_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bob_db $tbPrefix = ta_bob_tb $mtPrefix = ta_bob_mt diff --git a/tests/script/windows/tag/bool_int.sim b/tests/script/windows/tag/bool_int.sim index 79e4b67bfa..900cc9e8a1 100644 --- a/tests/script/windows/tag/bool_int.sim +++ b/tests/script/windows/tag/bool_int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_boi_db $tbPrefix = ta_boi_tb $mtPrefix = ta_boi_mt diff --git a/tests/script/windows/tag/change.sim b/tests/script/windows/tag/change.sim index 2901842190..75a976bbb1 100644 --- a/tests/script/windows/tag/change.sim +++ b/tests/script/windows/tag/change.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ch_db $tbPrefix = ta_ch_tb $mtPrefix = ta_ch_mt diff --git a/tests/script/windows/tag/column.sim b/tests/script/windows/tag/column.sim index 131f3e06ea..9f5bfce07e 100644 --- a/tests/script/windows/tag/column.sim +++ b/tests/script/windows/tag/column.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_co_db $tbPrefix = ta_co_tb $mtPrefix = ta_co_mt diff --git a/tests/script/windows/tag/create.sim b/tests/script/windows/tag/create.sim index 5beba21727..6a76c93d83 100644 --- a/tests/script/windows/tag/create.sim +++ b/tests/script/windows/tag/create.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_cr_db $tbPrefix = ta_cr_tb $mtPrefix = ta_cr_mt diff --git a/tests/script/windows/tag/delete.sim b/tests/script/windows/tag/delete.sim index e2395c8f97..9e8ea9aba0 100644 --- a/tests/script/windows/tag/delete.sim +++ b/tests/script/windows/tag/delete.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_de_db $tbPrefix = ta_de_tb $mtPrefix = ta_de_mt diff --git a/tests/script/windows/tag/double.sim b/tests/script/windows/tag/double.sim index c08351a41b..5445b1dbea 100644 --- a/tests/script/windows/tag/double.sim +++ b/tests/script/windows/tag/double.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 
+e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_do_db $tbPrefix = ta_do_tb $mtPrefix = ta_do_mt diff --git a/tests/script/windows/tag/filter.sim b/tests/script/windows/tag/filter.sim index b70e56fdb6..f704f32cd2 100644 --- a/tests/script/windows/tag/filter.sim +++ b/tests/script/windows/tag/filter.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_fi_db $tbPrefix = ta_fi_tb $mtPrefix = ta_fi_mt diff --git a/tests/script/windows/tag/float.sim b/tests/script/windows/tag/float.sim index 79eabb2cb4..64424c1e20 100644 --- a/tests/script/windows/tag/float.sim +++ b/tests/script/windows/tag/float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_fl_db $tbPrefix = ta_fl_tb $mtPrefix = ta_fl_mt diff --git a/tests/script/windows/tag/int.sim b/tests/script/windows/tag/int.sim index d3921218fd..7d7b5271d1 100644 --- a/tests/script/windows/tag/int.sim +++ b/tests/script/windows/tag/int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_in_db $tbPrefix = ta_in_tb $mtPrefix = ta_in_mt diff --git a/tests/script/windows/tag/int_binary.sim b/tests/script/windows/tag/int_binary.sim index 96f4f18966..1dd4771605 100644 --- a/tests/script/windows/tag/int_binary.sim +++ b/tests/script/windows/tag/int_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_inb_db $tbPrefix = ta_inb_tb $mtPrefix = ta_inb_mt diff --git a/tests/script/windows/tag/int_float.sim b/tests/script/windows/tag/int_float.sim index 768e86b96d..cdb9032d8c 100644 --- a/tests/script/windows/tag/int_float.sim +++ b/tests/script/windows/tag/int_float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_inf_db $tbPrefix = ta_inf_tb $mtPrefix = ta_inf_mt diff --git a/tests/script/windows/tag/set.sim b/tests/script/windows/tag/set.sim index 580f91cb49..16103c6ce8 100644 --- a/tests/script/windows/tag/set.sim +++ b/tests/script/windows/tag/set.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_se_db $tbPrefix = ta_se_tb $mtPrefix = ta_se_mt diff --git a/tests/script/windows/tag/smallint.sim b/tests/script/windows/tag/smallint.sim index 1b7ff0860b..dbab4f2d43 100644 --- a/tests/script/windows/tag/smallint.sim +++ b/tests/script/windows/tag/smallint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_sm_db $tbPrefix = ta_sm_tb $mtPrefix = ta_sm_mt diff --git a/tests/script/windows/tag/tinyint.sim b/tests/script/windows/tag/tinyint.sim index 
8fe957664c..7a0237c0d9 100644 --- a/tests/script/windows/tag/tinyint.sim +++ b/tests/script/windows/tag/tinyint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ti_db $tbPrefix = ta_ti_tb $mtPrefix = ta_ti_mt diff --git a/tests/script/windows/testSuite.sim b/tests/script/windows/testSuite.sim index fc574ed9c4..e372217b62 100644 --- a/tests/script/windows/testSuite.sim +++ b/tests/script/windows/testSuite.sim @@ -1,14 +1,14 @@ run windows/alter/table.sim run windows/alter/metrics.sim -#run windows/compute/avg.sim +run windows/compute/avg.sim run windows/compute/bottom.sim run windows/compute/count.sim run windows/compute/diff.sim run windows/compute/first.sim run windows/compute/interval.sim run windows/compute/last.sim -##run windows/compute/leastsquare.sim +run windows/compute/leastsquare.sim run windows/compute/max.sim run windows/compute/min.sim run windows/compute/percentile.sim @@ -37,12 +37,12 @@ run windows/field/tinyint.sim run windows/import/basic.sim run windows/insert/basic.sim -#run windows/insert/query_block1_file.sim -#run windows/insert/query_block1_memory.sim -#run windows/insert/query_block2_file.sim -#run windows/insert/query_block2_memory.sim -#run windows/insert/query_file_memory.sim -#run windows/insert/query_multi_file.sim +run windows/insert/query_block1_file.sim +run windows/insert/query_block1_memory.sim +run windows/insert/query_block2_file.sim +run windows/insert/query_block2_memory.sim +run windows/insert/query_file_memory.sim +run windows/insert/query_multi_file.sim run windows/table/binary.sim run windows/table/bool.sim diff --git a/tests/script/windows/vector/metrics_field.sim b/tests/script/windows/vector/metrics_field.sim index 8e9a87239e..e7c926e141 100644 --- a/tests/script/windows/vector/metrics_field.sim +++ b/tests/script/windows/vector/metrics_field.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mf_db $tbPrefix = m_mf_tb $mtPrefix = m_mf_mt diff --git a/tests/script/windows/vector/metrics_mix.sim b/tests/script/windows/vector/metrics_mix.sim index 0f960deb4d..3d94a96385 100644 --- a/tests/script/windows/vector/metrics_mix.sim +++ b/tests/script/windows/vector/metrics_mix.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mx_db $tbPrefix = m_mx_tb $mtPrefix = m_mx_mt diff --git a/tests/script/windows/vector/metrics_query.sim b/tests/script/windows/vector/metrics_query.sim index a0df4c9b04..c292c6b306 100644 --- a/tests/script/windows/vector/metrics_query.sim +++ b/tests/script/windows/vector/metrics_query.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mq_db $tbPrefix = m_mq_tb $mtPrefix = m_mq_mt diff --git a/tests/script/windows/vector/metrics_tag.sim b/tests/script/windows/vector/metrics_tag.sim index 22fd19bc89..f51a449d71 100644 --- a/tests/script/windows/vector/metrics_tag.sim +++ b/tests/script/windows/vector/metrics_tag.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mtg_db 
$tbPrefix = m_mtg_tb $mtPrefix = m_mtg_mt diff --git a/tests/script/windows/vector/metrics_time.sim b/tests/script/windows/vector/metrics_time.sim index da102b64a3..716f49d1e5 100644 --- a/tests/script/windows/vector/metrics_time.sim +++ b/tests/script/windows/vector/metrics_time.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mt_db $tbPrefix = m_mt_tb $mtPrefix = m_mt_mt diff --git a/tests/script/windows/vector/multi.sim b/tests/script/windows/vector/multi.sim index adcc94db3b..415384d243 100644 --- a/tests/script/windows/vector/multi.sim +++ b/tests/script/windows/vector/multi.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mu_db $tbPrefix = m_mu_tb $mtPrefix = m_mu_mt diff --git a/tests/script/windows/vector/single.sim b/tests/script/windows/vector/single.sim index 61135fc6b5..f3f3862e54 100644 --- a/tests/script/windows/vector/single.sim +++ b/tests/script/windows/vector/single.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_si_db $tbPrefix = m_si_tb $mtPrefix = m_si_mt diff --git a/tests/script/windows/vector/table_field.sim b/tests/script/windows/vector/table_field.sim index ec50bc7a2a..0c5df695fb 100644 --- a/tests/script/windows/vector/table_field.sim +++ b/tests/script/windows/vector/table_field.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tf_db $tbPrefix = m_tf_tb $mtPrefix = m_tf_mt diff --git a/tests/script/windows/vector/table_mix.sim b/tests/script/windows/vector/table_mix.sim index 653171b302..3d660b5611 100644 --- a/tests/script/windows/vector/table_mix.sim +++ b/tests/script/windows/vector/table_mix.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tm_db $tbPrefix = m_tm_tb $mtPrefix = m_tm_mt diff --git a/tests/script/windows/vector/table_query.sim b/tests/script/windows/vector/table_query.sim index cdbd96f4d0..27fcd0f654 100644 --- a/tests/script/windows/vector/table_query.sim +++ b/tests/script/windows/vector/table_query.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tq_db $tbPrefix = m_tq_tb $mtPrefix = m_tq_mt diff --git a/tests/script/windows/vector/table_time.sim b/tests/script/windows/vector/table_time.sim index 48bcf86dca..8a3804c619 100644 --- a/tests/script/windows/vector/table_time.sim +++ b/tests/script/windows/vector/table_time.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tt_db $tbPrefix = m_tt_tb $mtPrefix = m_tt_mt From d79cad4b641fbf66fdebd876edd14830e8c4f990 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 14:34:50 +0000 Subject: [PATCH 091/190] TD-1057 --- tests/script/{test.bat => wtest.bat} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/script/{test.bat => wtest.bat} (100%) diff --git a/tests/script/test.bat 
b/tests/script/wtest.bat similarity index 100% rename from tests/script/test.bat rename to tests/script/wtest.bat From d4f39548913c02d7c8a4b7e0bf8ea028e3fe986b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 23:22:01 +0800 Subject: [PATCH 092/190] TD-1057 change compile errors --- src/client/src/tscLocalMerge.c | 6 +++--- src/client/src/tscSQLParser.c | 6 +++--- src/client/src/tscSql.c | 2 +- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 8 ++++---- src/query/src/qExecutor.c | 12 ++++++------ src/query/src/qResultbuf.c | 12 ++++++------ src/tsdb/src/tsdbRead.c | 2 +- src/util/src/hash.c | 16 ++++++++-------- src/util/src/tcache.c | 6 +++--- 10 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 5955515bbd..d176d026f0 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -914,8 +914,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { /* impose the limitation of output rows on the final result */ - int32_t prevSize = pFinalDataPage->num; - int32_t overflow = pRes->numOfRowsGroup - pQueryInfo->limit.limit; + int32_t prevSize = (int32_t)pFinalDataPage->num; + int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); assert(overflow < pRes->numOfRows); pRes->numOfRowsGroup = pQueryInfo->limit.limit; @@ -984,7 +984,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO if (pRes->numOfRows > 0) { if (pQueryInfo->limit.limit >= 0 && pRes->numOfRows > pQueryInfo->limit.limit) { - int32_t overflow = pRes->numOfRows - pQueryInfo->limit.limit; + int32_t overflow = (int32_t)(pRes->numOfRows - pQueryInfo->limit.limit); pRes->numOfRows -= overflow; pFinalDataPage->num -= overflow; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 1a97a9a997..05218dc74c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1182,7 +1182,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t char* c = tbufGetData(&bw, true); // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, len, index.tableIndex); + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); @@ -1237,7 +1237,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel } for (int32_t i = 0; i < pSelection->nExpr; ++i) { - int32_t outputIndex = tscSqlExprNumOfExprs(pQueryInfo); + int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); tSQLExprItem* pItem = &pSelection->a[i]; // project on all fields @@ -3144,7 +3144,7 @@ static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) { int32_t code = doArithmeticExprToString(pExpr, str); if (code == TSDB_CODE_SUCCESS) { // remove out the parenthesis - int32_t len = strlen(start); + int32_t len = (int32_t)strlen(start); memmove(start, start + 1, len - 2); start[len - 2] = 0; } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 18859a770f..2c48c76b1c 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -263,7 +263,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { } TAOS_RES* taos_query(TAOS *taos, 
const char *sqlstr) { - return taos_query_c(taos, sqlstr, strlen(sqlstr)); + return taos_query_c(taos, sqlstr, (uint32_t)strlen(sqlstr)); } int taos_result_precision(TAOS_RES *res) { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 98f07933f7..75644c355c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1027,7 +1027,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - int32_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs); for (int32_t i = 0; i < numOfExprs; ++i) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index e4dcf852ec..60723676df 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1683,7 +1683,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm // current sql function is not direct output result, so create a dummy output field static void doSetNewFieldInfo(SQueryInfo* pNewQueryInfo, SSqlExpr* pExpr) { - TAOS_FIELD f = {.type = pExpr->resType, .bytes = pExpr->resBytes}; + TAOS_FIELD f = {.type = (uint8_t)pExpr->resType, .bytes = pExpr->resBytes}; tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); @@ -1693,7 +1693,7 @@ static void doSetNewFieldInfo(SQueryInfo* pNewQueryInfo, SSqlExpr* pExpr) { } static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) { - int32_t numOfOutput = tscSqlExprNumOfExprs(pNewQueryInfo); + int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo); if (numOfOutput == 0) { return; } @@ -2044,7 +2044,7 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) { int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; if (pTableMetaInfo->pVgroupTables != NULL) { - numOfVgroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables); + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); } return tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && @@ -2249,6 +2249,6 @@ bool tscSetSqlOwner(SSqlObj* pSql) { } void tscClearSqlOwner(SSqlObj* pSql) { - assert(pSql->owner != 0); + assert(taosCheckPthreadValid(pSql->owner)); atomic_store_64(&pSql->owner, 0); } \ No newline at end of file diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bf9e8374bb..945e35c9f6 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -405,9 +405,9 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin if (pWindowResInfo->size >= pWindowResInfo->capacity) { int64_t newCap = 0; if (pWindowResInfo->capacity > 10000) { - newCap = pWindowResInfo->capacity * 1.25; + newCap = (int64_t)(pWindowResInfo->capacity * 1.25); } else { - newCap = pWindowResInfo->capacity * 1.5; + newCap = (int64_t)(pWindowResInfo->capacity * 1.5); } char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); @@ -2725,7 +2725,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { memcpy(pDest + offset * bytes, pData->data + pRuntimeEnv->offset[i] * pData->num, bytes * pData->num); } - offset += pData->num; + offset += (int32_t)pData->num; } assert(pQuery->rec.rows == 0); @@ -3051,7 +3051,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { static void setupQueryRangeForReverseScan(SQInfo* pQInfo) { SQuery* pQuery = pQInfo->runtimeEnv.pQuery; - 
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo); + int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo)); for(int32_t i = 0; i < numOfGroups; ++i) { SArray *group = GET_TABLEGROUP(pQInfo, i); @@ -6379,7 +6379,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { // clear qhandle owner assert(pQInfo->owner == pthread_self()); - pQInfo->owner = 0; + taosResetPthread(&pQInfo->owner); return buildRes; } @@ -6549,7 +6549,7 @@ int32_t qKillQuery(qinfo_t qinfo) { // Wait for the query executing thread being stopped/ // Once the query is stopped, the owner of qHandle will be cleared immediately. - while(pQInfo->owner != 0) { + while (taosCheckPthreadValid(pQInfo->owner)) { taosMsleep(100); } diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 7c40c33933..4c198ac176 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -6,7 +6,7 @@ #include "queryLog.h" #include "taoserror.h" -#define GET_DATA_PAYLOAD(_p) ((_p)->pData + POINTER_BYTES) +#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES) #define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize, @@ -248,7 +248,7 @@ static char* evicOneDataPage(SDiskbasedResultBuf* pResultBuf) { int32_t prev = pResultBuf->inMemPages; // increase by 50% of previous mem pages - pResultBuf->inMemPages = pResultBuf->inMemPages * 1.5f; + pResultBuf->inMemPages = (int32_t)(pResultBuf->inMemPages * 1.5f); qWarn("%p in memory buf page not sufficient, expand from %d to %d, page size:%d", pResultBuf, prev, pResultBuf->inMemPages, pResultBuf->pageSize); @@ -313,7 +313,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 ((void**)pi->pData)[0] = pi; pi->used = true; - return GET_DATA_PAYLOAD(pi); + return (void *)(GET_DATA_PAYLOAD(pi)); } tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { @@ -327,7 +327,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { // no need to update the LRU list if only one page exists if (pResultBuf->numOfPages == 1) { (*pi)->used = true; - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } SPageInfo** pInfo = (SPageInfo**) ((*pi)->pn->data); @@ -336,7 +336,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { lruListMoveToFront(pResultBuf->lruList, (*pi)); (*pi)->used = true; - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } else { // not in memory assert((*pi)->pData == NULL && (*pi)->pn == NULL && (*pi)->info.length >= 0 && (*pi)->info.offset >= 0); @@ -358,7 +358,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { (*pi)->used = true; loadPageFromDisk(pResultBuf, *pi); - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 61c1293dd5..572af8b3b6 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -245,9 +245,9 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab STableCheckInfo info = { .lastKey = pKeyInfo->lastKey, - .tableId = ((STable*)(pKeyInfo->pTable))->tableId, .pTableObj = pKeyInfo->pTable, }; + info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); diff --git 
a/src/util/src/hash.c b/src/util/src/hash.c index 8df3e3d4d3..714f36f1cb 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -167,7 +167,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp void *p = calloc(pHashObj->capacity, sizeof(SHashEntry)); for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pHashObj->hashList[i] = p + i * sizeof(SHashEntry); + pHashObj->hashList[i] = (void *)((char *)p + i * sizeof(SHashEntry)); } taosArrayPush(pHashObj->pMemBlock, &p); @@ -179,7 +179,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp size_t taosHashGetSize(const SHashObj *pHashObj) { return (pHashObj == NULL) ? 0 : pHashObj->size; } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { - uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); if (pNewNode == NULL) { return -1; @@ -263,7 +263,7 @@ void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*f return NULL; } - uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); // only add the read lock to disable the resize process __rd_lock(&pHashObj->lock, pHashObj->type); @@ -317,7 +317,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe return -1; } - uint32_t hashVal = (*pHashObj->hashFp)(key, keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); // disable the resize process __rd_lock(&pHashObj->lock, pHashObj->type); @@ -418,7 +418,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi // disable the resize process __rd_lock(&pHashObj->lock, pHashObj->type); - int32_t numOfEntries = pHashObj->capacity; + int32_t numOfEntries = (int32_t)pHashObj->capacity; for (int32_t i = 0; i < numOfEntries; ++i) { SHashEntry *pEntry = pHashObj->hashList[i]; if (pEntry->num == 0) { @@ -649,7 +649,7 @@ void taosHashTableResize(SHashObj *pHashObj) { SHashNode *pNode = NULL; SHashNode *pNext = NULL; - int32_t newSize = pHashObj->capacity << 1u; + int32_t newSize = (int32_t)(pHashObj->capacity << 1u); if (newSize > HASH_MAX_CAPACITY) { // uDebug("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", // pHashObj->capacity, HASH_MAX_CAPACITY); @@ -669,7 +669,7 @@ void taosHashTableResize(SHashObj *pHashObj) { void * p = calloc(inc, sizeof(SHashEntry)); for (int32_t i = 0; i < inc; ++i) { - pHashObj->hashList[i + pHashObj->capacity] = p + i * sizeof(SHashEntry); + pHashObj->hashList[i + pHashObj->capacity] = (void *)((char *)p + i * sizeof(SHashEntry)); } taosArrayPush(pHashObj->pMemBlock, &p); @@ -762,7 +762,7 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s pNewNode->key = pNewNode->data + dsize; memcpy(pNewNode->key, key, keyLen); - pNewNode->keyLen = keyLen; + pNewNode->keyLen = (uint32_t)keyLen; pNewNode->hashVal = hashVal; return pNewNode; } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 54a4b289b7..7bb7133337 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -95,7 +95,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo } pCacheObj->totalSize -= pNode->size; - int32_t size = taosHashGetSize(pCacheObj->pHashTable); + int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); assert(size > 0); 
uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", @@ -388,7 +388,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } else { // ref == 0 atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); - int32_t size = taosHashGetSize(pCacheObj->pHashTable); + int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); @@ -560,7 +560,7 @@ bool travHashTableFn(void* param, void* data) { SCacheObj* pCacheObj= ps->pCacheObj; SCacheDataNode* pNode = *(SCacheDataNode **) data; - if (pNode->expireTime < ps->time && T_REF_VAL_GET(pNode) <= 0) { + if ((int64_t)pNode->expireTime < ps->time && T_REF_VAL_GET(pNode) <= 0) { taosCacheReleaseNode(pCacheObj, pNode); // this node should be remove from hash table From d7ed734e9f4e7cda097bc09f74bd86e1a60c0795 Mon Sep 17 00:00:00 2001 From: eurake Date: Mon, 10 Aug 2020 23:44:09 +0800 Subject: [PATCH 093/190] Update CMakeLists.txt --- src/balance/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index d5d9ba1d9c..6c26e50b87 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -3,11 +3,11 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc) -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/sdb/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) ADD_LIBRARY(balance ${SRC}) -ENDIF () \ No newline at end of file +ENDIF () From 2dfcac405ac90db943db3610387fc16255f53aec Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 23:51:53 +0800 Subject: [PATCH 094/190] TD-1057 change pthread_t to int64 --- src/os/inc/osSemphone.h | 1 + src/os/src/detail/osSemphone.c | 1 + src/os/src/windows/w64Semphone.c | 4 ++++ src/query/inc/qExecutor.h | 2 +- src/query/src/qExecutor.c | 12 ++++++------ 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemphone.h index c4fc98988a..fd88d2d798 100644 --- a/src/os/inc/osSemphone.h +++ b/src/os/inc/osSemphone.h @@ -32,6 +32,7 @@ extern "C" { bool taosCheckPthreadValid(pthread_t thread); int64_t taosGetPthreadId(); void taosResetPthread(pthread_t *thread); +bool taosComparePthread(pthread_t first, pthread_t second); #ifdef __cplusplus } diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemphone.c index 1f1ef268c6..74f8859029 100644 --- a/src/os/src/detail/osSemphone.c +++ b/src/os/src/detail/osSemphone.c @@ -21,5 +21,6 @@ bool taosCheckPthreadValid(pthread_t thread) { return thread != 0; } int64_t taosGetPthreadId() { return (int64_t)pthread_self(); } void taosResetPthread(pthread_t *thread) { *thread = 0; } +bool taosComparePthread(pthread_t first, pthread_t second) { return first == second; } #endif \ No newline at end of file diff --git a/src/os/src/windows/w64Semphone.c b/src/os/src/windows/w64Semphone.c index 7ed6228228..ded7e41843 100644 --- a/src/os/src/windows/w64Semphone.c +++ b/src/os/src/windows/w64Semphone.c @@ -32,3 +32,7 @@ int64_t taosGetPthreadId() { return (int64_t)pthread_self(); #endif } + +bool taosComparePthread(pthread_t first, pthread_t second) { + return first.p == second.p; +} 
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index c7026b45c6..b5487561b2 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -186,7 +186,7 @@ enum { typedef struct SQInfo { void* signature; int32_t code; // error code to returned to client - pthread_t owner; // if it is in execution + int64_t owner; // if it is in execution void* tsdb; int32_t vgId; STableGroupInfo tableGroupInfo; // table id list < only includes the STable list> diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 945e35c9f6..34700a33f3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -130,8 +130,8 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \ } while (0) -#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (_q)->tableqinfoGroupInfo.numOfTables) -#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (_q)->tableqinfoGroupInfo.numOfTables) +#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) +#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); @@ -6378,8 +6378,8 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); // clear qhandle owner - assert(pQInfo->owner == pthread_self()); - taosResetPthread(&pQInfo->owner); + assert(pQInfo->owner == taosGetPthreadId()); + pQInfo->owner = 0; return buildRes; } @@ -6387,7 +6387,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { bool qTableQuery(qinfo_t qinfo) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); - int64_t threadId = pthread_self(); + int64_t threadId = taosGetPthreadId(); int64_t curOwner = 0; if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { @@ -6549,7 +6549,7 @@ int32_t qKillQuery(qinfo_t qinfo) { // Wait for the query executing thread being stopped/ // Once the query is stopped, the owner of qHandle will be cleared immediately. 
- while (taosCheckPthreadValid(pQInfo->owner)) { + while (pQInfo->owner != 0) { taosMsleep(100); } From 0ac540218a10859abb7960859e20a47b7d2053db Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 10 Aug 2020 23:57:14 +0800 Subject: [PATCH 095/190] TD-1057 minor changes --- src/client/src/tscLocalMerge.c | 2 +- src/client/src/tscSQLParser.c | 2 +- src/query/src/qResultbuf.c | 6 +++--- src/tsdb/src/tsdbRead.c | 2 +- src/util/src/tcache.c | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index d176d026f0..8436777ddf 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -915,7 +915,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { /* impose the limitation of output rows on the final result */ int32_t prevSize = (int32_t)pFinalDataPage->num; - int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); + int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); assert(overflow < pRes->numOfRows); pRes->numOfRowsGroup = pQueryInfo->limit.limit; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 05218dc74c..8e398fbf1f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1182,7 +1182,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t char* c = tbufGetData(&bw, true); // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 4c198ac176..51ff892b33 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -327,7 +327,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { // no need to update the LRU list if only one page exists if (pResultBuf->numOfPages == 1) { (*pi)->used = true; - return (void *)(GET_DATA_PAYLOAD(*pi)); + return (void *)(GET_DATA_PAYLOAD(*pi)); } SPageInfo** pInfo = (SPageInfo**) ((*pi)->pn->data); @@ -336,7 +336,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { lruListMoveToFront(pResultBuf->lruList, (*pi)); (*pi)->used = true; - return (void *)(GET_DATA_PAYLOAD(*pi)); + return (void *)(GET_DATA_PAYLOAD(*pi)); } else { // not in memory assert((*pi)->pData == NULL && (*pi)->pn == NULL && (*pi)->info.length >= 0 && (*pi)->info.offset >= 0); @@ -358,7 +358,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { (*pi)->used = true; loadPageFromDisk(pResultBuf, *pi); - return (void *)(GET_DATA_PAYLOAD(*pi)); + return (void *)(GET_DATA_PAYLOAD(*pi)); } } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 572af8b3b6..4edbadc725 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -247,7 +247,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab .lastKey = pKeyInfo->lastKey, .pTableObj = pKeyInfo->pTable, }; - info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; + info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || 
info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 7bb7133337..fee7ed3c8b 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -388,7 +388,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } else { // ref == 0 atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); - int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); + int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); From 36c43566da7c0dfd961458326b789caa43710900 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 11 Aug 2020 00:50:09 +0800 Subject: [PATCH 096/190] [td-255]fix bugs in table alias name. --- src/client/src/tscSQLParser.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 1a97a9a997..88d6ab5217 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5996,7 +5996,12 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); } - tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); + // has no table alias name + if (memcmp(pTableItem->pz, pTableItem1->pz, pTableItem1->nLen) == 0) { + extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName); + } else { + tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); + } code = tscGetTableMeta(pSql, pTableMetaInfo1); if (code != TSDB_CODE_SUCCESS) { From 49944faf7d8aff8936817c20104494742532d8d2 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 09:47:23 +0800 Subject: [PATCH 097/190] TD-1081 change httpContext keep time --- src/mnode/src/mnodeProfile.c | 4 ++-- src/mnode/src/mnodeShow.c | 2 +- src/plugins/http/src/httpContext.c | 2 +- src/plugins/http/src/httpSession.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 85457d7a26..01a824baa7 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -35,8 +35,8 @@ #include "mnodeVgroup.h" #include "mnodeWrite.h" -#define CONN_KEEP_TIME (tsShellActivityTimer * 3) -#define CONN_CHECK_TIME (tsShellActivityTimer * 2) +#define CONN_KEEP_TIME (tsShellActivityTimer * 3000) +#define CONN_CHECK_TIME (tsShellActivityTimer * 2000) #define QUERY_ID_SIZE 20 #define QUERY_STREAM_SAVE_SIZE 20 diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 9983c111f6..aec946722f 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -389,7 +389,7 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) { static void* mnodePutShowObj(SShowObj *pShow) { if (tsMnodeShowCache != NULL) { pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1); - SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &pShow, sizeof(int64_t), &pShow, sizeof(int64_t), 6); + SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &pShow, sizeof(int64_t), &pShow, sizeof(int64_t), 6000); pShow->ppShow = (void**)ppShow; mDebug("%p, show is put into cache, data:%p index:%d", pShow, ppShow, pShow->index); return pShow; diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 
ad0d302c82..38b829e406 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -108,7 +108,7 @@ HttpContext *httpCreateContext(int32_t fd) { pContext->lastAccessTime = taosGetTimestampSec(); pContext->state = HTTP_CONTEXT_STATE_READY; - HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3); + HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3000); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c index ad57f0fc29..f19679e072 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -34,7 +34,7 @@ void httpCreateSession(HttpContext *pContext, void *taos) { session.refCount = 1; int32_t len = snprintf(session.id, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass); - pContext->session = taosCachePut(server->sessionCache, session.id, len, &session, sizeof(HttpSession), tsHttpSessionExpire); + pContext->session = taosCachePut(server->sessionCache, session.id, len, &session, sizeof(HttpSession), tsHttpSessionExpire * 1000); // void *temp = pContext->session; // taosCacheRelease(server->sessionCache, (void **)&temp, false); From f1e19aae4ca704ab1aa5620e90b4259dab02e0ee Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 11 Aug 2020 10:33:17 +0800 Subject: [PATCH 098/190] [fix bug] --- packaging/tools/install_client.sh | 10 +++++++--- packaging/tools/remove.sh | 6 ++++-- packaging/tools/remove_client.sh | 6 ++++-- src/dnode/src/dnodeCheck.c | 2 +- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 605944e9b3..0ec8dbd0cc 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -31,6 +31,7 @@ cfg_install_dir="/etc/taos" if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" else bin_link_dir="/usr/local/bin" @@ -45,7 +46,7 @@ install_main_dir="/usr/local/taos" bin_dir="/usr/local/taos/bin" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" # Color setting RED='\033[0;31m' @@ -90,7 +91,7 @@ function install_bin() { ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* - #Make link + #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" == "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : @@ -106,13 +107,16 @@ function clean_lib() { function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s 
${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : else ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 9fb8731449..63e09dc568 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -18,11 +18,12 @@ log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" install_nginxd_dir="/usr/local/nginxd" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" service_config_dir="/etc/systemd/system" taos_service_name="taosd" @@ -78,7 +79,8 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 9210546a9f..4bc278fcf0 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -15,11 +15,12 @@ log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" csudo="" if command -v sudo > /dev/null; then @@ -43,7 +44,8 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c index a5b6fa09dd..dfdc3fa53f 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -173,7 +173,7 @@ static int dnodeCheckDatafile() { } static void dnodeAllocCheckItem() { - tsCheckItem[TSDB_CHECK_ITEM_NETWORK].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].enable = false; tsCheckItem[TSDB_CHECK_ITEM_NETWORK].name = "network"; tsCheckItem[TSDB_CHECK_ITEM_NETWORK].initFp = NULL; tsCheckItem[TSDB_CHECK_ITEM_NETWORK].cleanUpFp = NULL; From 082584e4390f411114fafb8768b76b393f9dcc8c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 11 Aug 2020 11:36:01 +0800 Subject: [PATCH 099/190] fix TD-1112 --- src/tsdb/src/tsdbMemTable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index cd5f223f3d..2df8ff26bd 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -232,7 +232,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { } pNode->next = pNode->prev = NULL; - tdListAppend(pRepo->mem->extraBuffList, pNode); + tdListAppendNode(pRepo->mem->extraBuffList, pNode); ptr = (void *)(pNode->data); tsdbTrace("vgId:%d allocate %d bytes from SYSTEM buffer block", REPO_ID(pRepo), bytes); } else { // allocate from TSDB buffer pool From 88171545f8e78fdb6e49110297634d5524df94f0 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 03:58:41 +0000 Subject: [PATCH 100/190] TD-1081 change cache key --- src/mnode/src/mnodeShow.c | 6 ++++-- src/plugins/http/src/httpContext.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) 
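The hunks that follow replace cache keys built by reading sizeof(int64_t) bytes at the address of a pointer-sized local (&pShow, &pContext) with an explicit uint64_t copy of the pointer value (handleVal), so the eight-byte key content is always a fully defined value. Below is a minimal sketch of that pointer-value-as-key pattern, under the assumption of a hypothetical demo_cache_* API standing in for taosCachePut / taosCacheAcquireByKey; none of these names are TDengine's.

```c
/*
 * Standalone sketch (not TDengine code): key a cache entry on the
 * pointer's value, copied into a fixed-width integer, instead of
 * reading key bytes directly from a pointer-sized local variable.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_SLOTS 16

typedef struct {
  uint64_t key[DEMO_SLOTS];
  void    *val[DEMO_SLOTS];
  int      used;
} demo_cache_t;

static void demo_cache_put(demo_cache_t *c, const void *key, size_t klen, void *val) {
  if (c->used >= DEMO_SLOTS) return;               /* demo-only capacity guard */
  memcpy(&c->key[c->used], key, klen < sizeof(uint64_t) ? klen : sizeof(uint64_t));
  c->val[c->used++] = val;
}

static void *demo_cache_get(demo_cache_t *c, const void *key, size_t klen) {
  uint64_t k = 0;
  memcpy(&k, key, klen < sizeof(uint64_t) ? klen : sizeof(uint64_t));
  for (int i = 0; i < c->used; ++i) {
    if (c->key[i] == k) return c->val[i];
  }
  return NULL;
}

static demo_cache_t cache;                         /* zero-initialized */

static void store(void *handle) {
  uint64_t handleVal = (uint64_t)(uintptr_t)handle; /* key on the pointer's value */
  demo_cache_put(&cache, &handleVal, sizeof(handleVal), handle);
}

static void *lookup(void *handle) {
  uint64_t handleVal = (uint64_t)(uintptr_t)handle; /* same value => same key */
  return demo_cache_get(&cache, &handleVal, sizeof(handleVal));
}

int main(void) {
  int object = 42;                                  /* stands in for a show/context object */
  store(&object);
  printf("lookup %s\n", lookup(&object) == &object ? "hit" : "miss");
  return 0;
}
```

On an LP64 build both the old and new forms hash the same eight bytes; the explicit copy appears to matter mainly where pointers are narrower than the declared key length, since it keeps the stored and looked-up key bytes identical.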
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index aec946722f..8a84b66a33 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -377,7 +377,8 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) { } static bool mnodeAccquireShowObj(SShowObj *pShow) { - SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &pShow, sizeof(int64_t)); + uint64_t handleVal = (uint64_t)pShow; + SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(int64_t)); if (ppShow) { mDebug("%p, show is accquired from cache, data:%p, index:%d", pShow, ppShow, pShow->index); return true; @@ -389,7 +390,8 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) { static void* mnodePutShowObj(SShowObj *pShow) { if (tsMnodeShowCache != NULL) { pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1); - SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &pShow, sizeof(int64_t), &pShow, sizeof(int64_t), 6000); + uint64_t handleVal = (uint64_t)pShow; + SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(int64_t), &pShow, sizeof(int64_t), 6000); pShow->ppShow = (void**)ppShow; mDebug("%p, show is put into cache, data:%p index:%d", pShow, ppShow, pShow->index); return pShow; diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 38b829e406..5ef3c9a66e 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -108,7 +108,8 @@ HttpContext *httpCreateContext(int32_t fd) { pContext->lastAccessTime = taosGetTimestampSec(); pContext->state = HTTP_CONTEXT_STATE_READY; - HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3000); + uint64_t handleVal = (uint64_t)pContext; + HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(int64_t), &pContext, sizeof(int64_t), 3000); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); @@ -119,7 +120,8 @@ HttpContext *httpCreateContext(int32_t fd) { } HttpContext *httpGetContext(void *ptr) { - HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &ptr, sizeof(HttpContext *)); + uint64_t handleVal = (uint64_t)ptr; + HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *)); if (ppContext) { HttpContext *pContext = *ppContext; From fe834cc75dce1b55179e69e23ea3e33641f8a193 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 04:43:57 +0000 Subject: [PATCH 101/190] only drop one vnode each time --- src/balance/src/balance.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/balance/src/balance.c b/src/balance/src/balance.c index 53ed860aa1..1c25c04717 100644 --- a/src/balance/src/balance.c +++ b/src/balance/src/balance.c @@ -214,8 +214,8 @@ static bool balanceCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) { * desc: remove one vnode from vgroup * all vnodes in vgroup should in ready state, except the balancing one **/ -static void balanceRemoveVnode(SVgObj *pVgroup) { - if (pVgroup->numOfVnodes <= 1) return; +static int32_t balanceRemoveVnode(SVgObj *pVgroup) { + if (pVgroup->numOfVnodes <= 1) return -1; SVnodeGid *pRmVnode = NULL; SVnodeGid *pSelVnode = NULL; @@ -258,9 +258,11 @@ static void balanceRemoveVnode(SVgObj *pVgroup) { if (!balanceCheckVgroupReady(pVgroup, pSelVnode)) { mDebug("vgId:%d, is not ready", pVgroup->vgId); + return -1; } else { 
mDebug("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId); balanceDiscardVnode(pVgroup, pSelVnode); + return TSDB_CODE_SUCCESS; } } @@ -407,22 +409,22 @@ static int32_t balanceMonitorVgroups() { int32_t dbReplica = pVgroup->pDb->cfg.replications; int32_t vgReplica = pVgroup->numOfVnodes; + int32_t code; if (vgReplica > dbReplica) { mInfo("vgId:%d, replica:%d numOfVnodes:%d, try remove one vnode", pVgroup->vgId, dbReplica, vgReplica); hasUpdatingVgroup = true; - balanceRemoveVnode(pVgroup); + code = balanceRemoveVnode(pVgroup); } else if (vgReplica < dbReplica) { mInfo("vgId:%d, replica:%d numOfVnodes:%d, try add one vnode", pVgroup->vgId, dbReplica, vgReplica); hasUpdatingVgroup = true; - int32_t code = balanceAddVnode(pVgroup, NULL, NULL); - if (code == TSDB_CODE_SUCCESS) { - mnodeDecVgroupRef(pVgroup); - break; - } + code = balanceAddVnode(pVgroup, NULL, NULL); } mnodeDecVgroupRef(pVgroup); + if (code == TSDB_CODE_SUCCESS) { + break; + } } sdbFreeIter(pIter); From 3b8b8414f3ba26f998d07678d7ad024587d853da Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 04:52:44 +0000 Subject: [PATCH 102/190] minor changes --- src/balance/src/balance.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/balance/src/balance.c b/src/balance/src/balance.c index 1c25c04717..53638f1025 100644 --- a/src/balance/src/balance.c +++ b/src/balance/src/balance.c @@ -409,7 +409,7 @@ static int32_t balanceMonitorVgroups() { int32_t dbReplica = pVgroup->pDb->cfg.replications; int32_t vgReplica = pVgroup->numOfVnodes; - int32_t code; + int32_t code = -1; if (vgReplica > dbReplica) { mInfo("vgId:%d, replica:%d numOfVnodes:%d, try remove one vnode", pVgroup->vgId, dbReplica, vgReplica); From 872ec08f3e00b5b5426244036546cac4da798e9a Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 05:01:16 +0000 Subject: [PATCH 103/190] scripts --- tests/script/unique/big/balance.sim | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tests/script/unique/big/balance.sim b/tests/script/unique/big/balance.sim index a9092ac790..91a7c538d2 100644 --- a/tests/script/unique/big/balance.sim +++ b/tests/script/unique/big/balance.sim @@ -321,6 +321,63 @@ if $data00 != $totalNum then goto show5 endi + +print ========== step6 +sleep 3000 +sql alter database db replica 1 + +$x = 0 +show6: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 freeVnodes $data2_1 +print dnode4 freeVnodes $data2_4 +if $data2_1 != 2 then + goto show6 +endi +if $data2_4 != 2 then + goto show6 +endi + +sql reset query cache +sleep 1000 + +sql select count(*) from t10 +print select count(*) from t10 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t1010 +print select count(*) from t1010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t2010 +print select count(*) from t2010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t3010 +print select count(*) from t3010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from mt +print select count(*) from mt $data00 expect $rowNum +if $data00 != $totalNum then + goto show5 +endi + + system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT From c505914b0427dc1ac23323f5e7774fa0fe6563af Mon Sep 17 00:00:00 2001 From: 
Bomin Zhang Date: Tue, 11 Aug 2020 14:01:28 +0800 Subject: [PATCH 104/190] fix td-1114 --- src/client/src/tscStream.c | 4 ++++ tests/pytest/stream/new.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index d0591de011..60517a2f5c 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -135,6 +135,10 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval; } pQueryInfo->window.ekey = etime; + if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) { + tscSetRetryTimer(pStream, pSql, pStream->slidingTime); + return; + } } // launch stream computing in a new thread diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py index b8503f0b4e..eac93dc0e6 100644 --- a/tests/pytest/stream/new.py +++ b/tests/pytest/stream/new.py @@ -54,7 +54,7 @@ class TDTestCase: tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) tdLog.info("=============== step5") - tdLog.sleep(30) + tdLog.sleep(40) tdSql.waitedQuery("select * from st order by ts desc", 1, 120) v = tdSql.getData(0, 3) if v <= 51: From 09072fba183831d37d30f1d70749507307b69c8b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 16:45:01 +0800 Subject: [PATCH 105/190] TD-1057 localtime func may crash while input < 0 in windows --- src/kit/shell/src/shellEngine.c | 4 ++++ src/os/src/windows/w64Dir.c | 2 +- tests/script/windows/compute/leastsquare.sim | 2 ++ tests/tsim/src/simExe.c | 7 ++++++- 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 750335a037..b0f71368ac 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -368,6 +368,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { tt = (time_t)(val / 1000); } + if (tt < 0) { + tt = 0; + } + struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); diff --git a/src/os/src/windows/w64Dir.c b/src/os/src/windows/w64Dir.c index 7816dac0d6..c486cd0d40 100644 --- a/src/os/src/windows/w64Dir.c +++ b/src/os/src/windows/w64Dir.c @@ -23,7 +23,7 @@ void taosRemoveDir(char *rootDir) { int taosMkDir(const char *path, mode_t mode) { uError("%s not implemented yet", __FUNCTION__); - return -1; + return 0; } void taosMvDir(char* destDir, char *srcDir) { diff --git a/tests/script/windows/compute/leastsquare.sim b/tests/script/windows/compute/leastsquare.sim index 4cd3ad1fb9..69c8fb377b 100644 --- a/tests/script/windows/compute/leastsquare.sim +++ b/tests/script/windows/compute/leastsquare.sim @@ -69,12 +69,14 @@ if $data00 != @{slop:1.000000, intercept:1.000000}@ then endi print =============== step5 +print select leastsquares(tbcol, 1, 1) as b from $tb interval(1d) sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1m) print ===> $data01 if $data01 != @{slop:1.000000, intercept:1.000000}@ then return -1 endi +print select leastsquares(tbcol, 1, 1) as b from $tb interval(1d) sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1d) print ===> $data01 if $data01 != @{slop:1.000000, intercept:1.000000}@ then diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 8bc9a76545..0a1829f7c7 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -783,10 +783,15 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { break; case 
TSDB_DATA_TYPE_TIMESTAMP: tt = *(int64_t *)row[i] / 1000; + if (tt < 0) { + tt = 0; + } + tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); sprintf(value, "%s.%03d", timeStr, - (int)(*((int64_t *)row[i]) % 1000)); + (int)(*((int64_t *)row[i]) % 1000)); + break; default: break; From 32c81edca4dcd3c786bc46290f0e1a00246458bc Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 11 Aug 2020 17:03:48 +0800 Subject: [PATCH 106/190] [fix bug] --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 859e22a178..2b039c90f9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -250,7 +250,7 @@ typedef struct DemoArguments { static struct argp argp = {options, parse_opt, 0, 0}; void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - argp_parse(&argp, argc, argv, 0, 0, &arguments); + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); From d87df9f036235df9e9e8ece69ecaa8ccab7d99df Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 11 Aug 2020 17:05:01 +0800 Subject: [PATCH 107/190] update md file for jdbc connector --- .../webdocs/markdowndocs/Connector.md | 2 +- .../webdocs/markdowndocs/connector-ch.md | 2 +- .../webdocs/markdowndocs/Connector.md | 219 ++++++++++++------ .../webdocs/markdowndocs/connector-ch.md | 6 +- 4 files changed, 151 insertions(+), 78 deletions(-) diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index 563d306128..fcd6976cb0 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -286,7 +286,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > `6030` is the default port and `log` is the default database for system monitor. A normal JDBC URL looks as follows: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md index ec97816a27..79e7e918b3 100644 --- a/documentation/webdocs/markdowndocs/connector-ch.md +++ b/documentation/webdocs/markdowndocs/connector-ch.md @@ -281,7 +281,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` 其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: diff --git a/documentation20/webdocs/markdowndocs/Connector.md b/documentation20/webdocs/markdowndocs/Connector.md index 6d981478df..e5ba6d5185 100644 --- a/documentation20/webdocs/markdowndocs/Connector.md +++ b/documentation20/webdocs/markdowndocs/Connector.md @@ -281,103 +281,100 @@ For the time being, TDengine supports subscription on one or multiple tables. 
It ## Java Connector -TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 +To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1]. -由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver: -* libtaos.so - 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 +* libtaos.so (Linux) + After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path. -* taos.dll - 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 +* taos.dll (Windows) + After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path. -> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 +> Note: Please make sure that [TDengine Windows client][14] has been installed if developing on Windows. Now although TDengine client would be defaultly installed together with TDengine server, it can also be installed [alone][15]. -TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: +Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: +* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method. +* No support for transaction +* No support for union between tables +* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. 
-* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 -* 由于不支持删除和修改,所以也不支持事务操作。 -* 目前不支持表间的 union 操作。 -* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。 +## Version list of TAOS-JDBCDriver and required TDengine and JDK - -## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 - -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| taos-jdbcdriver | TDengine | JDK | | --- | --- | --- | -| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | -| 2.0.0 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.2 | 2.0.0.x or higher | 1.8.x | +| 1.0.3 | 1.6.1.x or higher | 1.8.x | +| 1.0.2 | 1.6.1.x or higher | 1.8.x | +| 1.0.1 | 1.6.1.x or higher | 1.8.x | -## TDengine DataType 和 Java DataType +## DataType in TDengine and Java -TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: +The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java: -| TDengine DataType | Java DataType | -| --- | --- | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | +| TDengine | Java | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | | SMALLINT, TINYINT |java.lang.Short | -| BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | -## 如何获取 TAOS-JDBCDriver +## How to get TAOS-JDBC Driver -### maven 仓库 +### maven repository -目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 +taos-jdbcdriver has been published to [Sonatype Repository][1]: * [sonatype][8] * [mvnrepository][9] * [maven.aliyun][10] -maven 项目中使用如下 pom.xml 配置即可: +Using the following pom.xml for maven projects ```xml com.taosdata.jdbc taos-jdbcdriver - 2.0.0 + 2.0.2 ``` -### 源码编译打包 +### JAR file from the source code -下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 +After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated. +## Usage -## 使用说明 +### get the connection -### 获取连接 - -如下所示配置即可获取 TDengine Connection: ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` -> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 +> `6030` is the default port and `log` is the default database for system monitor. 
-TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +A normal JDBC URL looks as follows: +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: +values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: -* user:登录 TDengine 用户名,默认值 root。 -* password:用户登录密码,默认值 taosdata。 -* charset:客户端使用的字符集,默认值为系统字符集。 -* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 -* locale:客户端语言环境,默认值系统当前 locale。 -* timezone:客户端使用的时区,默认值为系统当前时区。 +* user:user name for login, defaultly root。 +* password:password for login,defaultly taosdata。 +* charset:charset for client,defaultly system charset +* cfgdir:log directory for client, defaultly _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows。 +* locale:language for client,defaultly system locale。 +* timezone:timezone for client,defaultly system timezone。 -以上参数可以在 3 处配置,`优先级由高到低`分别如下: -1. JDBC URL 参数 - 如上所述,可以在 JDBC URL 的参数中指定。 +The options above can be configures (`ordered by priority`): +1. JDBC URL + + As explained above. 2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) ```java public Connection getConn() throws Exception{ @@ -395,9 +392,9 @@ public Connection getConn() throws Exception{ } ``` -3. 客户端配置文件 taos.cfg +3. Configuration file (taos.cfg) - linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 + Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows ```properties # client default username # defaultUser root @@ -411,9 +408,9 @@ public Connection getConn() throws Exception{ # system locale # locale en_US.UTF-8 ``` -> 更多详细配置请参考[客户端配置][13] +> More options can refer to [client configuration][13] -### 创建数据库和表 +### Create databases and tables ```java Statement stmt = conn.createStatement(); @@ -427,9 +424,9 @@ stmt.executeUpdate("use db"); // create table stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); ``` -> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 +> Note: if no step like `use db`, the name of database must be added as prefix like _db.tb_ when operating on tables -### 插入数据 +### Insert data ```java // insert data @@ -437,10 +434,10 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now System.out.println("insert " + affectedRows + " rows."); ``` -> now 为系统内部函数,默认为服务器当前时间。 -> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 +> _now_ is the server time. +> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year). -### 查询数据 +### Query database ```java // query data @@ -458,22 +455,22 @@ while(resultSet.next()){ System.out.printf("%s, %d, %s\n", ts, temperature, humidity); } ``` -> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +> query is consistent with relational database. The subscript start with 1 when retrieving return results. It is recommended to use the column name to retrieve results. 
-
-### 关闭资源
+### Close resources
```java
resultSet.close();
stmt.close();
conn.close();
```
-> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
-## 与连接池使用
+> `Make sure the connection is closed when it is no longer needed; otherwise the connection leaks.`
+
+## Using connection pool

**HikariCP**

-* 引入相应 HikariCP maven 依赖:
+* dependency in pom.xml:
```xml
<dependency>
    <groupId>com.zaxxer</groupId>
    <artifactId>HikariCP</artifactId>
    <version>3.4.1</version>
</dependency>
```
-* 使用示例如下:
+* Examples:
```java
public static void main(String[] args) throws SQLException {
    HikariConfig config = new HikariConfig();
@@ -508,8 +505,69 @@ conn.close();
    connection.close(); // put back to connection pool
}
```
-> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
-> 更多 HikariCP 使用问题请查看[官方说明][5]
+> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool.
+> For more instructions, see the [User Guide][5].
+
+**Druid**
+
+* dependency in pom.xml:
+
+```xml
+    <dependency>
+        <groupId>com.alibaba</groupId>
+        <artifactId>druid</artifactId>
+        <version>1.1.20</version>
+    </dependency>
+```
+
+* Examples:
+```java
+public static void main(String[] args) throws Exception {
+    Properties properties = new Properties();
+    properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
+    properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
+    properties.put("username","root");
+    properties.put("password","taosdata");
+
+    properties.put("maxActive","10"); //maximum number of connections in the pool
+    properties.put("initialSize","3");//initial number of connections
+    properties.put("maxWait","10000");//maximum milliseconds to wait for a connection from the pool
+    properties.put("minIdle","3");//minimum number of connections in the pool
+
+    properties.put("timeBetweenEvictionRunsMillis","3000");// interval in milliseconds between connection tests
+
+    properties.put("minEvictableIdleTimeMillis","60000");//minimum idle time in milliseconds
+    properties.put("maxEvictableIdleTimeMillis","90000");//maximum idle time in milliseconds
+
+    properties.put("validationQuery","describe log.dn"); //validation query
+    properties.put("testWhileIdle","true"); // test connection while idle
+    properties.put("testOnBorrow","false"); // not needed when testWhileIdle is true
+    properties.put("testOnReturn","false"); // not needed when testWhileIdle is true
+
+    //create druid datasource
+    DataSource ds = DruidDataSourceFactory.createDataSource(properties);
+    Connection connection = ds.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+
+    //query or insert
+    // ...
+
+    connection.close(); // put back to connection pool
+}
+```
+> For more instructions, see the [User Guide][6].
+
+**Notice**
+* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`.
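For the two pools shown above, the recommended heartbeat query can be wired in as in the sketch below. This is an illustrative addition, not part of the patch: `setConnectionTestQuery` is HikariCP's own setter, `validationQuery` is the same Druid property key used in the example above, and the imports are the ones already used in those examples.

```java
// HikariCP: use the heartbeat function as the connection test query
HikariConfig config = new HikariConfig();
config.setConnectionTestQuery("select server_status()");

// Druid: same idea through the properties passed to DruidDataSourceFactory
Properties properties = new Properties();
properties.put("validationQuery", "select server_status()");
```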
+ +As follows,`1` will be returned if `select server_status()` is successfully executed。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` ## Python Connector @@ -821,3 +879,18 @@ An example of using the NodeJS connector to create a table with weather data and An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B \ No newline at end of file diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 2cd12c3779..ae39c78101 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -306,7 +306,7 @@ TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | --- | --- | --- | -| 2.0.0 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.2 | 2.0.0.x 及以上 | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | @@ -341,7 +341,7 @@ maven 项目中使用如下 pom.xml 配置即可: com.taosdata.jdbc taos-jdbcdriver - 2.0.1 + 2.0.2 ``` @@ -363,7 +363,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` 其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: From 1169eba1c84be625e860295a0bac06ffd0b65a39 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 17:27:13 +0800 Subject: [PATCH 108/190] TD-1057 crash in windows --- src/client/src/tscSQLParser.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index bcf43a1a8b..d81896322c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1125,7 +1125,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t int32_t tableIndex = columnList.ids[0].tableIndex; // todo 
potential data overflow - char arithmeticExprStr[1024*12]; + char* arithmeticExprStr = malloc(1024*1024); char* p = arithmeticExprStr; if (arithmeticType == NORMAL_ARITHMETIC) { @@ -1134,11 +1134,13 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t // all columns in arithmetic expression must belong to the same table for (int32_t f = 1; f < columnList.num; ++f) { if (columnList.ids[f].tableIndex != tableIndex) { + taosTFree(arithmeticExprStr); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } } if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + taosTFree(arithmeticExprStr); return TSDB_CODE_TSC_INVALID_SQL; } @@ -1157,6 +1159,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); if (ret != TSDB_CODE_SUCCESS) { tExprTreeDestroy(&pNode, NULL); + taosTFree(arithmeticExprStr); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -1164,6 +1167,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t for(int32_t k = 0; k < numOfNode; ++k) { SColIndex* pIndex = taosArrayGet(colList, k); if (pIndex->flag == 1) { + taosTFree(arithmeticExprStr); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } @@ -1190,6 +1194,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t tExprTreeDestroy(&pNode, NULL); } else { if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + taosTFree(arithmeticExprStr); return TSDB_CODE_TSC_INVALID_SQL; } @@ -1213,6 +1218,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); if (ret != TSDB_CODE_SUCCESS) { tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); + taosTFree(arithmeticExprStr); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); } @@ -1220,6 +1226,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t } } + taosTFree(arithmeticExprStr); return TSDB_CODE_SUCCESS; } From 1e24b99534002a00a69180e5412a59e239c2f896 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 17:58:41 +0800 Subject: [PATCH 109/190] TD-1057 system function in windows --- src/os/src/windows/w64Atomic.c | 4 ++ src/os/src/windows/w64Lz4.c | 26 +++++++++- src/os/src/windows/w64Sysinfo.c | 91 +++++++++++++++++++++++++++++++-- 3 files changed, 115 insertions(+), 6 deletions(-) diff --git a/src/os/src/windows/w64Atomic.c b/src/os/src/windows/w64Atomic.c index 0425f4ed3f..9fc3eae672 100644 --- a/src/os/src/windows/w64Atomic.c +++ b/src/os/src/windows/w64Atomic.c @@ -43,7 +43,11 @@ long interlocked_add_fetch_32(long volatile* ptr, long val) { } __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) { +#ifdef _WIN64 return _InterlockedExchangeAdd64(ptr, val) + val; +#else + return _InterlockedExchangeAdd(ptr, val) + val; +#endif } // and diff --git a/src/os/src/windows/w64Lz4.c b/src/os/src/windows/w64Lz4.c index 96556c1f1c..631a22e572 100644 --- a/src/os/src/windows/w64Lz4.c +++ b/src/os/src/windows/w64Lz4.c @@ -21,9 +21,29 @@ #include "tulog.h" #include "tutil.h" +unsigned char _MyBitScanForward64(unsigned long *ret, uint64_t x) { + unsigned long x0 = (unsigned long)x, top, bottom; + _BitScanForward(&top, (unsigned long)(x >> 32)); + _BitScanForward(&bottom, x0); + *ret 
= x0 ? bottom : 32 + top; + return x != 0; +} + +unsigned char _MyBitScanReverse64(unsigned long *ret, uint64_t x) { + unsigned long x1 = (unsigned long)(x >> 32), top, bottom; + _BitScanReverse(&top, x1); + _BitScanReverse(&bottom, (unsigned long)x); + *ret = x1 ? top + 32 : bottom; + return x != 0; +} + int32_t BUILDIN_CLZL(uint64_t val) { unsigned long r = 0; +#ifdef _WIN64 _BitScanReverse64(&r, val); +#else + _MyBitScanReverse64(&r, val); +#endif return (int)(r >> 3); } @@ -35,7 +55,11 @@ int32_t BUILDIN_CLZ(uint32_t val) { int32_t BUILDIN_CTZL(uint64_t val) { unsigned long r = 0; +#ifdef _WIN64 _BitScanForward64(&r, val); +#else + _MyBitScanForward64(&r, val); +#endif return (int)(r >> 3); } @@ -43,4 +67,4 @@ int32_t BUILDIN_CTZ(uint32_t val) { unsigned long r = 0; _BitScanForward(&r, val); return (int)(r >> 3); -} +} \ No newline at end of file diff --git a/src/os/src/windows/w64Sysinfo.c b/src/os/src/windows/w64Sysinfo.c index 27869e1eec..70abe939b2 100644 --- a/src/os/src/windows/w64Sysinfo.c +++ b/src/os/src/windows/w64Sysinfo.c @@ -21,6 +21,15 @@ #include "ttimer.h" #include "tulog.h" #include "tutil.h" +#if (_WIN64) +#include +#include +#include +#include +#include +#include +#pragma comment(lib, "Mswsock.lib ") +#endif static void taosGetSystemTimezone() { // get and set default timezone @@ -69,11 +78,64 @@ void taosGetSystemInfo() { taosGetSystemLocale(); } -bool taosGetDisk() { return true; } +bool taosGetDisk() { + const double unit = 1024 * 1024 * 1024; + BOOL fResult; + unsigned _int64 i64FreeBytesToCaller; + unsigned _int64 i64TotalBytes; + unsigned _int64 i64FreeBytes; + char dir[4] = {'C', ':', '\\', '\0'}; + int drive_type; + + if (tscEmbedded) { + drive_type = GetDriveTypeA(dir); + if (drive_type == DRIVE_FIXED) { + fResult = GetDiskFreeSpaceExA(dir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, + (PULARGE_INTEGER)&i64FreeBytes); + if (fResult) { + tsTotalDataDirGB = tsTotalLogDirGB = tsTotalTmpDirGB = (float)(i64TotalBytes / unit); + tsAvailDataDirGB = tsAvailLogDirGB = tsAvailTmpDirectorySpace = (float)(i64FreeBytes / unit); + } + } + } + return true; +} + +bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { + IO_COUNTERS io_counter; + if (GetProcessIoCounters(GetCurrentProcess(), &io_counter)) { + if (readbyte) *readbyte = io_counter.ReadTransferCount; + if (writebyte) *writebyte = io_counter.WriteTransferCount; + return true; + } + return false; +} bool taosGetProcIO(float *readKB, float *writeKB) { - *readKB = 0; - *writeKB = 0; + static int64_t lastReadbyte = -1; + static int64_t lastWritebyte = -1; + + int64_t curReadbyte = 0; + int64_t curWritebyte = 0; + + if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { + return false; + } + + if (lastReadbyte == -1 || lastWritebyte == -1) { + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + return false; + } + + *readKB = (float)((double)(curReadbyte - lastReadbyte) / 1024); + *writeKB = (float)((double)(curWritebyte - lastWritebyte) / 1024); + if (*readKB < 0) *readKB = 0; + if (*writeKB < 0) *writeKB = 0; + + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + return true; } @@ -89,12 +151,31 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { } bool taosGetProcMemory(float *memoryUsedMB) { - *memoryUsedMB = 0; + unsigned bytes_used = 0; +#if defined(_WIN32) && defined(_MSC_VER) + PROCESS_MEMORY_COUNTERS pmc; + HANDLE cur_proc = GetCurrentProcess(); + + if (GetProcessMemoryInfo(cur_proc, &pmc, sizeof(pmc))) { + bytes_used = 
(unsigned)(pmc.WorkingSetSize + pmc.PagefileUsage); + } +#endif + + *memoryUsedMB = (float)bytes_used / 1024 / 1024; + return true; } bool taosGetSysMemory(float *memoryUsedMB) { - *memoryUsedMB = 0; + MEMORYSTATUSEX memsStat; + float nMemFree; + float nMemTotal; + + memsStat.dwLength = sizeof(memsStat); + if (!GlobalMemoryStatusEx(&memsStat)) { return false; } + nMemFree = memsStat.ullAvailPhys / (1024.0f * 1024.0f); + nMemTotal = memsStat.ullTotalPhys / (1024.0f * 1024.0f); + *memoryUsedMB = nMemTotal - nMemFree; return true; } From ad4157d2badcb490699ce40bfcc228c1b515bb8d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 18:19:56 +0800 Subject: [PATCH 110/190] TD-1057 minor changes --- src/kit/shell/src/shellEngine.c | 3 +++ src/kit/shell/src/shellWindows.c | 2 +- src/kit/taosdemo/taosdemo.c | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index b0f71368ac..a5e5801ada 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -128,6 +128,9 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { if (regex_match(command, "^[ \t]*(quit|q|exit)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { taos_close(con); write_history(); +#ifdef WINDOWS + exit(EXIT_SUCCESS); +#endif return -1; } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 7297f23931..279d3b9cdd 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -222,6 +222,6 @@ void *shellLoopQuery(void *arg) { return NULL; } -void get_history_path(char *history) { sprintf(history, "%s/%s", ".", HISTORY_FILE); } +void get_history_path(char *history) { sprintf(history, "C:/TDengine/%s", HISTORY_FILE); } void exitShell() { exit(EXIT_SUCCESS); } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 2b039c90f9..192cb3145c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -306,7 +306,7 @@ typedef struct DemoArguments { printf("%s%s\n", indent, "-R"); printf("%s%s%s\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50."); printf("%s%s\n", indent, "-D"); - printf("%s%s%s\n", indent, indent, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); + printf("%s%s%s\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); } void parse_args(int argc, char *argv[], SDemoArguments *arguments) { From 2bd372877b8867de5d13920ffd60a1aba1d1ea5e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 11 Aug 2020 18:35:01 +0800 Subject: [PATCH 111/190] TD-1057 minor changes --- src/kit/shell/src/shellEngine.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index a5e5801ada..a7a69e5f45 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -743,7 +743,7 @@ void read_history() { FILE *f = fopen(f_history, "r"); if (f == NULL) { - fprintf(stderr, "Opening file %s\n", f_history); + fprintf(stderr, "Failed to open file %s\n", f_history); return; } @@ -768,7 +768,7 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { - fprintf(stderr, "Opening file %s\n", f_history); + fprintf(stderr, "Failed to open file %s for write\n", f_history); return; } From 04b497495edcaa830db46764bb51f2d914ed3b09 Mon Sep 17 00:00:00 2001 From: 
Xiaowei Su <46439638+Shawshank-Smile@users.noreply.github.com> Date: Tue, 11 Aug 2020 19:07:02 +0800 Subject: [PATCH 112/190] Update faq-ch.md --- documentation20/webdocs/markdowndocs/faq-ch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index bca9cff8e6..a4111e78fc 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -19,7 +19,7 @@ #### 4. 如何让TDengine crash时生成core文件? 请看为此问题撰写的技术博客 -#### 5. 遇到错误"failed to connect to server", 我怎么办? +#### 5. 遇到错误"Unable to establish connection", 我怎么办? 客户端遇到链接故障,请按照下面的步骤进行检查: From 6277e43a902be0758ddbd1edcad86bfb4ce10c41 Mon Sep 17 00:00:00 2001 From: Xiaowei Su <46439638+Shawshank-Smile@users.noreply.github.com> Date: Tue, 11 Aug 2020 19:09:20 +0800 Subject: [PATCH 113/190] Update faq.md --- documentation20/webdocs/markdowndocs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/faq.md b/documentation20/webdocs/markdowndocs/faq.md index ec0bc2957a..ce7d2ebf5e 100644 --- a/documentation20/webdocs/markdowndocs/faq.md +++ b/documentation20/webdocs/markdowndocs/faq.md @@ -10,7 +10,7 @@ Version 2.X is a complete refactoring of the previous version, and configuration 4. Enjoy the latest stable version of TDengine 5. If the data needs to be migrated or the data file is corrupted, please contact the official technical support team for assistance -#### 2. When encoutered with the error "failed to connect to server", what can I do? +#### 2. When encoutered with the error "Unable to establish connection", what can I do? The client may encounter connection errors. Please follow the steps below for troubleshooting: From 936a3897ac0d58f76c131b816e5b88be97a878b2 Mon Sep 17 00:00:00 2001 From: Xiaowei Su <46439638+Shawshank-Smile@users.noreply.github.com> Date: Tue, 11 Aug 2020 20:59:36 +0800 Subject: [PATCH 114/190] Update faq-ch.md --- documentation20/webdocs/markdowndocs/faq-ch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index bca9cff8e6..a4111e78fc 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -19,7 +19,7 @@ #### 4. 如何让TDengine crash时生成core文件? 请看为此问题撰写的技术博客 -#### 5. 遇到错误"failed to connect to server", 我怎么办? +#### 5. 遇到错误"Unable to establish connection", 我怎么办? 客户端遇到链接故障,请按照下面的步骤进行检查: From 81f619cf5145a40b13f6a0f3c5ad960ef29edad4 Mon Sep 17 00:00:00 2001 From: Xiaowei Su <46439638+Shawshank-Smile@users.noreply.github.com> Date: Tue, 11 Aug 2020 21:03:09 +0800 Subject: [PATCH 115/190] Update faq.md --- documentation20/webdocs/markdowndocs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/faq.md b/documentation20/webdocs/markdowndocs/faq.md index ec0bc2957a..ce7d2ebf5e 100644 --- a/documentation20/webdocs/markdowndocs/faq.md +++ b/documentation20/webdocs/markdowndocs/faq.md @@ -10,7 +10,7 @@ Version 2.X is a complete refactoring of the previous version, and configuration 4. Enjoy the latest stable version of TDengine 5. If the data needs to be migrated or the data file is corrupted, please contact the official technical support team for assistance -#### 2. When encoutered with the error "failed to connect to server", what can I do? +#### 2. 
When encoutered with the error "Unable to establish connection", what can I do? The client may encounter connection errors. Please follow the steps below for troubleshooting: From 4b9e42eba262a124cbb66382e175e6121f98bdb8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 11 Aug 2020 23:23:38 +0800 Subject: [PATCH 116/190] [td-225] add unlock read lock of tsdb. --- src/tsdb/src/tsdbRead.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 07b50301d3..9e09f50abd 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2406,6 +2406,8 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, co } CATCH( code ) { CLEANUP_EXECUTE(); terrno = code; + tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases + goto _error; // TODO: more error handling } END_TRY From 5615c021c298d24d99b7a9e0046ce5f090d3408f Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 12 Aug 2020 10:46:22 +0800 Subject: [PATCH 117/190] fix .h file coredump --- src/tsdb/src/tsdbFile.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 71d88ff29e..5ba9a68e37 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -128,7 +128,11 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { code = regexec(®ex2, dp->d_name, 0, NULL, 0); if (code == 0) { tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); - remove(dp->d_name); + char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2); + if (fname == NULL) goto _err; + sprintf(fname, "%s/%s", tDataDir, dp->d_name); + remove(fname); + free(fname); } else if (code == REG_NOMATCH) { tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); continue; From 6418035f3edc6efb4f4e05a557273246c2b0360c Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Wed, 12 Aug 2020 11:05:34 +0800 Subject: [PATCH 118/190] temporary fix of negative time --- src/kit/shell/src/shellEngine.c | 4 ++++ tests/tsim/src/simExe.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index a7a69e5f45..2838dc5386 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -371,9 +371,13 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { tt = (time_t)(val / 1000); } +/* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. if (tt < 0) { tt = 0; } + */ struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 0a1829f7c7..677cea54c2 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -783,9 +783,13 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { break; case TSDB_DATA_TYPE_TIMESTAMP: tt = *(int64_t *)row[i] / 1000; + /* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. 
if (tt < 0) { tt = 0; } + */ tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); From c29cbddc1aff5d08e48b7a717ca3c34faeac07c5 Mon Sep 17 00:00:00 2001 From: yangzy Date: Wed, 12 Aug 2020 11:28:10 +0800 Subject: [PATCH 119/190] TD-1115 Fix --- .../jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 1cf024f30e..277f9a1ab0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -158,9 +158,10 @@ public class TSDBStatement implements Statement { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { // no result set is retrieved + this.connecter.freeResultSet(pSql);: res = false; } - this.connecter.freeResultSet(pSql); + return res; } From f790a124b1edfa93a5463cdba1a6dddaf382a7fb Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 05:14:59 +0000 Subject: [PATCH 120/190] localtime func crash in windows --- src/kit/shell/src/shellEngine.c | 4 ++++ tests/tsim/src/simExe.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 2838dc5386..d765eb3ad7 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -379,6 +379,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } */ +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 677cea54c2..50d1a9b5be 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -791,6 +791,10 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { } */ +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); sprintf(value, "%s.%03d", timeStr, From 94d0aa193d33f591e89c48ba04e7b53e90fccc59 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 14:44:09 +0800 Subject: [PATCH 121/190] TD-1057 set crash in windows --- src/os/src/windows/w64Env.c | 1 + src/os/src/windows/w64Sysinfo.c | 90 +++++++++++++++++++++++---------- 2 files changed, 63 insertions(+), 28 deletions(-) diff --git a/src/os/src/windows/w64Env.c b/src/os/src/windows/w64Env.c index 57d34d4003..5544c4ba39 100644 --- a/src/os/src/windows/w64Env.c +++ b/src/os/src/windows/w64Env.c @@ -21,6 +21,7 @@ extern void taosWinSocketInit(); void osInit() { + taosSetCoreDump(); if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); } diff --git a/src/os/src/windows/w64Sysinfo.c b/src/os/src/windows/w64Sysinfo.c index 70abe939b2..eb252a0b86 100644 --- a/src/os/src/windows/w64Sysinfo.c +++ b/src/os/src/windows/w64Sysinfo.c @@ -16,21 +16,23 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taosdef.h" -#include "tglobal.h" #include "tconfig.h" +#include "tglobal.h" #include "ttimer.h" #include "tulog.h" #include "tutil.h" #if (_WIN64) -#include #include -#include -#include -#include #include +#include +#include +#include +#include #pragma comment(lib, "Mswsock.lib ") #endif +#include + static void taosGetSystemTimezone() { // get and set default timezone SGlobalCfg *cfg_timezone = 
taosGetConfigOption("timezone"); @@ -52,7 +54,7 @@ static void taosGetSystemLocale() { if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) { char *locale = setlocale(LC_CTYPE, "chs"); if (locale != NULL) { - tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN);; + tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN); cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; uInfo("locale not configured, set to default:%s", tsLocale); } @@ -79,7 +81,7 @@ void taosGetSystemInfo() { } bool taosGetDisk() { - const double unit = 1024 * 1024 * 1024; + const double unit = 1024 * 1024 * 1024; BOOL fResult; unsigned _int64 i64FreeBytesToCaller; unsigned _int64 i64TotalBytes; @@ -88,15 +90,15 @@ bool taosGetDisk() { int drive_type; if (tscEmbedded) { - drive_type = GetDriveTypeA(dir); + drive_type = GetDriveTypeA(dir); if (drive_type == DRIVE_FIXED) { - fResult = GetDiskFreeSpaceExA(dir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, - (PULARGE_INTEGER)&i64FreeBytes); - if (fResult) { - tsTotalDataDirGB = tsTotalLogDirGB = tsTotalTmpDirGB = (float)(i64TotalBytes / unit); - tsAvailDataDirGB = tsAvailLogDirGB = tsAvailTmpDirectorySpace = (float)(i64FreeBytes / unit); - } - } + fResult = GetDiskFreeSpaceExA(dir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, + (PULARGE_INTEGER)&i64FreeBytes); + if (fResult) { + tsTotalDataDirGB = tsTotalLogDirGB = tsTotalTmpDirGB = (float)(i64TotalBytes / unit); + tsAvailDataDirGB = tsAvailLogDirGB = tsAvailTmpDirectorySpace = (float)(i64FreeBytes / unit); + } + } } return true; } @@ -172,10 +174,12 @@ bool taosGetSysMemory(float *memoryUsedMB) { float nMemTotal; memsStat.dwLength = sizeof(memsStat); - if (!GlobalMemoryStatusEx(&memsStat)) { return false; } + if (!GlobalMemoryStatusEx(&memsStat)) { + return false; + } nMemFree = memsStat.ullAvailPhys / (1024.0f * 1024.0f); nMemTotal = memsStat.ullTotalPhys / (1024.0f * 1024.0f); - *memoryUsedMB = nMemTotal - nMemFree; + *memoryUsedMB = nMemTotal - nMemFree; return true; } @@ -184,16 +188,46 @@ int taosSystem(const char *cmd) { return -1; } -int flock(int fd, int option) { - return 0; +int flock(int fd, int option) { return 0; } + +int fsync(int filedes) { return 0; } + +int sigaction(int sig, struct sigaction *d, void *p) { return 0; } + +LONG WINAPI FlCrashDump(PEXCEPTION_POINTERS ep) { + typedef BOOL(WINAPI * FxMiniDumpWriteDump)(IN HANDLE hProcess, IN DWORD ProcessId, IN HANDLE hFile, + IN MINIDUMP_TYPE DumpType, + IN CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam, + IN CONST PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam, + IN CONST PMINIDUMP_CALLBACK_INFORMATION CallbackParam); + + HMODULE dll = LoadLibrary("dbghelp.dll"); + if (dll == NULL) return EXCEPTION_CONTINUE_SEARCH; + FxMiniDumpWriteDump mdwd = (FxMiniDumpWriteDump)(GetProcAddress(dll, "MiniDumpWriteDump")); + if (mdwd == NULL) { + FreeLibrary(dll); + return EXCEPTION_CONTINUE_SEARCH; + } + + TCHAR path[MAX_PATH]; + DWORD len = GetModuleFileName(NULL, path, _countof(path)); + path[len - 3] = 'd'; + path[len - 2] = 'm'; + path[len - 1] = 'p'; + + HANDLE file = CreateFile(path, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + + MINIDUMP_EXCEPTION_INFORMATION mei; + mei.ThreadId = GetCurrentThreadId(); + mei.ExceptionPointers = ep; + mei.ClientPointers = FALSE; + + (*mdwd)(GetCurrentProcess(), GetCurrentProcessId(), file, MiniDumpWithHandleData, &mei, NULL, NULL); + + CloseHandle(file); + FreeLibrary(dll); + + return EXCEPTION_CONTINUE_SEARCH; } -int fsync(int filedes) { - return 0; -} 
- -int sigaction(int sig, struct sigaction *d, void *p) { - return 0; -} - -void taosSetCoreDump() {} \ No newline at end of file +void taosSetCoreDump() { SetUnhandledExceptionFilter(&FlCrashDump); } \ No newline at end of file From 6bb00549310156d7e1783c36e2027df226b6947c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 12 Aug 2020 14:57:32 +0800 Subject: [PATCH 122/190] [td-225] remove invalid ts assert --- src/tsdb/src/tsdbRead.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ca92d19de9..98153efe3e 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1115,14 +1115,6 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* TSKEY* tsArray = pCols->cols[0].pData; - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - TSKEY s = tsArray[cur->pos]; - assert(s >= pQueryHandle->window.skey && s <= pQueryHandle->window.ekey); - } else { - TSKEY s = tsArray[cur->pos]; - assert(s <= pQueryHandle->window.skey && s >= pQueryHandle->window.ekey); - } - // for search the endPos, so the order needs to reverse int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC; From d2fafd71f72726c632da22a47e35923247550dc1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 12 Aug 2020 15:11:15 +0800 Subject: [PATCH 123/190] [td-225] add some test cases. --- tests/script/general/parser/lastrow_query.sim | 4 ++++ tests/script/general/parser/testSuite.sim | 2 ++ 2 files changed, 6 insertions(+) diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index e43cc15173..72e8b4de95 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -51,3 +51,7 @@ if $data09 != NCHAR then return -1 endi +sql select count(*) from lr_tb1 where ts>'2018-09-18 08:45:00.1' and ts<'2018-09-18 08:45:00.2' +if $row != 0 then + return -1 +endi diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index dd4faee959..ccd1aa9940 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -97,6 +97,8 @@ run general/parser/topbot.sim sleep 2000 run general/parser/union.sim sleep 2000 +run general/parser/bug.sim +sleep 2000 run general/parser/sliding.sim #sleep 2000 From 37aa0504aa97e1407d80141860828c4f68ee3c40 Mon Sep 17 00:00:00 2001 From: gemini Date: Wed, 12 Aug 2020 16:02:25 +0800 Subject: [PATCH 124/190] fix TD-1115 --- .../main/java/com/taosdata/jdbc/TSDBStatement.java | 2 +- .../jdbc/src/test/java/TestAsyncTSDBSubscribe.java | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 1cf024f30e..777eef53d1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -158,9 +158,9 @@ public class TSDBStatement implements Statement { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { // no result set is retrieved + this.connecter.freeResultSet(pSql); res = false; } - this.connecter.freeResultSet(pSql); return res; } diff --git a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java 
b/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java index 03a4761b91..6d4c6b1e94 100644 --- a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java +++ b/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java @@ -42,12 +42,14 @@ public class TestAsyncTSDBSubscribe { long subscribId = 0; try { Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata", properties); String rawSql = "select * from " + tName + ";"; TSDBSubscribe subscribe = ((TSDBConnection) connection).createSubscribe(); From d63c90b5a38a8329ab91c792355c82197cd14b30 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 08:28:16 +0000 Subject: [PATCH 125/190] record log error, while fqdn too long --- src/plugins/monitor/src/monitorMain.c | 2 +- src/util/src/version.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 32f5966e2d..55c242763b 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -35,7 +35,7 @@ #define SQL_LENGTH 1024 #define LOG_LEN_STR 100 -#define IP_LEN_STR 18 +#define IP_LEN_STR TSDB_EP_LEN #define CHECK_INTERVAL 1000 typedef enum { diff --git a/src/util/src/version.c b/src/util/src/version.c index d12c2f339e..d541b1afd3 100644 --- a/src/util/src/version.c +++ b/src/util/src/version.c @@ -1,7 +1,7 @@ -char version[12] = "2.0.0.0"; +char version[12] = "2.0.0.6"; char compatible_version[12] = "2.0.0.0"; -char gitinfo[48] = "8df8b7d9930342dd34ba13df160a7d538fad7bc7"; -char gitinfoOfInternal[48] = "bad4f040145fba581d1ab0c5dd718a5ede3e209f"; -char buildinfo[64] = "Built by root at 2020-08-03 17:23"; +char gitinfo[48] = "e9a20fafbe9e3b0b12cbdf55604163b4b9a41b41"; +char gitinfoOfInternal[48] = "dd679db0b9edeedad68574c1e031544711a9831f"; +char buildinfo[64] = "Built by at 2020-08-12 07:59"; -void libtaos_2_0_0_0_Linux_x64() {}; +void libtaos_2_0_0_6_Linux_x64() {}; From eae5bbda27d2abae6924dcb64b69fc3281562e4e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 17:37:53 +0800 Subject: [PATCH 126/190] not install some files in windows --- cmake/install.inc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/install.inc b/cmake/install.inc index 98a60ace7b..432e925b05 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -14,9 +14,9 @@ ELSEIF (TD_LINUX_32) ELSEIF (TD_WINDOWS_64) SET(CMAKE_INSTALL_PREFIX C:/TDengine) IF (NOT TD_GODLL) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python 
DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) From ae787ac904a2f8afe4b6605106a8d7a2d97976be Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 22:19:36 +0800 Subject: [PATCH 127/190] TD-1124 httpPort can not configured by serverPort --- src/common/src/tglobal.c | 1 + src/inc/taosdef.h | 1 + tests/script/general/http/autocreate.sim | 4 +- tests/script/general/http/grafana.sim | 40 +++++----- tests/script/general/http/grafana_bug.sim | 58 +++++++------- tests/script/general/http/prepare.sim | 8 +- tests/script/general/http/restful.sim | 28 +++---- tests/script/general/http/restful_full.sim | 80 ++++++++++---------- tests/script/general/http/restful_insert.sim | 22 +++--- tests/script/general/http/restful_limit.sim | 8 +- tests/script/general/http/telegraf.sim | 76 +++++++++---------- tests/script/general/parser/bug.sim | 2 +- tests/script/tmp/prepare.sim | 11 ++- tests/script/unique/http/admin.sim | 60 +++++++-------- tests/script/unique/http/opentsdb.sim | 76 +++++++++---------- 15 files changed, 243 insertions(+), 232 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 0d7df38b83..3747264123 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -1315,6 +1315,7 @@ bool taosCheckGlobalCfg() { tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035] tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp tsSyncPort = tsServerPort + TSDB_PORT_SYNC; + tsHttpPort = tsServerPort + TSDB_PORT_HTTP; return true; } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index b4cecec8e4..3dea8da18a 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -394,6 +394,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_PORT_DNODESHELL 0 #define TSDB_PORT_DNODEDNODE 5 #define TSDB_PORT_SYNC 10 +#define TSDB_PORT_HTTP 11 #define TAOS_QTYPE_RPC 0 #define TAOS_QTYPE_FWD 1 diff --git a/tests/script/general/http/autocreate.sim b/tests/script/general/http/autocreate.sim index d6efbdbd65..6a005b028a 100644 --- a/tests/script/general/http/autocreate.sim +++ b/tests/script/general/http/autocreate.sim @@ -18,8 +18,8 @@ sql create table if not exists db.win_cpu(ts timestamp,f_percent_dpc_time double print =============== step2 - auto create -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'import into db.win_cpu_windows_1_processor using db.win_cpu tags('windows','1','Processor') values(1564641722000,0.000000,95.598305,0.000000,0.000000,0.000000,0.000000);' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'import into db.win_cpu_windows_1_processor using db.win_cpu tags('windows','1','Processor') values(1564641722000,0.000000,95.598305,0.000000,0.000000,0.000000,0.000000);' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content #if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 
21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then # return -1 #endi diff --git a/tests/script/general/http/grafana.sim b/tests/script/general/http/grafana.sim index 74b1e5637a..cea804cfbb 100644 --- a/tests/script/general/http/grafana.sim +++ b/tests/script/general/http/grafana.sim @@ -52,50 +52,50 @@ sql insert into t3 values('2017-12-25 21:27:41', 3) print =============== step2 - login -system_content curl 127.0.0.1:6041/grafana/ +system_content curl 127.0.0.1:7111/grafana/ print 1-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/xx +system_content curl 127.0.0.1:7111/grafana/xx print 2-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/login/xx/xx/ +system_content curl 127.0.0.1:7111/grafana/login/xx/xx/ print 3-> $system_content if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/root/1/123/1/1/3 +system_content curl 127.0.0.1:7111/grafana/root/1/123/1/1/3 print 4-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/grafana/login/1/root/1/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/grafana/login/1/root/1/ print 5-> $system_content if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then return -1 endi -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/grafana/root/1/login +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login print 6-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi -system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/grafana/root/1/login +system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login print 7-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi sleep 3000 -system_content curl 127.0.0.1:6041/grafana/login/root/taosdata +system_content curl 127.0.0.1:7111/grafana/login/root/taosdata print 8-> $system_content if 
$system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then return -1 @@ -103,7 +103,7 @@ endi print =============== step3 - heartbeat -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/d1/table_gc +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_gc print 9-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 @@ -111,19 +111,19 @@ endi print =============== step4 - search -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/heartbeat +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/heartbeat print 10-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/d1/table_invalid/search +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_invalid/search print 11-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' 127.0.0.1:6041/grafana/d1/m1/search +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' 127.0.0.1:7111/grafana/d1/m1/search print 12-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 @@ -131,49 +131,49 @@ endi print =============== step5 - query -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"system","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"system","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query print 13-> $system_content if $system_content != @[{"refId":"A","target":"taosd","datapoints":[[2,1514208480000]]},{"refId":"B","target":"system","datapoints":[[5.10000,1514208480000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 
1514208523030 interval(1m)"},{"refId":"B","alias":"","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query print 14-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,1514208480000]]},{"refId":"B","target":"B","datapoints":[[5.10000,1514208480000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 15-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 15-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query print 16-> $system_content if $system_content != @[{"refId":"A","target":"{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"{b:c}","datapoints":[[9,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query print 17-> $system_content if $system_content != 
@[{"refId":"A","target":"count{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"count{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"count{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"sum-{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"sum-{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"sum-{b:c}","datapoints":[[9,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:7111/grafana/query print 18-> $system_content if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000],[1,1514208540000]]},{"refId":"A","target":"count{a:2,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000]]},{"refId":"A","target":"count{a:3,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 19-> $system_content if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.299999714,"-"]]},{"refId":"B","target":"{sum(v2):15.299999714}","datapoints":[[3,"-"]]}]@ then return -1 diff --git a/tests/script/general/http/grafana_bug.sim b/tests/script/general/http/grafana_bug.sim index 9c6a8a7000..43c52ba75f 100644 --- a/tests/script/general/http/grafana_bug.sim +++ b/tests/script/general/http/grafana_bug.sim @@ -33,7 +33,7 @@ sql insert into tb2 values('2020-01-04 00:00:00.000', 22, 214, 224) print =============== step1 - one query, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 @@ -41,7 +41,7 @@ endi print =============== step2 - one query, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step2-> $system_content if $system_content != 
@[{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 @@ -49,80 +49,80 @@ endi print =============== step3 - one query, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step3.1-> $system_content if $system_content != @[{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:7111/grafana/query print step3.2-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query print step3.3-> $system_content if $system_content != @[{"refId":"A","target":"{val:1,}","datapoints":[[11,1577808000000],[12,1577894400000]]},{"refId":"A","target":"{val:2,}","datapoints":[[13,1577980800000],[14,1578067200000]]}]@ then return -1 endi print =============== step4 - one query, 4 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query print step4.1-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,, val:1,}","datapoints":[[21,1577808000000]]},{"refId":"A","target":"{val1:12,, val:1,}","datapoints":[[22,1577894400000]]},{"refId":"A","target":"{val1:13,, val:2,}","datapoints":[[23,1577980800000]]},{"refId":"A","target":"{val1:14,, val:2,}","datapoints":[[24,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:7111/grafana/query print step4.2-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,, val2:21,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,, val2:22,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,, 
val2:23,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,, val2:24,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step5 - one query, 1 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,"-"],[2,"-"]]}]@ then return -1 endi print =============== step6 - one query, 2 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:23,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,}","datapoints":[[14,"-"]]}]@ then return -1 endi print =============== step7 - one query, 3 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:21,, val:1,}","datapoints":[[11,"-"]]},{"refId":"A","target":"{val2:22,, val:1,}","datapoints":[[12,"-"]]},{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then return -1 endi print =============== step8 - one query, no return -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 endi print =============== step9 - one query, insert sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"insert into db.t2 values(now, 1) "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ 
{"refId":"A","alias":"","sql":"insert into db.t2 values(now, 1) "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 endi print =============== step10 - one query, error sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tt "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tt "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 @@ -130,7 +130,7 @@ endi print =============== step11 - two query, 1 column, with timestamp, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 @@ -138,98 +138,98 @@ endi print =============== step12 - two query, 1 column, with timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step13 - two query, 1 column, with timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step14 - two query, 2 column, with timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ 
{"refId":"B","alias":"BB","sql":"select ts, val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[223,1577980800000],[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step15 - two query, 2 column, with timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val1:213,}","datapoints":[[223,1577980800000]]},{"refId":"B","target":"BB{val1:214,}","datapoints":[[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step16 - two query, 3 column, with timestamp, 4 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val1:213,, val2:223,, val1:213,}","datapoints":[[22,1577980800000]]},{"refId":"B","target":"BB{val1:214,, val2:224,, val1:214,}","datapoints":[[22,1578067200000]]},{"refId":"A","target":"AA{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"AA{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step17 - two query, 2 column, with timestamp, no return -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != 
@[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step18 - two query, 2 column, with timestamp, invalid sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb222 where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb222 where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step19 - two query, 2 column, with timestamp, insert sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"insert into db.t2 values(now, 1)"} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"insert into db.t2 values(now, 1)"} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step20 - two query, 1 column, no timestamp, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 endi print =============== step21 - two query, 1 column, no timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[223,1577980800000],[224,1578067200000]]}]@ then return -1 endi print =============== step22 - two query, 1 column, no timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic 
cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA{val2:223,}","datapoints":[[213,1577980800000]]},{"refId":"A","target":"AA{val2:224,}","datapoints":[[214,1578067200000]]}]@ then return -1 endi print =============== step23 - two query, 2 column, no timestamp, 1 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA","datapoints":[[213,"-"],[214,"-"]]}]@ then return -1 endi print =============== step24 - two query, 2 column, no timestamp, 2 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,}","datapoints":[[22,"-"]]}]@ then return -1 endi print =============== step25 - two query, 2 column, no timestamp, 3 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != 
@[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,, val2:223,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,, val2:224,}","datapoints":[[22,"-"]]}]@ then return -1 diff --git a/tests/script/general/http/prepare.sim b/tests/script/general/http/prepare.sim index 1645c1a7e9..0bcb42ad41 100644 --- a/tests/script/general/http/prepare.sim +++ b/tests/script/general/http/prepare.sim @@ -34,8 +34,8 @@ sleep 4000 print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then return -1 @@ -45,8 +45,8 @@ return print =============== step3 - query data -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwia2V5IjoiYkVsOExjdkxVZDdhOHFkdE5abXRPTnJ5cDIwMW1hMDQiLCJzdWIiOiJyb290In0.k7CkgmpOJImIkLqZqzASlPmkdeEw7Wfk4XUrqGZX-LQ' -d 'select * from t1' 127.0.0.1:6041/rest/sql/d1 -print curl 127.0.0.1:6041/rest/sql/d1 -----> $system_content +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwia2V5IjoiYkVsOExjdkxVZDdhOHFkdE5abXRPTnJ5cDIwMW1hMDQiLCJzdWIiOiJyb290In0.k7CkgmpOJImIkLqZqzASlPmkdeEw7Wfk4XUrqGZX-LQ' -d 'select * from t1' 127.0.0.1:7111/rest/sql/d1 +print curl 127.0.0.1:7111/rest/sql/d1 -----> $system_content if $system_content != @{"status":"succ","head":["ts","i","b"],"data":[["2017-12-25 21:28:54.022",14,"44\\\\\"\"44"],["2017-12-25 21:28:53.022",13,"33\\\\\"33"],["2017-12-25 21:28:52.022",12,"22\\\\11"],["2017-12-25 21:28:51.022",11,"11\\\\11"],["2017-12-25 21:28:49.022",9,"99\\99"],["2017-12-25 21:28:48.022",8,"88\"\"88"],["2017-12-25 21:28:47.022",7,"77\"7\""],["2017-12-25 21:28:46.022",6,"66'6'"],["2017-12-25 21:28:45.022",5,"55'"],["2017-12-25 21:28:44.022",4,"44\""],["2017-12-25 21:28:43.022",3,"33"],["2017-12-25 21:28:42.022",2,"22"],["2017-12-25 21:28:41.022",1,"11"]],"rows":13}@ then return -1 endi diff --git a/tests/script/general/http/restful.sim b/tests/script/general/http/restful.sim index ff0e823c6d..7d1169ca27 100644 --- a/tests/script/general/http/restful.sim +++ b/tests/script/general/http/restful.sim @@ -30,8 +30,8 @@ sql insert into table_rest values('2017-12-25 21:28:50.022', 10) print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 @@ -39,44 +39,44 @@ endi print =============== step3 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 
127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi print =============== step4 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi print =============== step6 - query no db data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content print =============== step7 - change password sql create user u1 PASS 'abcd@1234' sql create user u2 PASS 'abcd_1234' -system_content curl 127.0.0.1:6041/rest/login/u1/abcd@1234 -print curl 127.0.0.1:6041/rest/login/u1/abcd@1234 -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/u1/abcd@1234 +print curl 127.0.0.1:7111/rest/login/u1/abcd@1234 -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"jIlItaLFFIPa8qdtNZmtONryp201ma04SXX8PEJowKAB/46k1gwnPNryp201ma04"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/login/u2/aabcd_1234 -print curl 127.0.0.1:6041/rest/login/u2/abcd_1234 -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/u2/aabcd_1234 +print curl 127.0.0.1:7111/rest/login/u2/abcd_1234 -----> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi diff --git a/tests/script/general/http/restful_full.sim 
b/tests/script/general/http/restful_full.sim
index 0b3cfaa160..88e7dece4c 100644
--- a/tests/script/general/http/restful_full.sim
+++ b/tests/script/general/http/restful_full.sim
@@ -12,57 +12,57 @@ print ============================ dnode1 start
print =============== step1 - login
-system_content curl 127.0.0.1:6041/rest/
+system_content curl 127.0.0.1:7111/rest/
print 1-> $system_content
if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
  return -1
endi
-system_content curl 127.0.0.1:6041/rest/xx
+system_content curl 127.0.0.1:7111/rest/xx
print 2-> $system_content
if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
  return -1
endi
-system_content curl 127.0.0.1:6041/rest/login
+system_content curl 127.0.0.1:7111/rest/login
print 3-> $system_content
if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
  return -1
endi
#4
-system_content curl 127.0.0.1:6041/rest/login/root
+system_content curl 127.0.0.1:7111/rest/login/root
print 4-> $system_content
if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
  return -1
endi
-system_content curl 127.0.0.1:6041/rest/login/root/123
+system_content curl 127.0.0.1:7111/rest/login/root/123
print 5-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
  return -1
endi
-system_content curl 127.0.0.1:6041/rest/login/root/123/1/1/3
+system_content curl 127.0.0.1:7111/rest/login/root/123/1/1/3
print 6-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
  return -1
endi
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/rest/login/root/1
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 7-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
  return -1
endi
#8
-system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/rest/login/root/1
+system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 8-> $system_content
if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
  return -1
endi
-system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/rest/login/root/1
+system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 9-> $system_content
if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
  return -1
@@ -70,7 +70,7 @@ endi
sleep 3000
-system_content curl 127.0.0.1:6041/rest/login/root/taosdata/
+system_content curl 127.0.0.1:7111/rest/login/root/taosdata/
print 10-> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then
  return -1
@@ -79,52 +79,52 @@ endi
print =============== step2 - no db
#11
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/rest/sql
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql
print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then
  return -1
endi
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:6041/rest/sql
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql
print 12-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then
  return -1
endi
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:6041/rest/sql
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql
print 13-> $system_content
if $system_content != @{"status":"error","code":897,"desc":"Database already exists"}@ then
  return -1
endi
#14
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:6041/rest/sql
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:7111/rest/sql
print 14-> $system_content
if $system_content != @{"status":"error","code":5012,"desc":"no sql input"}@ then
  return -1
endi
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:6041/rest/sql
+#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:7111/rest/sql
#print 15-> $system_content
#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
#if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then
# return -1
#endi
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:6041/rest/sql
+#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:7111/rest/sql
#print 16-> $system_content
#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
# return -1
#endi
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:6041/rest/sql
+system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:7111/rest/sql
print 17-> $system_content
if $system_content != 
@{"status":"error","code":512,"desc":"invalid SQL: invalid SQL: syntax error near 'used1'"}@ then return -1 endi #18 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql print 18-> $system_content if $system_content != @{"status":"error","code":896,"desc":"Database not specified or available"}@ then return -1 @@ -133,44 +133,44 @@ endi print =============== step3 - db #19 -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql/d4 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d4 #print 19-> $system_content #if $system_content != @{"status":"error","code":1000,"desc":"invalid DB"}@ then # return -1 #endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql/d1 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d1 #print 20-> $system_content #if $system_content != @{"status":"succ","head":["name","created time","columns","metric"],"data":[],"rows":0}@ then # return -1 #endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:7111/rest/sql print 21-> $system_content if $system_content != @{"status":"error","code":866,"desc":"Table does not exist"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 22-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 23-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[],"rows":0}@ then return -1 endi #24 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 24-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: 
Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 25-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 @@ -178,53 +178,53 @@ endi #26 print 25-> no print -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:42.022', 2)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:42.022', 2)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:43.022', 3)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:43.022', 3)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:44.022', 4)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:44.022', 4)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:45.022', 5)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:45.022', 5)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:46.022', 6)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:46.022', 6)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:47.022', 7)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:47.022', 7)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:48.022', 8)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:48.022', 8)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:49.022', 9)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert 
into d1.t1 values('2017-12-25 21:28:49.022', 9)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:50.022', 10)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:50.022', 10)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql #27 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 27-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:7111/rest/sql print 28-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 29-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi #30 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 30-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:7111/rest/sql print 31-> $system_content if $system_content != 
@{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 diff --git a/tests/script/general/http/restful_insert.sim b/tests/script/general/http/restful_insert.sim index ecdaef987e..f230f98723 100644 --- a/tests/script/general/http/restful_insert.sim +++ b/tests/script/general/http/restful_insert.sim @@ -28,8 +28,8 @@ sql create table d1.table_rest9 (ts timestamp, i int) print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 @@ -37,17 +37,17 @@ endi print =============== step3 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 
127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest1' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest1' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/http/restful_limit.sim b/tests/script/general/http/restful_limit.sim index a88bdec912..7d2b6e9a02 100644 --- a/tests/script/general/http/restful_limit.sim +++ b/tests/script/general/http/restful_limit.sim @@ -37,10 +37,10 @@ while $i < 2 $i = $i + 1 endw -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from db0.st0 limit 100' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from db0.st0 limit 100' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content -#system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d "select * from db0.st0 where tbname in ('tb0', 'tb1') limit 1000" 127.0.0.1:6041/rest/sql -#print curl 127.0.0.1:6041/rest/sql -----> $system_content +#system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d "select * from db0.st0 where tbname in ('tb0', 'tb1') limit 1000" 127.0.0.1:7111/rest/sql +#print curl 127.0.0.1:7111/rest/sql -----> $system_content system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/http/telegraf.sim b/tests/script/general/http/telegraf.sim index a845c2b12f..e54af99ad7 100644 --- a/tests/script/general/http/telegraf.sim +++ b/tests/script/general/http/telegraf.sim @@ -13,231 +13,231 @@ sql connect print ============================ dnode1 start 
print =============== step1 - parse -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/ +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/ print $system_content if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/ print $system_content if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/d123456789012345678901234567890123456 +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/d123456789012345678901234567890123456 print $system_content if $system_content != @{"status":"error","code":5023,"desc":"database name too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '[]' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if 
$system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1 #print $system_content #if $system_content != @{"status":"error","code":5026,"desc":"metrics size can not more than 50"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5028,"desc":"metric name type should be string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5029,"desc":"metric name length is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5030,"desc":"metric name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5031,"desc":"timestamp not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5032,"desc":"timestamp type should be integer"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5033,"desc":"timestamp value smaller than 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5034,"desc":"tags not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata 
-d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata +#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata #print $system_content #if $system_content != @{"status":"error","code":5036,"desc":"tags size too long"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 
print $system_content if $system_content != @{"status":"error","code":5038,"desc":"tag name is null"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 #print $system_content #if $system_content != @{"status":"error","code":5039,"desc":"tag name length too long"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5040,"desc":"tag value type should be number or string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5041,"desc":"tag value is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5042,"desc":"table is null"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5043,"desc":"table name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5045,"desc":"fields size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5048,"desc":"field name is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5051,"desc":"field value is null"}@ then return -1 endi 
-system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5050,"desc":"field value type should be number or string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:6041/telegraf/db +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:7111/telegraf/db print $system_content @@ -248,7 +248,7 @@ endi sleep 3000 print =============== step2 - insert single data -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:6041/telegraf/db/ +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:7111/telegraf/db/ print $system_content @@ -256,7 +256,7 @@ print $system_content # return -1 #endi -system_content curl -u root:taosdata -d 'select * from db.win_cpu_windows_1_Processor' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.win_cpu_windows_1_Processor' 127.0.0.1:7111/rest/sql/ print $system_content @@ -265,7 +265,7 @@ print $system_content #endi print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '{"metrics": [{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window1","instance":"1","objectname":"Processor"},"timestamp":1564641723000},{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window2","instance":"1","objectname":"Processor"},"timestamp":1564641723000}]}' 127.0.0.1:6041/telegraf/db/ +system_content curl -u root:taosdata -d '{"metrics": 
[{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window1","instance":"1","objectname":"Processor"},"timestamp":1564641723000},{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window2","instance":"1","objectname":"Processor"},"timestamp":1564641723000}]}' 127.0.0.1:7111/telegraf/db/ print $system_content @@ -273,7 +273,7 @@ if $system_content != @{"metrics":[{"metric":"win_cpu","stable":"win_cpu","table return -1 endi -system_content curl -u root:taosdata -d 'select * from db.win_cpu_window1_1_Processor' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.win_cpu_window1_1_Processor' 127.0.0.1:7111/rest/sql/ print $system_content @@ -281,7 +281,7 @@ print $system_content # return -1 #endi -system_content curl -u root:taosdata -d 'select count(*) from db.win_cpu' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.win_cpu' 127.0.0.1:7111/rest/sql/ print $system_content diff --git a/tests/script/general/parser/bug.sim b/tests/script/general/parser/bug.sim index f97905d76c..2a46ad1fd6 100644 --- a/tests/script/general/parser/bug.sim +++ b/tests/script/general/parser/bug.sim @@ -38,6 +38,6 @@ sql insert into t2 values(1575880055000, 2); sql select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:6041/restful/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:7111/restful/sql system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim index 0a0c512b26..f59eebede0 100644 --- a/tests/script/tmp/prepare.sim +++ b/tests/script/tmp/prepare.sim @@ -30,4 +30,13 @@ system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 100000 system sh/cfg.sh -n dnode1 -c http -v 1 system sh/cfg.sh -n dnode2 -c http -v 1 system sh/cfg.sh -n dnode3 -c http -v 1 -system sh/cfg.sh -n dnode4 -c http -v 1 \ No newline at end of file +system sh/cfg.sh -n dnode4 -c http -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 + + +system sh/cfg.sh -n dnode1 -c firstEp -v 152.136.17.116:6030 +system sh/cfg.sh -n dnode1 -c secondEp -v 152.136.17.116:6030 +system sh/cfg.sh -n dnode1 -c serverPort -v 6030 +system sh/cfg.sh -n dnode1 -c fqdn -v 152.136.17.116 diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim index cca2d096f6..dc17520d02 100644 --- a/tests/script/unique/http/admin.sim +++ b/tests/script/unique/http/admin.sim @@ -31,63 +31,63 @@ sql insert into table_admin values('2017-12-25 21:28:50.022', 10) print =============== step1 - login -system_content curl 127.0.0.1:6041/admin/ +system_content curl 127.0.0.1:7111/admin/ print 1-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/xx +system_content curl 127.0.0.1:7111/admin/xx print 2-> 
$system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login +system_content curl 127.0.0.1:7111/admin/login print 3-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root +system_content curl 127.0.0.1:7111/admin/login/root print 4-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root/123 +system_content curl 127.0.0.1:7111/admin/login/root/123 print 5-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root/123/1/1/3 +system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3 print 6-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:6041/admin/login/root/1 +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 print 7-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:6041/admin/login/root/1 +system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1 print 8-> $system_content if $system_content != @{"status":"error","code":5053,"desc":"parse http auth token error"}@ then return -1 endi sleep 3000 -system_content curl 127.0.0.1:6041/admin/login/root/taosdata +system_content curl 127.0.0.1:7111/admin/login/root/taosdata print 9 -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/admin/login/root/1 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 #print 10-> $system_content #if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then # return -1 @@ -95,14 +95,14 @@ endi print =============== step2 - logout -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/logout +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout print 10 -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/logout +system_content curl 127.0.0.1:7111/admin/logout print 11 -----> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no 
auth info input"}@ then @@ -111,69 +111,69 @@ endi print =============== step3 - info -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/info -print curl 127.0.0.1:6041/admin/info -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info +print curl 127.0.0.1:7111/admin/info -----> $system_content if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then return -1 endi print =============== step4 - meta -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:6041/admin/meta -print curl 127.0.0.1:6041/admin/meta -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta +print curl 127.0.0.1:7111/admin/meta -----> $system_content #if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then # return -1 #endi print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/all -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/all -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi print =============== step6 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin 
values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/sql -----> $system_content if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/all -print curl 127.0.0.1:6041/admin/all -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all +print curl 127.0.0.1:7111/admin/all -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -#print curl 127.0.0.1:6041/admin/sql -----> $system_content +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +#print curl 127.0.0.1:7111/admin/sql -----> $system_content #if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then # return -1 #endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/info -print curl 127.0.0.1:6041/admin/info -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info +print curl 127.0.0.1:7111/admin/info -----> $system_content if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then return -1 endi print =============== step7 - use dbs -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:6041/admin/all +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all print 23-> $system_content if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then return -1 endi print =============== step8 - monitor dbs -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:6041/admin/sqls +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls #print 24-> $system_content #if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance 
state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then # return -1 diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim index e14d0de8f2..4901c5b3fd 100644 --- a/tests/script/unique/http/opentsdb.sim +++ b/tests/script/unique/http/opentsdb.sim @@ -11,92 +11,92 @@ sql connect print ============================ dnode1 start print =============== step1 - parse -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/ +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ print $system_content if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db123456789012345678901234567890db +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db print $system_content if $system_content != @{"status":"error","code":5058,"desc":"database name too long"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ print $system_content if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put2 +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2 print $system_content if $system_content != @{"status":"error","code":5009,"desc":"http url parse error"}@ then return -1 endi -system_content curl -u root:taosdata -d '[]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5059,"desc":"invalid opentsdb json fromat"}@ then return -1 endi -system_content curl -u root:taosdata -d '{}' 
127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5062,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5063,"desc":"metric name type should be string"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5064,"desc":"metric name length is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5066,"desc":"timestamp not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5067,"desc":"timestamp type should be integer"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": 
{"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5068,"desc":"timestamp value smaller than 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5078,"desc":"value not find"}@ then return -1 @@ -104,49 +104,49 @@ endi ####### -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5069,"desc":"tags not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5073,"desc":"tag name is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 
18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5074,"desc":"tag name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5077,"desc":"tag value can not more than 64"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5076,"desc":"tag value is null"}@ then return -1 @@ -155,26 +155,26 @@ endi sleep 3000 print =============== step2 - insert single data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then return -1 endi print =============== step3 - multi-query data -system_content curl -u 
root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content @@ -182,7 +182,7 @@ if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content @@ -190,7 +190,7 @@ if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09- return -1 endi -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content @@ -199,7 +199,7 @@ if $system_content != @{"status":"succ","head":["count(*)"],"data":[[3]],"rows": endi print =============== step4 - summary-put data -system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put?details=false +system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false print $system_content @@ -207,7 +207,7 @@ if $system_content != @{"failed":0,"success":2}@ then return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content @@ -215,7 +215,7 @@ if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09- return -1 endi -system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content @@ -225,13 +225,13 @@ endi print =============== step5 - prepare data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 
1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content if $system_content != @{"status":"succ","head":["count(*)"],"data":[[7]],"rows":1}@ then return -1 From 70be4639a3cba9e464256d83e3d8893e76ff6ac9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 12 Aug 2020 22:39:10 +0800 Subject: [PATCH 128/190] TD-1047 change include files --- src/os/inc/os.h | 6 +++- src/os/inc/osArm64.h | 85 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 src/os/inc/osArm64.h diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 11c423a500..6b9517a4b2 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -25,7 +25,11 @@ extern "C" { #endif #ifdef _TD_LINUX_64 -#include "osLinux64.h" + #ifdef _TD_ARM_64 + #include "osArm64.h" + #elif + #include "osLinux64.h" + #endif #endif #ifdef _TD_LINUX_32 diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h new file mode 100644 index 0000000000..3ae08b45f4 --- /dev/null +++ b/src/os/inc/osArm64.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_OS_ARM64_H +#define TDENGINE_OS_ARM64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +} +#endif + +#endif From 819ceda31f09502236b0a20801bb3df02ada9258 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 12 Aug 2020 22:45:10 +0800 Subject: [PATCH 129/190] update client test case --- tests/pytest/client/client.py | 8 +------ tests/pytest/client/version.py | 41 ++++++++++++++++++++++++++++++++++ tests/pytest/fulltest.sh | 1 + tests/pytest/regressiontest.sh | 1 + 4 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 tests/pytest/client/version.py diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py index 6fd2a2b6bd..b40511094b 100644 --- a/tests/pytest/client/client.py +++ b/tests/pytest/client/client.py @@ -27,13 +27,7 @@ class TDTestCase: ret = tdSql.query('select database()') tdSql.checkData(0, 0, "db") - - ret = tdSql.query('select server_version()') - tdSql.checkData(0, 0, "2.0.0.0") - - ret = tdSql.query('select client_version()') - tdSql.checkData(0, 0, "2.0.0.0") - + ret = tdSql.query('select server_status()') tdSql.checkData(0, 0, 1) diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py new file mode 100644 index 0000000000..bc2e58f106 --- /dev/null +++ b/tests/pytest/client/version.py @@ -0,0 +1,41 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+
+        ret = tdSql.query('select server_version()')
+        tdSql.checkData(0, 0, "2.0.0.6")
+
+        ret = tdSql.query('select client_version()')
+        tdSql.checkData(0, 0, "2.0.0.6")
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index cbe82b2c8c..8e0f6314e0 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -158,6 +158,7 @@ python3 ./test.py -f alter/alter_table_crash.py
 
 # client
 python3 ./test.py -f client/client.py
+python3 ./test.py -f client/version.py
 
 # Misc
 python3 testCompress.py
diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh
index 61ec491f5d..20f4b4dc3b 100755
--- a/tests/pytest/regressiontest.sh
+++ b/tests/pytest/regressiontest.sh
@@ -152,6 +152,7 @@ python3 ./test.py -f alter/alter_table_crash.py
 
 # client
 python3 ./test.py -f client/client.py
+python3 ./test.py -f client/version.py
 
 # Misc
 python3 testCompress.py

From e826a65dc8ae27466383a7b9bc10ea8ada6f5386 Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Wed, 12 Aug 2020 22:48:28 +0800
Subject: [PATCH 130/190] TD-1047 undefine linux64 while cputype is aarch64

---
 cmake/platform.inc | 3 +++
 src/os/inc/os.h | 10 +++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/cmake/platform.inc b/cmake/platform.inc
index 4cb6262471..ffde56aab2 100755
--- a/cmake/platform.inc
+++ b/cmake/platform.inc
@@ -81,14 +81,17 @@ ENDIF ()
 # cmake -DCPUTYPE=aarch32 ..
or cmake -DCPUTYPE=aarch64 IF (${CPUTYPE} MATCHES "aarch32") SET(TD_LINUX TRUE) + SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) MESSAGE(STATUS "input cpuType: aarch32") ELSEIF (${CPUTYPE} MATCHES "aarch64") SET(TD_LINUX TRUE) + SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) MESSAGE(STATUS "input cpuType: aarch64") ELSEIF (${CPUTYPE} MATCHES "mips64") SET(TD_LINUX TRUE) + SET(TD_LINUX_64 FALSE) SET(TD_MIPS_64 TRUE) MESSAGE(STATUS "input cpuType: mips64") ELSEIF (${CPUTYPE} MATCHES "x64") diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 6b9517a4b2..2005072ad1 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -24,12 +24,12 @@ extern "C" { #include "osDarwin.h" #endif +#ifdef _TD_ARM_64 +#include "osArm64.h" +#endif + #ifdef _TD_LINUX_64 - #ifdef _TD_ARM_64 - #include "osArm64.h" - #elif - #include "osLinux64.h" - #endif +#include "osLinux64.h" #endif #ifdef _TD_LINUX_32 From d7d840cc977059aafc1f59592b4c72e7984e95a2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 12 Aug 2020 22:49:52 +0800 Subject: [PATCH 131/190] [td-225] remove invalid assert --- src/client/src/tscLocalMerge.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 8436777ddf..0733593284 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -1349,7 +1349,6 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey; - assert(pFillInfo->numOfRows == 0); int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); if (rows > 0) { // do interpo doFillResult(pSql, pLocalReducer, true); From 89a55172ea53f89303165c6d93bcfbd2398c4de2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 12 Aug 2020 23:00:23 +0800 Subject: [PATCH 132/190] [td-225] add some test case. 
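A note on the expected result of the second regression case below: the query window runs from 2018-09-18 08:00:00 to 14:00:00, i.e. six hours, and interval(1s) with fill(NULL) should produce one row per one-second slot, so the check asserts 6 * 3600 = 21600 rows.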
--- tests/script/general/parser/lastrow_query.sim | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index 72e8b4de95..9f52e45b80 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -51,7 +51,14 @@ if $data09 != NCHAR then return -1 endi +# regression test case 1 sql select count(*) from lr_tb1 where ts>'2018-09-18 08:45:00.1' and ts<'2018-09-18 08:45:00.2' if $row != 0 then return -1 endi + +# regression test case 2 +sql select count(*) from lr_db0.lr_stb0 where ts>'2018-9-18 8:00:00' and ts<'2018-9-18 14:00:00' interval(1s) fill(NULL); +if $row != 21600 then + return -1 +endi \ No newline at end of file From bd6f9ee91834eccf6b70e1f4874f68b0b57354f7 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 13 Aug 2020 00:17:48 +0800 Subject: [PATCH 133/190] TD-1047 change some definitions in arm64 platform --- cmake/define.inc | 1 + cmake/install.inc | 14 +++----------- cmake/platform.inc | 1 - src/os/CMakeLists.txt | 12 ++++-------- src/os/inc/os.h | 2 +- src/os/src/{linux64 => linux}/CMakeLists.txt | 0 src/os/src/{linux64 => linux}/linuxEnv.c | 0 src/rpc/test/CMakeLists.txt | 2 +- src/sync/test/CMakeLists.txt | 2 +- src/wal/test/CMakeLists.txt | 2 +- tests/comparisonTest/tdengine/CMakeLists.txt | 2 +- 11 files changed, 13 insertions(+), 25 deletions(-) rename src/os/src/{linux64 => linux}/CMakeLists.txt (100%) rename src/os/src/{linux64 => linux}/linuxEnv.c (100%) diff --git a/cmake/define.inc b/cmake/define.inc index b1e8f097be..2e3c639ecb 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -47,6 +47,7 @@ IF (TD_LINUX_32) ENDIF () IF (TD_ARM_64) + ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_ARM_64_) ADD_DEFINITIONS(-D_TD_ARM_) SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") diff --git a/cmake/install.inc b/cmake/install.inc index 432e925b05..997101c8d9 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -1,17 +1,9 @@ -IF (TD_LINUX_64) +IF (TD_LINUX) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") -ELSEIF (TD_LINUX_32) - IF (NOT TD_ARM) - EXIT () - ENDIF () - SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") - INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") - INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") - INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") -ELSEIF (TD_WINDOWS_64) +ELSEIF (TD_WINDOWS) SET(CMAKE_INSTALL_PREFIX C:/TDengine) IF (NOT TD_GODLL) #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) @@ -33,7 +25,7 @@ ELSEIF (TD_WINDOWS_64) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver) ENDIF () -ELSEIF (TD_DARWIN_64) +ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") diff --git a/cmake/platform.inc b/cmake/platform.inc index 
ffde56aab2..11ab8f301d 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -43,7 +43,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") ELSEIF (${CMAKE_SIZEOF_VOID_P} MATCHES 4) SET(TD_LINUX_32 TRUE) MESSAGE(STATUS "The current platform is Linux 32-bit") - ELSE () MESSAGE(FATAL_ERROR "The current platform is Linux neither 32-bit nor 64-bit, not supported yet") EXIT () diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index b4ad4ad915..4e44d29a02 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,15 +1,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) - ADD_SUBDIRECTORY(src/linux64) -ELSEIF (TD_LINUX_32) - ADD_SUBDIRECTORY(src/linux32) -ELSEIF (TD_DARWIN_64) +IF (TD_LINUX) + ADD_SUBDIRECTORY(src/linux) +ELSEIF (TD_DARWIN) ADD_SUBDIRECTORY(src/darwin) -ELSEIF (TD_WINDOWS_64) - ADD_SUBDIRECTORY(src/windows) -ELSEIF (TD_WINDOWS_32) +ELSEIF (TD_WINDOWS) ADD_SUBDIRECTORY(src/windows) ENDIF () diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 2005072ad1..4953416bde 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -24,7 +24,7 @@ extern "C" { #include "osDarwin.h" #endif -#ifdef _TD_ARM_64 +#ifdef _TD_ARM_64_ #include "osArm64.h" #endif diff --git a/src/os/src/linux64/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt similarity index 100% rename from src/os/src/linux64/CMakeLists.txt rename to src/os/src/linux/CMakeLists.txt diff --git a/src/os/src/linux64/linuxEnv.c b/src/os/src/linux/linuxEnv.c similarity index 100% rename from src/os/src/linux64/linuxEnv.c rename to src/os/src/linux/linuxEnv.c diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index 286c8e1680..383ce1b0f6 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -3,7 +3,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) -IF (TD_LINUX_64) +IF (TD_LINUX) LIST(APPEND CLIENT_SRC ./rclient.c) ADD_EXECUTABLE(rclient ${CLIENT_SRC}) TARGET_LINK_LIBRARIES(rclient trpc) diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index a309539024..27614454f9 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) INCLUDE_DIRECTORIES(../inc) LIST(APPEND CLIENT_SRC ./syncClient.c) diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index 6fdb03710e..6c232ce4b9 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) INCLUDE_DIRECTORIES(../inc) LIST(APPEND WALTEST_SRC ./waltest.c) diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index 2555bdce91..990612b8c3 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) add_executable(tdengineTest tdengineTest.c) target_link_libraries(tdengineTest taos_static tutil common pthread) ENDIF() From 86032d83e575323b720b161fce3d4f13afc5b24d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 00:35:10 +0800 Subject: [PATCH 134/190] [td-225] restrict the number of tags assignment. 
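This change bounds the tag assignment in taosFillCopyInputDataFromOneFilePage() so that no more than numOfTags tag values are written into pTags[]. For illustration, a minimal standalone sketch of the bounded copy is included here; it uses simplified stand-in types, not the real SFillInfo/SFillColInfo definitions. The sketch keeps the bound on the tag-copy branch itself, because a bound in the loop condition would also stop visiting regular columns once every tag slot is filled; the follow-up patch below moves the check accordingly.

    /* Illustrative sketch only: simplified stand-ins, not the actual tsdb
     * structures. Visit every input column, but never write more than
     * numOfTags entries into the tag buffer. */
    #include <stdio.h>
    #include <string.h>

    #define DEMO_COL_TAG 1

    typedef struct { int flag; int bytes; const char *data; } DemoCol;

    int main(void) {
      DemoCol cols[4] = {{0, 2, "ts"}, {DEMO_COL_TAG, 4, "tag1"},
                         {0, 4, "val1"}, {DEMO_COL_TAG, 4, "tag2"}};
      int  numOfTags  = 2;
      char tags[2][8] = {{0}};

      int t = 0;
      for (int i = 0; i < 4; ++i) {                          /* visit every column    */
        if (cols[i].flag == DEMO_COL_TAG && t < numOfTags) { /* bound only tag writes */
          memcpy(tags[t++], cols[i].data, (size_t)cols[i].bytes);
        }
      }
      printf("%s %s\n", tags[0], tags[1]);                   /* prints: tag1 tag2     */
      return 0;
    }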
--- src/query/src/qFill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index ac44feb576..d28d99ceb8 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -131,7 +131,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu assert(pFillInfo->numOfRows == pInput->num); int32_t t = 0; - for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + for(int32_t i = 0; i < pFillInfo->numOfCols && t < pFillInfo->numOfTags; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; char* s = pInput->data + pCol->col.offset * pInput->num; From 259928d4de63b01d4bf76b95b1bc2ba2d2d66985 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 00:59:49 +0800 Subject: [PATCH 135/190] [td-225] fix bugs. --- src/query/src/qFill.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index d28d99ceb8..d29186ba49 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -131,13 +131,13 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu assert(pFillInfo->numOfRows == pInput->num); int32_t t = 0; - for(int32_t i = 0; i < pFillInfo->numOfCols && t < pFillInfo->numOfTags; ++i) { + for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; char* s = pInput->data + pCol->col.offset * pInput->num; memcpy(pFillInfo->pData[i], s, pInput->num * pCol->col.bytes); - if (pCol->flag == TSDB_COL_TAG) { // copy the tag value + if (pCol->flag == TSDB_COL_TAG && t < pFillInfo->numOfTags) { // copy the tag value memcpy(pFillInfo->pTags[t++], pFillInfo->pData[i], pCol->col.bytes); } } From 29c5d7773a4cede0c29499be26665f019851b7e6 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 09:31:46 +0800 Subject: [PATCH 136/190] [add link for libtaos.so to /usr/lib64] --- packaging/release.sh | 50 ++++++++++++++++++++----------- packaging/tools/install.sh | 6 ++-- packaging/tools/install_client.sh | 9 ++++-- packaging/tools/make_install.sh | 7 +++++ 4 files changed, 50 insertions(+), 22 deletions(-) diff --git a/packaging/release.sh b/packaging/release.sh index 7a585431a2..8a4b473a4b 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -10,6 +10,7 @@ set -e # -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] # -V [stable | beta] # -l [full | lite] +# -n [2.0.0.3] # set parameters by default value verMode=edge # [cluster, edge] @@ -17,8 +18,9 @@ verType=stable # [stable, beta] cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] pagMode=full # [full | lite] +verNumber="" -while getopts "hv:V:c:o:l:" arg +while getopts "hv:V:c:o:l:n:" arg do case $arg in v) @@ -37,12 +39,16 @@ do #echo "pagMode=$OPTARG" pagMode=$(echo $OPTARG) ;; + n) + #echo "verNumber=$OPTARG" + verNumber=$(echo $OPTARG) + ;; o) #echo "osType=$OPTARG" osType=$(echo $OPTARG) ;; h) - echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite]" + echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite] -n [version number]" exit 0 ;; ?) 
#unknow option @@ -52,7 +58,7 @@ do esac done -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode}" +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} verNumber=${verNumber}" curr_dir=$(pwd) @@ -80,7 +86,6 @@ function is_valid_version() { if [[ $1 =~ $rx ]]; then return 0 fi - return 1 } @@ -89,26 +94,25 @@ function vercomp () { echo 0 exit 0 fi + local IFS=. local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do ver1[i]=0 done for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]] - then + if [[ -z ${ver2[i]} ]]; then # fill empty fields in ver2 with zeros ver2[i]=0 fi - if ((10#${ver1[i]} > 10#${ver2[i]})) - then + if ((10#${ver1[i]} > 10#${ver2[i]})); then echo 1 exit 0 fi - if ((10#${ver1[i]} < 10#${ver2[i]})) - then + if ((10#${ver1[i]} < 10#${ver2[i]})); then echo 2 exit 0 fi @@ -120,10 +124,11 @@ function vercomp () { version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2) compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2) -while true; do - read -p "Do you want to release a new version? [y/N]: " is_version_change +if [ -z ${verNumber} ]; then + while true; do + read -p "Do you want to release a new version? [y/N]: " is_version_change - if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then + if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then read -p "Please enter the new version: " tversion while true; do if (! is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then @@ -152,13 +157,24 @@ while true; do done break - elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then + elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then echo "Use old version: ${version} compatible version: ${compatible_version}." break - else + else continue - fi -done + fi + done +else + echo "old version: $version, new version: $verNumber" + #if ( ! is_valid_version $verNumber ) || [[ "$(vercomp $version $verNumber)" == '2' ]]; then + # echo "please enter correct version" + # exit 0 + #else + version=${verNumber} + #fi +fi + +echo "=======================new version number: ${version}======================================" # output the version info to the buildinfo file. 
build_time=$(date +"%F %R") diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 0700ed4682..70c8f83923 100644 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -196,8 +196,10 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + if [ -d ${lib64_link_dir} ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi #if [ "$verMode" == "cluster" ]; then # # Compatible with version 1.5 diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 0ec8dbd0cc..396b4a80a7 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -91,7 +91,7 @@ function install_bin() { ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* - #Make link + #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" == "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : @@ -115,8 +115,11 @@ function install_lib() { if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi else ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 2200c7f13d..8324b3b25a 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -34,6 +34,7 @@ cfg_install_dir="/etc/taos" if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" else bin_link_dir="/usr/local/bin" @@ -173,12 +174,18 @@ function install_bin() { function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c) if [ "$osType" != "Darwin" ]; then ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib64_link_dir}/libtaos.so.1 + ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so + fi else ${csudo} cp 
${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib From 9ef11679bd958df6d293c57a203a489f48523b0a Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 11:18:55 +0800 Subject: [PATCH 137/190] [TD-1121] --- packaging/tools/install.sh | 2 ++ packaging/tools/install_client.sh | 10 ++++++---- packaging/tools/make_install.sh | 3 +++ packaging/tools/makeclient.sh | 2 +- packaging/tools/makepkg.sh | 2 +- packaging/tools/post.sh | 3 ++- packaging/tools/preun.sh | 7 +++++-- 7 files changed, 20 insertions(+), 9 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 70c8f83923..728f40aff1 100644 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -168,6 +168,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -176,6 +177,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 396b4a80a7..81e20fcbfb 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -88,15 +88,17 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosdump || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* - #Make link + #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" == "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : } function clean_lib() { @@ -117,9 +119,9 @@ function install_lib() { ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so if [ -d "${lib64_link_dir}" ]; then - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi else ${csudo} ln -s ${install_main_dir}/driver/libtaos.* 
${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 8324b3b25a..9af153d3d2 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -142,6 +142,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : + ${csudo} rm -f ${bin_link_dir}/set_core || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -150,6 +151,7 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + ${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin else ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin fi @@ -162,6 +164,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : fi if [ "$osType" != "Darwin" ]; then diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 6120f9fcc2..855d0b9c27 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 1d65dd8069..6a1a282e01 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index e9a742e632..0feb64c795 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -90,6 +90,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} chmod 0555 ${bin_dir}/* @@ -97,7 +98,7 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : -# [ -x ${bin_dir}/remove.sh ] && ${csudo} ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || : + [ -x 
${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } function install_config() { diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 0533410802..07b43c0e49 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -8,6 +8,7 @@ NC='\033[0m' bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" data_link_dir="/usr/local/taos/data" @@ -104,10 +105,12 @@ ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : +${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : -${csudo} rm -f ${inc_link_dir}/taoserror.h || : -${csudo} rm -f ${lib_link_dir}/libtaos.* || : +${csudo} rm -f ${inc_link_dir}/taoserror.h || : +${csudo} rm -f ${lib_link_dir}/libtaos.* || : +${csudo} rm -f ${lib64_link_dir}/libtaos.* || : ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : From a5d1e97058abc5dfa669a58caf15249a21f35f49 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 11:19:10 +0800 Subject: [PATCH 138/190] [TD-1121] --- packaging/tools/set_core.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 packaging/tools/set_core.sh diff --git a/packaging/tools/set_core.sh b/packaging/tools/set_core.sh new file mode 100644 index 0000000000..7586d531d1 --- /dev/null +++ b/packaging/tools/set_core.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# +# This file is used to set config for core when taosd crash + +set -e +# set -x + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +#ulimit -c unlimited +${csudo} echo "ulimit -c unlimited" >> /etc/profile +source /etc/profile + +${csudo} mkdir -p /coredump +${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' +${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern From 0f8072dde5d2260274927e208d609fef52675cd4 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 11:25:56 +0800 Subject: [PATCH 139/190] [TD-1121] --- packaging/release.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/release.sh b/packaging/release.sh index 8a4b473a4b..2302b45875 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -48,7 +48,12 @@ do osType=$(echo $OPTARG) ;; h) - echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite] -n [version number]" + echo "Usage: `basename $0` -v [cluster | edge] " + echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " + echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] " + echo " -V [stable | beta] " + echo " -l [full | lite] " + echo " -n [version number] " exit 0 ;; ?) 
#unknow option From 3867d5e8637efb241f35001ce2809fffff1a89aa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 11:31:57 +0800 Subject: [PATCH 140/190] [td-225] set the correct object for extracting error msg --- src/kit/shell/src/shellEngine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 2838dc5386..6c4bd1358b 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -310,7 +310,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { if (error_no == 0) { printf("Query OK, %d row(s) in set (%.6fs)\n", numOfRows, (et - st) / 1E6); } else { - printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(con), numOfRows, (et - st) / 1E6); + printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6); } } else { int num_rows_affacted = taos_affected_rows(pSql); From 607d4fd2389868525c789fffbcdf7fd2d8f8ac96 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 11:36:03 +0800 Subject: [PATCH 141/190] [td-225] add check if cache is destoried. --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index fee7ed3c8b..bd903c8c23 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -190,7 +190,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext } void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) { - if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) { + if (pCacheObj == NULL || pCacheObj->pHashTable == NULL || pCacheObj->deleting == 1) { return NULL; } @@ -261,7 +261,7 @@ static void incRefFn(void* ptNode) { } void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { + if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0 || pCacheObj->deleting == 1) { return NULL; } From 7bcd7447eb97359b4bbabe3a923cfbe29d61ac16 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 11:37:42 +0800 Subject: [PATCH 142/190] [td-225] return error code in tsdb to client during the data retrieval. 
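For context, the shape of the error handling added below can be summarized with a small standalone sketch; it uses generic names rather than the actual qExecutor/tsdb symbols. The block iterator returns false both when the scan is complete and when loading fails, so the caller has to inspect a separate error code afterwards and unwind with longjmp, letting the real error code, rather than an empty result, be carried back to the client.

    /* Minimal standalone sketch of the error-propagation pattern (assumed,
     * simplified names: scanError stands in for terrno, env for the
     * runtime environment's jump buffer). */
    #include <setjmp.h>
    #include <stdio.h>

    static int     scanError = 0;        /* stand-in for terrno                 */
    static jmp_buf env;                  /* stand-in for pRuntimeEnv->env       */

    static int nextDataBlock(int *step)  /* stand-in for tsdbNextDataBlock      */
    {
      if (*step == 2) { scanError = -1; return 0; }  /* fail on the third call  */
      (*step)++;
      return 1;
    }

    static void scanAllDataBlocks(void) {
      int step = 0;
      while (nextDataBlock(&step)) {
        printf("process block %d\n", step);
      }
      if (scanError != 0) {              /* iterator may have stopped on error  */
        longjmp(env, scanError);
      }
    }

    int main(void) {
      int code = setjmp(env);
      if (code == 0) {
        scanAllDataBlocks();
        printf("query completed\n");
      } else {
        printf("query aborted, error code %d returned to client\n", code);
      }
      return 0;
    }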
--- src/dnode/src/dnodeVRead.c | 4 ++-- src/query/src/qExecutor.c | 5 +++++ src/tsdb/src/tsdbRead.c | 25 +++++++++++++++++++------ src/vnode/src/vnodeRead.c | 19 +++++++++++-------- 4 files changed, 37 insertions(+), 16 deletions(-) diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index c99cf87b21..d66ebf9772 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -211,8 +211,8 @@ static void *dnodeProcessReadQueue(void *param) { dnodeSendRpcReadRsp(pVnode, pReadMsg, code); } else { if (code == TSDB_CODE_QRY_HAS_RSP) { - dnodeSendRpcReadRsp(pVnode, pReadMsg, TSDB_CODE_SUCCESS); - } else { + dnodeSendRpcReadRsp(pVnode, pReadMsg, pReadMsg->rpcMsg.code); + } else { // code == TSDB_CODE_NOT_READY, do not return msg to client dnodeDispatchNonRspMsg(pVnode, pReadMsg, code); } } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 34700a33f3..20e43866d7 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2338,6 +2338,11 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { continue; } + if (terrno != TSDB_CODE_SUCCESS) { // load data block failed, abort query + longjmp(pRuntimeEnv->env, terrno); + break; + } + // query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 98153efe3e..0a72cfa7b9 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -679,7 +679,13 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo if (pCheckInfo->pDataCols == NULL) { STsdbMeta* pMeta = tsdbGetMeta(pRepo); + pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); + if (pCheckInfo->pDataCols == NULL) { + tsdbError("%p failed to malloc buf, %p", pQueryHandle, pQueryHandle->qinfo); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return blockLoaded; + } } STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); @@ -745,7 +751,11 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* return; } - doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot); + // return error, add test cases + if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { + + } + doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { /* @@ -1714,9 +1724,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); STableCheckInfo info = { .lastKey = pSecQueryHandle->window.skey, - //.tableId = pCheckInfo->tableId, .pTableObj = pCheckInfo->pTableObj, }; + info.tableId = pCheckInfo->tableId; taosArrayPush(pSecQueryHandle->pTableCheckInfo, &info); @@ -1726,8 +1736,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { tsdbInitCompBlockLoadInfo(&pSecQueryHandle->compBlockLoadInfo); pSecQueryHandle->defaultLoadColumn = taosArrayClone(pQueryHandle->defaultLoadColumn); - bool ret = tsdbNextDataBlock((void*) pSecQueryHandle); - assert(ret); + if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { + return false; + } tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); @@ -1770,7 +1781,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) 
{ bool exists = true; int32_t code = getDataBlocksInFiles(pQueryHandle, &exists); if (code != TSDB_CODE_SUCCESS) { - return false; + return code; } if (exists) { @@ -2048,8 +2059,10 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { return pHandle->pColumns; } else { // only load the file block SCompBlock* pBlock = pBlockInfo->compBlock; - doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot); + if (!doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot)) { + return NULL; + } // todo refactor int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 8ca76ef22d..c41b245794 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -93,8 +93,11 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle, vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { - pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); - memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + memset(pRsp, 0, sizeof(SRetrieveTableRsp)); + pRsp->completed = true; + + pRet->rsp = pRsp; *freeHandle = true; } @@ -200,18 +203,18 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, pReadMsg->rpcMsg.handle); - code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); - // todo test the error code case - if (code == TSDB_CODE_SUCCESS) { - code = TSDB_CODE_QRY_HAS_RSP; - } + // set the real rsp error code + pReadMsg->rpcMsg.code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); + + // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client + code = TSDB_CODE_QRY_HAS_RSP; } else { freehandle = qQueryCompleted(*qhandle); } // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle. - // if not build result, free it not by forced. + // If the building of result is not required, simply free it. Otherwise, mandatorily free the qhandle if (freehandle || (!buildRes)) { qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); } From fa0b77d19b043c9cd41b267895e401871617042d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 11:39:00 +0800 Subject: [PATCH 143/190] [td-225] add log if malloc failed. --- src/common/src/tdataformat.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 0be0216174..e7f40442a0 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ #include "tdataformat.h" +#include "tulog.h" #include "talgo.h" #include "tcoding.h" #include "wchar.h" @@ -311,10 +312,14 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); - if (pCols == NULL) return NULL; + if (pCols == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCols), strerror(errno)); + return NULL; + } pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); if (pCols->cols == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } @@ -326,6 +331,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->buf = malloc(pCols->bufSize); if (pCols->buf == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } From 9ab57b25ecd67484acf652001a4f340b3b8eac37 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 11:39:56 +0800 Subject: [PATCH 144/190] [td-225] set the colId for tag columns. --- src/client/src/tscLocalMerge.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 0733593284..7818765e57 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -142,6 +142,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { pFillCol[i].col.bytes = pExpr->resBytes; pFillCol[i].col.type = (int8_t)pExpr->resType; + pFillCol[i].col.colId = pExpr->colInfo.colId; pFillCol[i].flag = pExpr->colInfo.flag; pFillCol[i].col.offset = offset; pFillCol[i].functionId = pExpr->functionId; From 24055909786a1048427711e5a79215949ec374e6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 12:01:25 +0800 Subject: [PATCH 145/190] [td-225] minor refactoring. --- src/query/src/qExecutor.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 20e43866d7..6f54876b0a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6501,8 +6501,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); - int32_t code = pQInfo->code; - if (code == TSDB_CODE_SUCCESS) { + if (pQInfo->code == TSDB_CODE_SUCCESS) { (*pRsp)->offset = htobe64(pQuery->limit.offset); (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); } else { @@ -6511,11 +6510,10 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co } (*pRsp)->precision = htons(pQuery->precision); - if (pQuery->rec.rows > 0 && code == TSDB_CODE_SUCCESS) { - code = doDumpQueryResult(pQInfo, (*pRsp)->data); + if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { + doDumpQueryResult(pQInfo, (*pRsp)->data); } else { setQueryStatus(pQuery, QUERY_OVER); - code = pQInfo->code; } pQInfo->rspContext = NULL; @@ -6529,7 +6527,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); } - return code; + return pQInfo->code; } int32_t qQueryCompleted(qinfo_t qinfo) { From fe6c8d6d999f2182775cbb37bc7e9da0973500ba Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 13 Aug 2020 12:13:02 +0800 Subject: [PATCH 146/190] add cii best practice badge. 
[TECO-5] --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 41629d6df3..2d84389f78 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ [![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine) [![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) [![TDengine](TDenginelogo.png)](https://www.taosdata.com) From 69fd77276637d8a2b88ea6d9bda45181ee425863 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 13:26:28 +0800 Subject: [PATCH 147/190] [TD-1134] --- src/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 0d7df38b83..c11b2667e6 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -54,7 +54,7 @@ int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string -int32_t tsEnableCoreFile = 0; +int32_t tsEnableCoreFile = 1; int32_t tsMaxBinaryDisplayWidth = 30; /* From d7bf44a03b25ee6032f52a9c465774e36e6f062c Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 13:57:39 +0800 Subject: [PATCH 148/190] [fix bug] --- packaging/tools/makepkg.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 6a1a282e01..16e0354dcf 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" From faf2f73f947e642cb388cf253600057931f0547e Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 14:41:49 +0800 Subject: [PATCH 149/190] [add ldconfig] --- packaging/tools/install.sh | 2 ++ packaging/tools/install_client.sh | 2 ++ packaging/tools/make_install.sh | 2 ++ 3 files changed, 6 insertions(+) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 728f40aff1..f27595a356 100644 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -209,6 +209,8 @@ function install_lib() { # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar # ${csudo} chmod 777 ${v15_java_app_dir} || : #fi + + ${csudo} ldconfig } function install_header() { diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 81e20fcbfb..6a1b7be191 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -126,6 +126,8 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* 
${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi + + ${csudo} ldconfig } function install_header() { diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 9af153d3d2..1a5c4d75b5 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -194,6 +194,8 @@ function install_lib() { ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi + + ${csudo} ldconfig } function install_header() { From b36c9b1bdc3aa2993b340c48444b39b53743ba4f Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 14:47:18 +0800 Subject: [PATCH 150/190] [fix bug] --- packaging/tools/set_core.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) mode change 100644 => 100755 packaging/tools/set_core.sh diff --git a/packaging/tools/set_core.sh b/packaging/tools/set_core.sh old mode 100644 new mode 100755 index 7586d531d1..05404d7a64 --- a/packaging/tools/set_core.sh +++ b/packaging/tools/set_core.sh @@ -11,7 +11,9 @@ if command -v sudo > /dev/null; then fi #ulimit -c unlimited -${csudo} echo "ulimit -c unlimited" >> /etc/profile +${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile +${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile +#${csudo} echo "ulimit -c unlimited" >> /etc/profile source /etc/profile ${csudo} mkdir -p /coredump From 88a1e6b16e9174ceccc2568ea9d6bc4fff1bbeae Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 13 Aug 2020 14:49:32 +0800 Subject: [PATCH 151/190] [fix bug] --- packaging/tools/set_core.sh | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/packaging/tools/set_core.sh b/packaging/tools/set_core.sh index 05404d7a64..083b0516db 100755 --- a/packaging/tools/set_core.sh +++ b/packaging/tools/set_core.sh @@ -11,11 +11,10 @@ if command -v sudo > /dev/null; then fi #ulimit -c unlimited -${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile -${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile -#${csudo} echo "ulimit -c unlimited" >> /etc/profile +${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile ||: +${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile ||: source /etc/profile -${csudo} mkdir -p /coredump -${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' -${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern +${csudo} mkdir -p /coredump ||: +${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' ||: +${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern ||: From dcf17ab0936b9efb2ae0596f61e6fcdbe43ebbdb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 15:06:22 +0800 Subject: [PATCH 152/190] [td-225] check errors from tsdb --- src/client/src/tscSystem.c | 1 + src/tsdb/src/tsdbRead.c | 94 +++++++++++++---------- tests/script/general/parser/testSuite.sim | 48 ++++++------ 3 files changed, 80 insertions(+), 63 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c742db42ab..211e673754 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -158,6 +158,7 @@ void taos_init() { pthread_once(&tscinit, taos_init_imp); } void taos_cleanup() { if (tscCacheHandle != NULL) { taosCacheCleanup(tscCacheHandle); + tscCacheHandle = NULL; } if (tscQhandle != NULL) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0a72cfa7b9..a7dda9a4af 100644 --- 
a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -16,7 +16,6 @@ #include "os.h" #include "tulog.h" #include "talgo.h" -#include "tutil.h" #include "tcompare.h" #include "exception.h" @@ -599,6 +598,8 @@ static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSK static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks) { // load all the comp offset value for all tables in this file + int32_t code = TSDB_CODE_SUCCESS; + *numOfBlocks = 0; size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); @@ -606,7 +607,10 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); pCheckInfo->numOfBlocks = 0; - tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb); + if (tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb) != TSDB_CODE_SUCCESS) { + code = terrno; + break; + } SCompIdx* compIndex = &pQueryHandle->rhelper.curCompIdx; @@ -619,7 +623,11 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo assert(compIndex->len > 0); char* t = realloc(pCheckInfo->pCompInfo, compIndex->len); - assert(t != NULL); + if (t == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + code = TSDB_CODE_TDB_OUT_OF_MEMORY; + break; + } pCheckInfo->pCompInfo = (SCompInfo*) t; pCheckInfo->compSize = compIndex->len; @@ -661,7 +669,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo (*numOfBlocks) += pCheckInfo->numOfBlocks; } - return TSDB_CODE_SUCCESS; + return code; } #define GET_FILE_DATA_BLOCK_INFO(_checkInfo, _block) \ @@ -672,9 +680,8 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo .uid = (_checkInfo)->tableId.uid}) -static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { +static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { STsdbRepo *pRepo = pQueryHandle->pTsdb; - bool blockLoaded = false; int64_t st = taosGetTimestampUs(); if (pCheckInfo->pDataCols == NULL) { @@ -684,7 +691,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo if (pCheckInfo->pDataCols == NULL) { tsdbError("%p failed to malloc buf, %p", pQueryHandle, pQueryHandle->qinfo); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return blockLoaded; + return terrno; } } @@ -694,17 +701,18 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); int16_t* colIds = pQueryHandle->defaultLoadColumn->pData; + int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, (int)(QH_GET_NUM_OF_COLS(pQueryHandle))); - if (ret == TSDB_CODE_SUCCESS) { - SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; - - pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; - pBlockLoadInfo->slot = pQueryHandle->cur.slot; - pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid; - - blockLoaded = true; + if (ret != TSDB_CODE_SUCCESS) { + return terrno; } + SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; + + pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; + pBlockLoadInfo->slot = pQueryHandle->cur.slot; + pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid; + SDataCols* pCols = 
pQueryHandle->rhelper.pDataCols[0]; assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); @@ -715,12 +723,14 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64" , rows:%d, elapsed time:%"PRId64 " us, %p", pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo); - return blockLoaded; + + return TSDB_CODE_SUCCESS; } -static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ +static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); + int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); @@ -748,12 +758,12 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* cur->mixBlock = true; cur->blockCompleted = false; - return; + return code; } // return error, add test cases - if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { - + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + return code; } doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); @@ -774,16 +784,20 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* cur->lastKey = binfo.window.ekey + (ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1); pCheckInfo->lastKey = cur->lastKey; } + + return code; } -static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { +static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) { SQueryFilePos* cur = &pQueryHandle->cur; + int32_t code = TSDB_CODE_SUCCESS; if (ASCENDING_TRAVERSE(pQueryHandle->order)) { // query ended in/started from current block if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { - if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { - return false; + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + *exists = false; + return code; } SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; @@ -799,12 +813,13 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock assert(pCheckInfo->lastKey <= pBlock->keyLast); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { // the whole block is loaded in to buffer - handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); + code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } else { //desc order, query ended in current block if (pQueryHandle->window.ekey > pBlock->keyFirst || pCheckInfo->lastKey < pBlock->keyLast) { - if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { - return false; + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + *exists = false; + return code; } SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; @@ -817,11 +832,12 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock assert(pCheckInfo->lastKey >= pBlock->keyFirst); doMergeTwoLevelData(pQueryHandle, pCheckInfo, 
pBlock); } else { - handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); + code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } - return pQueryHandle->realNumOfRows > 0; + *exists = pQueryHandle->realNumOfRows > 0; + return code; } static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) { @@ -1577,9 +1593,7 @@ static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* ex cur->fid = pQueryHandle->pFileGroup->fileId; STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot]; - *exists = loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo); - - return TSDB_CODE_SUCCESS; + return loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo, exists); } static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists) { @@ -1618,16 +1632,14 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists cur->blockCompleted = false; STableBlockInfo* pNext = &pQueryHandle->pDataBlockInfo[cur->slot]; - *exists = loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo); - - return TSDB_CODE_SUCCESS; + return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo, exists); } } else { tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo); - handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); + int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); *exists = pQueryHandle->realNumOfRows > 0; - return TSDB_CODE_SUCCESS; + return code; } } } @@ -1665,8 +1677,11 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { return false; } - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); + tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn); + if (terrno != TSDB_CODE_SUCCESS) { + return false; + } if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { // data already retrieve, discard other data rows and return @@ -1737,6 +1752,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { pSecQueryHandle->defaultLoadColumn = taosArrayClone(pQueryHandle->defaultLoadColumn); if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { + tsdbCleanupQueryHandle(pSecQueryHandle); return false; } @@ -2059,10 +2075,10 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { return pHandle->pColumns; } else { // only load the file block SCompBlock* pBlock = pBlockInfo->compBlock; - - if (!doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot)) { + if (doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot) != TSDB_CODE_SUCCESS) { return NULL; } + // todo refactor int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1); diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index ccd1aa9940..aafba2d328 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -1,27 +1,27 @@ -#sleep 2000 -#run general/parser/alter.sim -#sleep 2000 -#run general/parser/alter1.sim -#sleep 2000 -#run general/parser/alter_stable.sim -#sleep 2000 -#run general/parser/auto_create_tb.sim -#sleep 2000 -#run general/parser/auto_create_tb_drop_tb.sim -#sleep 2000 -#run general/parser/col_arithmetic_operation.sim -#sleep 2000 -#run 
general/parser/columnValue.sim -#sleep 2000 -#run general/parser/commit.sim -#sleep 2000 -#run general/parser/create_db.sim -#sleep 2000 -#run general/parser/create_mt.sim -#sleep 2000 -#run general/parser/create_tb.sim -#sleep 2000 -#run general/parser/dbtbnameValidate.sim +sleep 2000 +run general/parser/alter.sim +sleep 2000 +run general/parser/alter1.sim +sleep 2000 +run general/parser/alter_stable.sim +sleep 2000 +run general/parser/auto_create_tb.sim +sleep 2000 +run general/parser/auto_create_tb_drop_tb.sim +sleep 2000 +run general/parser/col_arithmetic_operation.sim +sleep 2000 +run general/parser/columnValue.sim +sleep 2000 +run general/parser/commit.sim +sleep 2000 +run general/parser/create_db.sim +sleep 2000 +run general/parser/create_mt.sim +sleep 2000 +run general/parser/create_tb.sim +sleep 2000 +run general/parser/dbtbnameValidate.sim sleep 2000 run general/parser/fill.sim sleep 2000 From d1382a7a048bdf50df2cc19b2bc09ce4d41e2a24 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 15:07:04 +0800 Subject: [PATCH 153/190] [td-225] refactoring codes. --- src/query/src/qExecutor.c | 52 ++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6f54876b0a..2c02f476d5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2312,13 +2312,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { @@ -2357,6 +2351,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + // if the result buffer is not full, set the query complete if (!Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { setQueryStatus(pQuery, QUERY_COMPLETED); @@ -4046,14 +4044,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pQueryHandle)) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); @@ -4073,6 +4064,10 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { break; } } + + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } } static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { @@ -4097,14 +4092,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { STableQueryInfo *pTableQueryInfo = pQuery->current; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo); if (QUERY_IS_ASC_QUERY(pQuery)) { @@ -4200,6 +4188,11 
@@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { } } + // check for error + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + return true; } @@ -4416,14 +4409,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(pQInfo)) { @@ -4457,6 +4443,10 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey); } + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + updateWindowResNumOfRes(pRuntimeEnv); int64_t et = taosGetTimestampMs(); From d8ee718a1526f3c290d8b781ff45419d56019820 Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Thu, 13 Aug 2020 15:24:48 +0800 Subject: [PATCH 154/190] Update administrator-ch.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修改了错别字 --- documentation20/webdocs/markdowndocs/administrator-ch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index 470c718af1..813d06a660 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -39,7 +39,7 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable 用户可以通过参数keep,设置数据在磁盘中的最大保存时长。为进一步减少存储成本,TDengine还提供多级存储,最冷的数据可以存放在最廉价的存储介质上,应用的访问不用做任何调整,只是读取速度降低了。 -为提高速度,可以配置多快硬盘,这样可以并发写入或读取数据。需要提醒的是,TDengine采取多副本的方式提供数据的高可靠,因此不再需要采用昂贵的磁盘阵列。 +为提高速度,可以配置多块硬盘,这样可以并发写入或读取数据。需要提醒的是,TDengine采取多副本的方式提供数据的高可靠,因此不再需要采用昂贵的磁盘阵列。 ### 物理机或虚拟机台数 From b622154f69b6de5f4a39260e1965c90faf5084b8 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 13 Aug 2020 15:42:29 +0800 Subject: [PATCH 155/190] update java subscription interface --- .../webdocs/markdowndocs/connector-ch.md | 43 ++++++ .../com/taosdata/jdbc/TSDBConnection.java | 13 +- .../com/taosdata/jdbc/TSDBJNIConnector.java | 16 +-- .../java/com/taosdata/jdbc/TSDBSubscribe.java | 136 ++---------------- .../src/test/java/TestAsyncTSDBSubscribe.java | 92 ------------ .../jdbc/src/test/java/TestTSDBSubscribe.java | 92 ++++++------ .../com/taosdata/jdbc/AsyncSubscribeTest.java | 100 ------------- .../java/com/taosdata/jdbc/SubscribeTest.java | 12 +- 8 files changed, 118 insertions(+), 386 deletions(-) delete mode 100644 src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java delete mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index ae39c78101..4992092864 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -460,6 +460,49 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 订阅 + +#### 创建 + +```java +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +``` + +`subscribe` 方法的三个参数含义如下: + +* topic:订阅的主题(即名称),此参数是订阅的唯一标识 +* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 +* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + +如上面的例子将使用 SQL 
语句 `select * from meters` 创建一个名为 `topic' 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 + +#### 消费数据 + +```java +int total = 0; +while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); + Thread.sleep(1000); +} +``` + +`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 + +#### 关闭订阅 + +```java +sub.close(true); +``` + +`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 + + ### 关闭资源 ```java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 062cb63cfd..2ce39b7ee4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -84,12 +84,17 @@ public class TSDBConnection implements Connection { } } - public TSDBSubscribe createSubscribe() throws SQLException { - if (!this.connector.isClosed()) { - return new TSDBSubscribe(this.connector); - } else { + public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException { + if (this.connector.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } + + long id = this.connector.subscribe(topic, sql, restart, 0); + if (id == 0) { + throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription")); + } + + return new TSDBSubscribe(this.connector, id); } public PreparedStatement prepareStatement(String sql) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 13fa2eda81..bab3c79089 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -254,29 +254,29 @@ public class TSDBJNIConnector { private native int closeConnectionImp(long connection); /** - * Subscribe to a table in TSDB + * Create a subscription */ - public long subscribe(String topic, String sql, boolean restart, int period) { + long subscribe(String topic, String sql, boolean restart, int period) { return subscribeImp(this.taos, restart, topic, sql, period); } - public native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); + private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); /** - * Consume a subscribed table + * Consume a subscription */ - public long consume(long subscription) { - return this.consumeImp(subscription); + long consume(long subscription) { + return this.consumeImp(subscription); } private native long consumeImp(long subscription); /** - * Unsubscribe a table + * Unsubscribe, close a subscription * * @param subscription */ - public void unsubscribe(long subscription, boolean isKeep) { + void unsubscribe(long subscription, boolean isKeep) { unsubscribeImp(subscription, isKeep); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index e20c6a815c..deffd9aa2a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ 
-22,81 +22,28 @@ import java.util.concurrent.*; public class TSDBSubscribe { private TSDBJNIConnector connecter = null; - private static ScheduledExecutorService pool; - private static Map timerTaskMap = new ConcurrentHashMap<>(); - private static Map scheduledMap = new ConcurrentHashMap(); + private long id = 0; - private static class TimerInstance { - private static final ScheduledExecutorService instance = Executors.newScheduledThreadPool(1); - } - - public static ScheduledExecutorService getTimerInstance() { - return TimerInstance.instance; - } - - public TSDBSubscribe(TSDBJNIConnector connecter) throws SQLException { + TSDBSubscribe(TSDBJNIConnector connecter, long id) throws SQLException { if (null != connecter) { this.connecter = connecter; + this.id = id; } else { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } } /** - * sync subscribe + * consume * - * @param topic - * @param sql - * @param restart - * @param period - * @throws SQLException + * @throws OperationsException, SQLException */ - public long subscribe(String topic, String sql, boolean restart, int period) throws SQLException { + public TSDBResultSet consume() throws OperationsException, SQLException { if (this.connecter.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - if (period < 1000) { - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.INVALID_VARIABLES)); - } - return this.connecter.subscribe(topic, sql, restart, period); - } - /** - * async subscribe - * - * @param topic - * @param sql - * @param restart - * @param period - * @param callBack - * @throws SQLException - */ - public long subscribe(String topic, String sql, boolean restart, int period, TSDBSubscribeCallBack callBack) throws SQLException { - if (this.connecter.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - final long subscription = this.connecter.subscribe(topic, sql, restart, period); - if (null != callBack) { - pool = getTimerInstance(); - - TSDBTimerTask timerTask = new TSDBTimerTask(subscription, callBack); - - timerTaskMap.put(subscription, timerTask); - - ScheduledFuture scheduledFuture = pool.scheduleAtFixedRate(timerTask, 1, 1000, TimeUnit.MILLISECONDS); - scheduledMap.put(subscription, scheduledFuture); - } - return subscription; - } - - public TSDBResultSet consume(long subscription) throws OperationsException, SQLException { - if (this.connecter.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - if (0 == subscription) { - throw new OperationsException("Invalid use of consume"); - } - long resultSetPointer = this.connecter.consume(subscription); + long resultSetPointer = this.connecter.consume(this.id); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); @@ -108,77 +55,16 @@ public class TSDBSubscribe { } /** - * cancel subscribe + * close subscription * - * @param subscription - * @param isKeep + * @param keepProgress * @throws SQLException */ - public void unsubscribe(long subscription, boolean isKeep) throws SQLException { + public void close(boolean keepProgress) throws SQLException { if (this.connecter.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - - if (null != timerTaskMap.get(subscription)) { - synchronized (timerTaskMap.get(subscription)) { - while (1 == 
timerTaskMap.get(subscription).getState()) { - try { - Thread.sleep(10); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - timerTaskMap.get(subscription).setState(2); - if (!timerTaskMap.isEmpty() && timerTaskMap.containsKey(subscription)) { - timerTaskMap.get(subscription).cancel(); - timerTaskMap.remove(subscription); - scheduledMap.get(subscription).cancel(false); - scheduledMap.remove(subscription); - } - this.connecter.unsubscribe(subscription, isKeep); - } - } else { - this.connecter.unsubscribe(subscription, isKeep); - } - } - - class TSDBTimerTask extends TimerTask { - private long subscription; - private TSDBSubscribeCallBack callBack; - // 0: not running 1: running 2: cancel - private int state = 0; - - public TSDBTimerTask(long subscription, TSDBSubscribeCallBack callBack) { - this.subscription = subscription; - this.callBack = callBack; - } - - public int getState() { - return this.state; - } - - public void setState(int state) { - this.state = state; - } - - @Override - public void run() { - synchronized (this) { - if (2 == state) { - return; - } - - state = 1; - - try { - callBack.invoke(consume(subscription)); - } catch (Exception e) { - this.cancel(); - throw new RuntimeException(e); - } - state = 0; - } - } + this.connecter.unsubscribe(this.id, keepProgress); } } diff --git a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java deleted file mode 100644 index 6d4c6b1e94..0000000000 --- a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java +++ /dev/null @@ -1,92 +0,0 @@ -import com.taosdata.jdbc.*; -import org.apache.commons.lang3.StringUtils; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - -public class TestAsyncTSDBSubscribe { - public static void main(String[] args) throws SQLException { - String usage = "java -cp taos-jdbcdriver-2.0.0_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName -topic topicName " + - "-tname tableName -h host"; - if (args.length < 2) { - System.err.println(usage); - return; - } - - String dbName = ""; - String tName = ""; - String host = "localhost"; - String topic = ""; - for (int i = 0; i < args.length; i++) { - if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) { - tName = args[++i]; - } - if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) { - host = args[++i]; - } - if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) { - topic = args[++i]; - } - } - if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) { - System.err.println(usage); - return; - } - - Connection connection = null; - long subscribId = 0; - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata", properties); - String rawSql = "select * from " + tName + ";"; - TSDBSubscribe subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = 
subscribe.subscribe(topic, rawSql, false, 1000, new CallBack("first")); - long subscribId2 = subscribe.subscribe("test", rawSql, false, 1000, new CallBack("second")); - int a = 0; - Thread.sleep(2000); - subscribe.unsubscribe(subscribId, true); - System.err.println("cancel subscribe"); - } catch (Exception e) { - e.printStackTrace(); - if (null != connection && !connection.isClosed()) { - connection.close(); - } - } - } - - private static class CallBack implements TSDBSubscribeCallBack { - private String name = ""; - - public CallBack(String name) { - this.name = name; - } - - @Override - public void invoke(TSDBResultSet resultSet) { - try { - while (null !=resultSet && resultSet.next()) { - System.out.print("callback_" + name + ": "); - for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resultSet.getString(i) + "\t"); - } - System.out.println(); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - } -} diff --git a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java index 598ef4bbc0..df730efa69 100644 --- a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java +++ b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java @@ -2,82 +2,76 @@ import com.taosdata.jdbc.TSDBConnection; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBResultSet; import com.taosdata.jdbc.TSDBSubscribe; -import org.apache.commons.lang3.StringUtils; import java.sql.Connection; import java.sql.DriverManager; import java.util.Properties; public class TestTSDBSubscribe { + + public static TSDBConnection connectTDengine(String host, String database) throws Exception { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String cs = String.format("jdbc:TAOS://%s:0/%s?user=root&password=taosdata", host, database); + return (TSDBConnection)DriverManager.getConnection(cs, properties); + } + + + public static void main(String[] args) throws Exception { - String usage = "java -cp taos-jdbcdriver-2.0.0_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName " + - "-topic topicName -tname tableName -h host"; + String usage = "java -Djava.ext.dirs=../ TestTSDBSubscribe [-host host] <-db database> <-topic topic> <-sql sql>"; if (args.length < 2) { System.err.println(usage); return; } - String dbName = ""; - String tName = ""; - String host = "localhost"; - String topic = ""; + String host = "localhost", database = "", topic = "", sql = ""; for (int i = 0; i < args.length; i++) { if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) { - tName = args[++i]; - } - if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) { - host = args[++i]; + database = args[++i]; } if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) { topic = args[++i]; } + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) { + host = args[++i]; + } + if ("-sql".equalsIgnoreCase(args[i]) && i < args.length - 1) { + sql = args[++i]; + } } - if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) { - System.err.println(usage); - return; + if (database.isEmpty() 
|| topic.isEmpty() || sql.isEmpty()) { + System.err.println(usage); + return; } - Connection connection = null; - TSDBSubscribe subscribe = null; - long subscribId = 0; + TSDBConnection connection = null; + TSDBSubscribe sub = null; try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata" - , properties); - String rawSql = "select * from " + tName + ";"; - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000); - int a = 0; - TSDBResultSet resSet = null; - while (true) { + connection = connectTDengine(host, database); + sub = ((TSDBConnection) connection).subscribe(topic, sql, false); + + int total = 0; + while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); Thread.sleep(900); - resSet = subscribe.consume(subscribId); - - while (resSet.next()) { - for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resSet.getString(i) + "\t"); - } - System.out.println("\n======" + a + "=========="); - } - - a++; - if (a >= 10) { - break; - } } } catch (Exception e) { e.printStackTrace(); } finally { - if (null != subscribe && 0 != subscribId) { - subscribe.unsubscribe(subscribId, true); + if (null != sub) { + sub.close(true); } if (null != connection) { connection.close(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java deleted file mode 100644 index c14624e683..0000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java +++ /dev/null @@ -1,100 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; - -import static org.junit.Assert.assertTrue; - -public class AsyncSubscribeTest extends BaseTest { - Connection connection = null; - Statement statement = null; - String dbName = "test"; - String tName = "t0"; - String host = "localhost"; - String topic = "test"; - long subscribId = 0; - - @Before - public void createDatabase() throws SQLException { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - } catch (ClassNotFoundException e) { - return; - } - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata" - , properties); - - statement = connection.createStatement(); - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." 
+ tName + " (ts timestamp, k int, v int)"); - long ts = System.currentTimeMillis(); - for (int i = 0; i < 2; i++) { - ts += i; - String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")"; - statement.executeUpdate(sql); - } - } - - @Test - public void subscribe() throws Exception { - TSDBSubscribe subscribe = null; - try { - String rawSql = "select * from " + dbName + "." + tName + ";"; - System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000, new CallBack("first")); - - assertTrue(subscribId > 0); - } catch (Exception e) { - e.printStackTrace(); - } - - Thread.sleep(2000); - subscribe.unsubscribe(subscribId, true); - } - - private static class CallBack implements TSDBSubscribeCallBack { - private String name = ""; - - public CallBack(String name) { - this.name = name; - } - - @Override - public void invoke(TSDBResultSet resultSet) { - try { - while (null != resultSet && resultSet.next()) { - System.out.print("callback_" + name + ": "); - for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resultSet.getString(i) + "\t"); - } - System.out.println(); - } - resultSet.close(); - - } catch (Exception e) { - e.printStackTrace(); - } - } - } - - @After - public void close() throws Exception { - statement.executeQuery("drop database test"); - statement.close(); - connection.close(); - Thread.sleep(10); - } -} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index d7f56ac468..2dc27adab7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -49,20 +49,16 @@ public class SubscribeTest extends BaseTest { @Test public void subscribe() throws Exception { TSDBSubscribe subscribe = null; - long subscribId = 0; try { String rawSql = "select * from " + dbName + "." + tName + ";"; System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000); - - assertTrue(subscribId > 0); + subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); int a = 0; while (true) { Thread.sleep(900); - TSDBResultSet resSet = subscribe.consume(subscribId); + TSDBResultSet resSet = subscribe.consume(); while (resSet.next()) { for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { @@ -79,8 +75,8 @@ public class SubscribeTest extends BaseTest { } catch (Exception e) { e.printStackTrace(); } finally { - if (null != subscribe && 0 != subscribId) { - subscribe.unsubscribe(subscribId, true); + if (null != subscribe) { + subscribe.close(true); } } } From 6ce1c1a13fc152f3a9249c1d74d5171a9f695058 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 17:01:40 +0800 Subject: [PATCH 156/190] [td-225] add some logs. 
--- src/client/src/tscServer.c | 13 ++++++++----- src/client/src/tscSql.c | 1 + 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8d86f046ef..0613f35242 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -199,17 +199,20 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { pObj->pHb = pSql; tscAddSubqueryInfo(&pObj->pHb->cmd); - tscDebug("%p pHb is allocated, pObj:%p", pObj->pHb, pObj); + tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj); } if (tscShouldFreeHeatBeat(pObj->pHb)) { - tscDebug("%p free HB object and release connection", pObj); + tscDebug("%p free HB object and release connection", pObj->pHb); tscFreeSqlObj(pObj->pHb); tscCloseTscObj(pObj); - return; + } else { +// taosMsleep(500); + int32_t code = tscProcessSql(pObj->pHb); + if (code != TSDB_CODE_SUCCESS) { + tscError("%p failed to sent HB to server, reason:%s", pObj->pHb, tstrerror(code)); + } } - - tscProcessSql(pObj->pHb); } int tscSendMsgToServer(SSqlObj *pSql) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 2c48c76b1c..94ab3a0c75 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -181,6 +181,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha return NULL; } + TAOS *taos_connect_c(const char *ip, uint8_t ipLen, const char *user, uint8_t userLen, const char *pass, uint8_t passLen, const char *db, uint8_t dbLen, uint16_t port) { char ipBuf[TSDB_EP_LEN] = {0}; From cc8a315c3e2012d07be35e09beac7daab2d4e0a9 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 13 Aug 2020 17:12:01 +0800 Subject: [PATCH 157/190] TD-1113: Add taosdemo into daily test --- tests/pytest/fulltest.sh | 3 +++ tests/pytest/regressiontest.sh | 3 +++ tests/pytest/tools/taosdemo.py | 44 ++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 tests/pytest/tools/taosdemo.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 8e0f6314e0..fd5aa4ecf0 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -183,3 +183,6 @@ python3 ./test.py -f functions/function_stddev.py python3 ./test.py -f functions/function_sum.py python3 ./test.py -f functions/function_top.py python3 ./test.py -f functions/function_twa.py + +# tools +python3 test.py -f tools/taosdemo.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index 20f4b4dc3b..b69ee37a55 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -177,3 +177,6 @@ python3 ./test.py -f functions/function_stddev.py python3 ./test.py -f functions/function_sum.py python3 ./test.py -f functions/function_top.py python3 ./test.py -f functions/function_twa.py + +# tools +python3 test.py -f tools/taosdemo.py \ No newline at end of file diff --git a/tests/pytest/tools/taosdemo.py b/tests/pytest/tools/taosdemo.py new file mode 100644 index 0000000000..54d33c90f3 --- /dev/null +++ b/tests/pytest/tools/taosdemo.py @@ -0,0 +1,44 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 10000 + self.numberOfRecords = 100 + + def run(self): + tdSql.prepare() + + os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords)) + + tdSql.execute("use test") + tdSql.query("select count(*) from meters") + tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 37e11f9b04d9ad3410cfdb570df68ca5685f5fe9 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 13 Aug 2020 21:16:53 +0800 Subject: [PATCH 158/190] try to solve sync problem --- src/tsdb/inc/tsdbMain.h | 1 + src/tsdb/src/tsdbFile.c | 29 +++++++++++++++++++++++++++++ src/tsdb/src/tsdbMain.c | 25 +++++++++++++------------ src/util/inc/tkvstore.h | 1 + src/util/src/tkvstore.c | 25 +++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 12 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 678e167351..7936ea423f 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -475,6 +475,7 @@ int tsdbUpdateFileHeader(SFile* pFile); int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo); void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo); void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup); +void tsdbGetFileInfoImpl(char* fname, uint32_t* magic, int32_t* size); void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey); // ------------------ tsdbRWHelper.c diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 5ba9a68e37..8bc4be6d67 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -423,6 +423,35 @@ void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) { } } +void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int32_t *size) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + uint32_t version = 0; + STsdbFileInfo info = {0}; + + int fd = open(fname, O_RDONLY); + if (fd < 0) goto _err; + + if (taosTRead(fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) goto _err; + + if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) goto _err; + + void *pBuf = (void *)buf; + pBuf = taosDecodeFixedU32(pBuf, &version); + pBuf = tsdbDecodeSFileInfo(pBuf, &info); + + off_t offset = lseek(fd, 0, SEEK_END); + if (offset < 0) goto _err; + close(fd); + + *magic = info.magic; + *size = (int32_t)offset; + +_err: + if (fd >= 0) close(fd); + *magic = TSDB_FILE_INIT_MAGIC; + *size = 0; +} + // ---------------- LOCAL FUNCTIONS ---------------- static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { uint32_t version; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 628a8bac81..dd647ddd9b 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -216,9 +216,9 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ char *sdup = strdup(pRepo->rootDir); char 
*prefix = dirname(sdup); int prefixLen = (int)strlen(prefix); - taosTFree(sdup); if (name[0] == 0) { // get the file from index or after, but not larger than eindex + taosTFree(sdup); int fid = (*index) / TSDB_FILE_TYPE_MAX; if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { @@ -248,18 +248,19 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ strcpy(name, fname + prefixLen); } else { // get the named file at the specified index. If not there, return 0 if (*index == TSDB_META_FILE_INDEX) { // get meta file - fname = tsdbGetMetaFileName(pRepo->rootDir); - magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta); + fname = malloc(prefixLen + strlen(name) + 2); + sprintf(fname, "%s/%s", prefix, name); + tsdbGetStoreInfo(fname, &magic, size); + taosFree(fname); + taosFree(sdup); + return magic; } else { - int fid = (*index) / TSDB_FILE_TYPE_MAX; - SFileGroup *pFGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ); - if (pFGroup == NULL) { // not found - return 0; - } - - SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX]; - fname = strdup(pFile->fname); - magic = pFile->info.magic; + fname = malloc(prefixLen + strlen(name) + 2); + sprintf(fname, "%s/%s", prefix, name); + tsdbGetFileInfoImpl(fname, &magic, size); + taosFree(fname); + taosFree(sdup); + return magic; } } diff --git a/src/util/inc/tkvstore.h b/src/util/inc/tkvstore.h index a6cc82e751..3b4e8e3757 100644 --- a/src/util/inc/tkvstore.h +++ b/src/util/inc/tkvstore.h @@ -58,6 +58,7 @@ int tdKVStoreStartCommit(SKVStore *pStore); int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen); int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid); int tdKVStoreEndCommit(SKVStore *pStore); +void tsdbGetStoreInfo(char *fname, uint32_t *magic, int32_t *size); #ifdef __cplusplus } diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index 499edb4c1a..dd0600ec3c 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -330,6 +330,31 @@ int tdKVStoreEndCommit(SKVStore *pStore) { return 0; } +void tsdbGetStoreInfo(char *fname, uint32_t *magic, int32_t *size) { + char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; + SStoreInfo info = {0}; + + int fd = open(fname, O_RDONLY); + if (fd < 0) goto _err; + + if (taosTRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) goto _err; + if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) goto _err; + + void *pBuf = (void *)buf; + pBuf = tdDecodeStoreInfo(pBuf, &info); + off_t offset = lseek(fd, 0, SEEK_END); + if (offset < 0) goto _err; + close(fd); + + *magic = info.magic; + *size = (int32_t)offset; + +_err: + if (fd >= 0) close(fd); + *magic = TD_KVSTORE_INIT_MAGIC; + *size = 0; +} + static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo, uint32_t *version) { char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; From c3dee8b6907b8233985925cf09a49ce5e51d9ca1 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 13 Aug 2020 22:59:33 +0800 Subject: [PATCH 159/190] add notification message before return --- tests/examples/c/prepare.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/examples/c/prepare.c b/tests/examples/c/prepare.c index 3b968aca07..ca0942035a 100644 --- a/tests/examples/c/prepare.c +++ b/tests/examples/c/prepare.c @@ -198,7 +198,8 @@ int main(int argc, char *argv[]) taos_free_result(result); taos_stmt_close(stmt); - + + printf("Data has been written, Please press enter to return"); return getchar(); } From cfc7da145c390a6e0f80205c804ff8d2a998721f Mon 
Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 23:25:47 +0800 Subject: [PATCH 160/190] [td-225] fix a typo --- src/rpc/src/rpcMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 13f44f20b7..d0c57a34d0 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -541,7 +541,7 @@ void rpcCancelRequest(void *handle) { if (pContext->signature != pContext) return; if (pContext->pConn) { - tDebug("%s, app trys to cancel request", pContext->pConn->info); + tDebug("%s, app tries to cancel request", pContext->pConn->info); pContext->pConn->pReqMsg = NULL; rpcCloseConn(pContext->pConn); pContext->pConn = NULL; From 07e2f9611516f92fc6b22baef67921dc55e93898 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 13 Aug 2020 23:29:13 +0800 Subject: [PATCH 161/190] [td-225] fix error in closing the conns. --- src/client/inc/tscUtil.h | 2 +- src/client/src/tscServer.c | 81 ++++++++++++++++++++++---------------- src/client/src/tscSql.c | 9 ++++- src/client/src/tscUtil.c | 2 +- 4 files changed, 55 insertions(+), 39 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index af91ac34f0..2ca6ba6691 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -210,7 +210,7 @@ void tscTagCondRelease(STagCond* pCond); void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo); void tscSetFreeHeatBeat(STscObj* pObj); -bool tscShouldFreeHeatBeat(SSqlObj* pHb); +bool tscShouldFreeHeartBeat(SSqlObj* pHb); bool tscShouldBeFreed(SSqlObj* pSql); STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 0613f35242..6b75b680b1 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -171,46 +171,23 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { void tscProcessActivityTimer(void *handle, void *tmrId) { STscObj *pObj = (STscObj *)handle; - - if (pObj == NULL) return; - if (pObj->signature != pObj) return; - if (pObj->pTimer != tmrId) return; - - if (pObj->pHb == NULL) { - SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); - if (NULL == pSql) return; - - pSql->fp = tscProcessHeartBeatRsp; - - SQueryInfo *pQueryInfo = NULL; - tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); - pQueryInfo->command = TSDB_SQL_HB; - - pSql->cmd.command = TSDB_SQL_HB; - if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { - taosTFree(pSql); - return; - } - - pSql->cmd.command = TSDB_SQL_HB; - pSql->param = pObj; - pSql->pTscObj = pObj; - pSql->signature = pSql; - pObj->pHb = pSql; - tscAddSubqueryInfo(&pObj->pHb->cmd); - - tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj); + if (pObj == NULL || pObj->signature != pObj) { + return; } - if (tscShouldFreeHeatBeat(pObj->pHb)) { - tscDebug("%p free HB object and release connection", pObj->pHb); - tscFreeSqlObj(pObj->pHb); + SSqlObj* pHB = pObj->pHb; + if (pObj->pTimer != tmrId || pHB == NULL) { + return; + } + + if (tscShouldFreeHeartBeat(pHB)) { + tscDebug("%p free HB object and release connection", pHB); + tscFreeSqlObj(pHB); tscCloseTscObj(pObj); } else { -// taosMsleep(500); - int32_t code = tscProcessSql(pObj->pHb); + int32_t code = tscProcessSql(pHB); if (code != TSDB_CODE_SUCCESS) { - tscError("%p failed to sent HB to server, reason:%s", pObj->pHb, tstrerror(code)); + tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code)); 
} } } @@ -268,6 +245,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { return; } + pSql->pRpcCtx = NULL; // clear the rpcCtx + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) { tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p", @@ -1956,6 +1935,35 @@ int tscProcessShowRsp(SSqlObj *pSql) { return 0; } +static void createHBObj(STscObj* pObj) { + if (pObj->pHb != NULL) { + return; + } + + SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); + if (NULL == pSql) return; + + pSql->fp = tscProcessHeartBeatRsp; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); + pQueryInfo->command = TSDB_SQL_HB; + + pSql->cmd.command = pQueryInfo->command; + if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { + taosTFree(pSql); + return; + } + + pSql->param = pObj; + pSql->pTscObj = pObj; + pSql->signature = pSql; + pObj->pHb = pSql; + tscAddSubqueryInfo(&pObj->pHb->cmd); + + tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj); +} + int tscProcessConnectRsp(SSqlObj *pSql) { char temp[TSDB_TABLE_FNAME_LEN * 2]; STscObj *pObj = pSql->pTscObj; @@ -1977,6 +1985,9 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pObj->writeAuth = pConnect->writeAuth; pObj->superAuth = pConnect->superAuth; pObj->connId = htonl(pConnect->connId); + + createHBObj(pObj); + taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer); return 0; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 94ab3a0c75..29c8aa0a56 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -216,10 +216,15 @@ void taos_close(TAOS *taos) { } if (pObj->pHb != NULL) { + if (pObj->pHb->pRpcCtx != NULL) { // wait for rsp from dnode + rpcCancelRequest(pObj->pHb->pRpcCtx); + } + tscSetFreeHeatBeat(pObj); - } else { - tscCloseTscObj(pObj); + tscFreeSqlObj(pObj->pHb); } + + tscCloseTscObj(pObj); } void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 60723676df..1483f0de48 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1394,7 +1394,7 @@ void tscSetFreeHeatBeat(STscObj* pObj) { pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; } -bool tscShouldFreeHeatBeat(SSqlObj* pHb) { +bool tscShouldFreeHeartBeat(SSqlObj* pHb) { assert(pHb == pHb->signature); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHb->cmd, 0); From 20249346ff1252d23f789c5137605f3682a8ee1c Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Fri, 14 Aug 2020 09:43:37 +0800 Subject: [PATCH 162/190] Update connector-ch.md --- documentation20/webdocs/markdowndocs/connector-ch.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 4992092864..ffacd054f5 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -974,12 +974,12 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ## Go Connector -TDengine提供了GO驱动程序`taosSql`. `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengin, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go` +TDengine提供了GO驱动程序`taosSql`. 
`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go` ```Go import ( "database/sql" - _ "github.com/taosdata/driver-go/taoSql" + _ "github.com/taosdata/driver-go/taosSql" ) ``` ### 常用API @@ -1151,4 +1151,4 @@ promise2.then(function(result) { [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo [13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE [14]: https://www.taosdata.com/cn/documentation20/connector/#Windows -[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B \ No newline at end of file +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B From 183af97a8b8458022bb9ed2b7567a697c8f98e46 Mon Sep 17 00:00:00 2001 From: Xiaowei Su <46439638+Shawshank-Smile@users.noreply.github.com> Date: Fri, 14 Aug 2020 10:35:39 +0800 Subject: [PATCH 163/190] Update connector-ch.md --- documentation20/webdocs/markdowndocs/connector-ch.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index ffacd054f5..c1ffa9f2ce 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -45,11 +45,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: - - ip:TDengine管理主节点的IP地址 - - user:用户名 - - pass:密码 - - db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 - - port:端口号 + - ip:TDengine管理主节点的IP地址 + - user:用户名 + - pass:密码 + - db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 + - port:端口号 返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。 @@ -646,7 +646,7 @@ Query OK, 1 row(s) in set (0.000141s) #### Linux -用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装: +用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到python2和python3的connector安装包。用户可以通过pip命令安装: ​ `pip install src/connector/python/linux/python2/` From 91904ee50218def61e54362ef38aaf696d067a9a Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Fri, 14 Aug 2020 02:41:44 +0000 Subject: [PATCH 164/190] inotfiy events --- src/sync/src/syncMain.c | 6 +++--- src/sync/src/syncRetrieve.c | 23 ++++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index f2abded3b6..eaa073348a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -32,7 +32,7 @@ // global configurable int tsMaxSyncNum = 2; int tsSyncTcpThreads = 2; -int tsMaxWatchFiles = 100; +int tsMaxWatchFiles = 500; int tsMaxFwdInfo = 200; int tsSyncTimer = 1; //int sDebugFlag = 135; @@ -516,7 +516,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) int ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { sDebug("%s, start to check peer connection", pPeer->id); - taosTmrReset(syncCheckPeerConnection, 100, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, 100 + (pNode->vgId*10)%100, pPeer, syncTmrCtrl, &pPeer->timer); } syncAddNodeRef(pNode); @@ -815,7 +815,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) taosTmrStopA(&pPeer->timer); if (tsSyncNum >= tsMaxSyncNum) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); - 
taosTmrReset(syncTryRecoverFromMaster, 500, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer); return; } diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index f881b680f5..1dd1cda343 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -57,13 +57,14 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) } } - *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY); + *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY | IN_DELETE); if (*wd == -1) { sError("%s, failed to add %s(%s)", pPeer->id, name, strerror(errno)); return -1; + } else { + sDebug("%s, monitor %s, wd:%d watchNum:%d", pPeer->id, name, *wd, pPeer->watchNum); } - pPeer->watchNum++; pPeer->watchNum = (pPeer->watchNum +1) % tsMaxWatchFiles; return 0; @@ -75,16 +76,24 @@ static int syncAreFilesModified(SSyncPeer *pPeer) char buf[2048]; int len = read(pPeer->notifyFd, buf, sizeof(buf)); - if (len <0 && errno != EAGAIN) { + if (len < 0 && errno != EAGAIN) { sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno)); return -1; } int code = 0; - if (len >0) { - sDebug("%s, processed file is changed", pPeer->id); - pPeer->fileChanged = 1; - code = 1; + if (len > 0) { + const struct inotify_event *event; + char *ptr; + for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) { + event = (const struct inotify_event *) ptr; + if ((event->mask & IN_MODIFY) || (event->mask & IN_DELETE)) { + sDebug("%s, processed file is changed", pPeer->id); + pPeer->fileChanged = 1; + code = 1; + break; + } + } } return code; From 7c4f8f6464d521ebc2dd723a3dab60b3252cad41 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 14 Aug 2020 11:04:26 +0800 Subject: [PATCH 165/190] [td-225] fix bug while multiple tags exists during fill results --- src/client/src/tscLocalMerge.c | 14 ----- src/client/src/tscUtil.c | 37 ------------- src/query/inc/qFill.h | 8 ++- src/query/src/qFill.c | 96 ++++++++++++++++++++++------------ 4 files changed, 71 insertions(+), 84 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 7818765e57..cbc4caad1b 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -380,20 +380,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd 4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision, pQueryInfo->fillType, pFillCol); } - - int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols; - - if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 && pReducer->pFillInfo != NULL) { - pReducer->pFillInfo->pTags[0] = (char *)pReducer->pFillInfo->pTags + POINTER_BYTES * pQueryInfo->groupbyExpr.numOfGroupCols; - for (int32_t i = 1; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { - SSchema *pSchema = getColumnModelSchema(pReducer->resColModel, startIndex + i - 1); - pReducer->pFillInfo->pTags[i] = pSchema->bytes + pReducer->pFillInfo->pTags[i - 1]; - } - } else { - if (pReducer->pFillInfo != NULL) { - assert(pReducer->pFillInfo->pTags == NULL); - } - } } static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 1483f0de48..64a871ff74 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2141,43 +2141,6 @@ void tscTryQueryNextClause(SSqlObj* pSql, 
__async_cb_func_t fp) { } } -//void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { -// SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); -// assert(pInfo->pSqlExpr != NULL); -// -// int32_t type = pInfo->pSqlExpr->resType; -// int32_t bytes = pInfo->pSqlExpr->resBytes; -// -// char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row; -// -// if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { -// int32_t realLen = varDataLen(pData); -// assert(realLen <= bytes - VARSTR_HEADER_SIZE); -// -// if (isNull(pData, type)) { -// pRes->tsrow[columnIndex] = NULL; -// } else { -// pRes->tsrow[columnIndex] = ((tstr*)pData)->data; -// } -// -// if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor -// *(pData + realLen + VARSTR_HEADER_SIZE) = 0; -// } -// -// pRes->length[columnIndex] = realLen; -// } else { -// assert(bytes == tDataTypeDesc[type].nSize); -// -// if (isNull(pData, type)) { -// pRes->tsrow[columnIndex] = NULL; -// } else { -// pRes->tsrow[columnIndex] = pData; -// } -// -// pRes->length[columnIndex] = bytes; -// } -//} - void* malloc_throw(size_t size) { void* p = malloc(size); if (p == NULL) { diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index db6a69c2c5..6b8dcb0bf9 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -30,6 +30,11 @@ typedef struct { int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN union {int64_t i; double d;} fillVal; } SFillColInfo; + +typedef struct { + SSchema col; + char* tagVal; +} SFillTagColInfo; typedef struct SFillInfo { TSKEY start; // start timestamp @@ -44,7 +49,8 @@ typedef struct SFillInfo { int32_t numOfTags; // number of tags int32_t numOfCols; // number of columns, including the tags columns int32_t rowSize; // size of each row - char ** pTags; // tags value for current interpolation +// char ** pTags; // tags value for current interpolation + SFillTagColInfo* pTags; // tags value for filling gap int64_t slidingTime; // sliding value to determine the number of result for a given time window char * prevValues; // previous row of data, to generate the interpolation results char * nextValues; // next row of data diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index d29186ba49..998049fad6 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -42,19 +42,38 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ pFillInfo->slidingUnit = slidingUnit; pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); - - int32_t rowsize = 0; - for (int32_t i = 0; i < numOfCols; ++i) { - int32_t bytes = pFillInfo->pFillCol[i].col.bytes; - pFillInfo->pData[i] = calloc(1, bytes * capacity); - - rowsize += bytes; - } - if (numOfTags > 0) { - pFillInfo->pTags = calloc(1, pFillInfo->numOfTags * POINTER_BYTES + rowsize); + pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); + for(int32_t i = 0; i < numOfTags; ++i) { + pFillInfo->pTags[i].col.colId = -2; + } } - + + int32_t rowsize = 0; + int32_t k = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; + pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity); + + if (pColInfo->flag == TSDB_COL_TAG) { + bool exists = false; + for(int32_t j = 0; j < k; ++j) { + if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { + exists = true; + break; + } + } + + if (!exists) { + pFillInfo->pTags[k].col.colId = pColInfo->col.colId; + 
pFillInfo->pTags[k].tagVal = calloc(1, pColInfo->col.bytes); + + k += 1; + } + } + rowsize += pColInfo->col.bytes; + } + pFillInfo->rowSize = rowsize; pFillInfo->capacityInRows = capacity; @@ -129,16 +148,21 @@ void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput) { assert(pFillInfo->numOfRows == pInput->num); - int32_t t = 0; - + for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* s = pInput->data + pCol->col.offset * pInput->num; - memcpy(pFillInfo->pData[i], s, pInput->num * pCol->col.bytes); - - if (pCol->flag == TSDB_COL_TAG && t < pFillInfo->numOfTags) { // copy the tag value - memcpy(pFillInfo->pTags[t++], pFillInfo->pData[i], pCol->col.bytes); + + char* data = pInput->data + pCol->col.offset * pInput->num; + memcpy(pFillInfo->pData[i], data, pInput->num * pCol->col.bytes); + + if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer + for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) { + SFillTagColInfo* pTag = &pFillInfo->pTags[j]; + if (pTag->col.colId == pCol->col.colId) { + memcpy(pTag->tagVal, data, pCol->col.bytes); + break; + } + } } } } @@ -224,22 +248,31 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi return 0; } -static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, char** pTags, int32_t start, int32_t num) { - for (int32_t j = 0, i = start; i < pColInfo->numOfCols; ++i, ++j) { - SFillColInfo* pCol = &pColInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); - assignVal(val1, pTags[j], pCol->col.bytes, pCol->col.type); +static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, SFillTagColInfo *pTags, int32_t start, int32_t num) { + for(int32_t j = 0; j < pColInfo->numOfCols; ++j) { + SFillColInfo* pCol = &pColInfo->pFillCol[j]; + if (pCol->flag == TSDB_COL_NORMAL) { + continue; + } + + char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, num); + + for(int32_t i = 0; i < pColInfo->numOfTags; ++i) { + SFillTagColInfo* pTag = &pColInfo->pTags[i]; + if (pTag->col.colId == pCol->col.colId) { + assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); + break; + } + } } } -static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, - int64_t ts, char** pTags, bool outOfBound) { +static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, + int64_t ts, SFillTagColInfo* pTags, bool outOfBound) { char* prevValues = pFillInfo->prevValues; char* nextValues = pFillInfo->nextValues; SPoint point1, point2, point; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, *num); @@ -364,17 +397,16 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu char** nextValues = &pFillInfo->nextValues; int32_t numOfTags = pFillInfo->numOfTags; - char** pTags = pFillInfo->pTags; + SFillTagColInfo* pTags = pFillInfo->pTags; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - if (numOfRows == 0) { /* * These data are generated according to fill strategy, since the current timestamp is out of time window of * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. 
*/ while (num < outputRows) { - doInterpoResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, true); + doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, true); } pFillInfo->numOfTotal += pFillInfo->numOfCurrent; @@ -401,7 +433,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - doInterpoResultImpl(pFillInfo, data, &num, srcData, ts, pTags, false); + doFillResultImpl(pFillInfo, data, &num, srcData, ts, pTags, false); } /* output buffer is full, abort */ From 21f0afe535458d0c3436f2d56805a18e67f470c0 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 14 Aug 2020 11:12:06 +0800 Subject: [PATCH 166/190] [td-225] add todo --- src/plugins/http/src/httpSql.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index a01cb07a98..e0e0d1aa8b 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -210,6 +210,7 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num } } + // todo refactor if (tscResultsetFetchCompleted(result)) { isContinue = false; } From 4dd43425616a4c796059081a050dcba8aca41ba8 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 14 Aug 2020 13:26:41 +0800 Subject: [PATCH 167/190] fix td-1142 --- src/client/src/tscPrepare.c | 12 +++++++----- tests/examples/c/makefile | 24 +++++++++++++----------- tests/examples/c/prepare.c | 10 +++++++--- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 0df1c7ddc5..b996dd958a 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -613,11 +613,13 @@ int taos_stmt_execute(TAOS_STMT* stmt) { if (sql == NULL) { ret = TSDB_CODE_TSC_OUT_OF_MEMORY; } else { - taosTFree(pStmt->pSql->sqlstr); - pStmt->pSql->sqlstr = sql; - SSqlObj* pSql = taos_query((TAOS*)pStmt->taos, pStmt->pSql->sqlstr); - ret = taos_errno(pSql); - taos_free_result(pSql); + if (pStmt->pSql != NULL) { + taos_free_result(pStmt->pSql); + pStmt->pSql = NULL; + } + pStmt->pSql = taos_query((TAOS*)pStmt->taos, sql); + ret = taos_errno(pStmt->pSql); + free(sql); } } return ret; diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index f9653c9c96..7293a22c11 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -4,22 +4,24 @@ ROOT=./ TARGET=exe LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt -CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \ - -I/usr/local/taos/include -std=gnu99 +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ + -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ + -msse4.2 -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 all: $(TARGET) exe: - gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS) - gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS) - gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare $(LFLAGS) - gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS) + gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)asyncdemo $(LFLAGS) + gcc $(CFLAGS) ./demo.c -o $(ROOT)demo $(LFLAGS) + gcc $(CFLAGS) ./prepare.c -o $(ROOT)prepare $(LFLAGS) + gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS) gcc $(CFLAGS) 
./subscribe.c -o $(ROOT)subscribe $(LFLAGS) gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS) clean: - rm $(ROOT)/asyncdemo - rm $(ROOT)/demo - rm $(ROOT)/prepare - rm $(ROOT)/stream - rm $(ROOT)/subscribe + rm $(ROOT)asyncdemo + rm $(ROOT)demo + rm $(ROOT)prepare + rm $(ROOT)stream + rm $(ROOT)subscribe + rm $(ROOT)apitest diff --git a/tests/examples/c/prepare.c b/tests/examples/c/prepare.c index ca0942035a..7a70b744ee 100644 --- a/tests/examples/c/prepare.c +++ b/tests/examples/c/prepare.c @@ -195,11 +195,15 @@ int main(int argc, char *argv[]) taos_print_row(temp, row, fields, num_fields); printf("%s\n", temp); } + if (rows == 2) { + printf("two rows are fetched as expectation\n"); + } else { + printf("expect two rows, but %d rows are fetched\n", rows); + } taos_free_result(result); taos_stmt_close(stmt); - - printf("Data has been written, Please press enter to return"); - return getchar(); + + return 0; } From dce8ec169eaab9571216e0c719824c823567aa48 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 14 Aug 2020 13:28:10 +0800 Subject: [PATCH 168/190] fix server&client side crash in singe table subscription, after consume all data, the next call to consume crash both client and server. this commit fix the issue. --- src/client/src/tscSub.c | 14 ++++++++------ tests/examples/c/subscribe.c | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index 0e1be926f4..76bce19668 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -405,16 +405,20 @@ TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char return pSub; } -void taos_free_result_imp(SSqlObj* pSql, int keepCmd); - TAOS_RES *taos_consume(TAOS_SUB *tsub) { SSub *pSub = (SSub *)tsub; if (pSub == NULL) return NULL; tscSaveSubscriptionProgress(pSub); - SSqlObj* pSql = pSub->pSql; + SSqlObj *pSql = pSub->pSql; SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single tabel subscription + pQueryInfo->window.skey = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, 0))->key; + } if (pSub->pTimer == NULL) { int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime; @@ -436,8 +440,6 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { tscDebug("table synchronization completed"); } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - uint32_t type = pQueryInfo->type; tscFreeSqlResult(pSql); pRes->numOfRows = 1; @@ -445,7 +447,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { pSql->cmd.command = TSDB_SQL_SELECT; pQueryInfo->type = type; - tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->vgroupIndex = 0; + pTableMetaInfo->vgroupIndex = 0; pSql->fp = asyncCallback; pSql->fetchFp = asyncCallback; diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index db5ad34ee7..8368af18f7 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -56,7 +56,7 @@ void check_row_count(int line, TAOS_RES* res, int expected) { void do_query(TAOS* taos, const char* sql) { - TAOS_RES* res = taos_query(taos, "drop database if exists test;"); + TAOS_RES* res = taos_query(taos, sql); taos_free_result(res); } From c71ab49d6bc8a595fb7ef62efbadbfaa27bb5ec9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 14 Aug 2020 06:00:25 +0000 Subject: [PATCH 169/190] TD-1143 --- 
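The single-table subscription fix in PATCH 168 above changes how `taos_consume` resumes: the query window start is restored from the saved progress key before the statement is re-issued, which is what previously crashed both client and server on the consume call issued after all data had been read. For reference, a minimal synchronous consume loop over this C API might look like the sketch below; it follows the `taos_subscribe`/`taos_consume` signatures documented in connector-ch.md later in this series, and the connection parameters, topic name and query are placeholders rather than part of the patch.

```c
/* Sketch only: synchronous single-table subscription from the C client.
 * Host, credentials, topic and query are illustrative placeholders. */
#include <stdio.h>
#include <unistd.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  /* restart = 1: start over instead of resuming saved progress; fp = NULL selects sync mode */
  TAOS_SUB *sub = taos_subscribe(taos, 1, "demo_topic", "select * from meters;", NULL, NULL, 1000);

  for (int i = 0; sub != NULL && i < 10; ++i) {
    TAOS_RES *res = taos_consume(sub);  /* with PATCH 168, resumes from the saved progress key */
    TAOS_ROW  row;
    while (res != NULL && (row = taos_fetch_row(res)) != NULL) {
      /* process one subscribed row */
    }
    sleep(1);
  }

  if (sub != NULL) taos_unsubscribe(sub, 0);  /* 0: do not keep the progress record */
  taos_close(taos);
  return 0;
}
```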
src/plugins/monitor/src/monitorMain.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 55c242763b..d76bb4bd82 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -234,17 +234,22 @@ static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) { } void monitorStopSystem() { - monitorInfo("monitor module is stopped"); - monitorExecuteSQLFp = NULL; + if (tsMonitorConn.state == MONITOR_STATE_STOPPED) return; tsMonitorConn.state = MONITOR_STATE_STOPPED; + monitorExecuteSQLFp = NULL; + + monitorInfo("monitor module is stopped"); + if (tsMonitorConn.initTimer != NULL) { taosTmrStopA(&(tsMonitorConn.initTimer)); } if (tsMonitorConn.timer != NULL) { taosTmrStopA(&(tsMonitorConn.timer)); } - - taos_close(tsMonitorConn.conn); + if (tsMonitorConn.conn != NULL) { + taos_close(tsMonitorConn.conn); + tsMonitorConn.conn = NULL; + } } void monitorCleanUpSystem() { From 7af0683db107d82d3fafb875fc64826f1688d14d Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Fri, 14 Aug 2020 14:26:11 +0800 Subject: [PATCH 170/190] Update connector-ch.md change format --- .../webdocs/markdowndocs/connector-ch.md | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index c1ffa9f2ce..6b22004c43 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -157,25 +157,25 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 异步执行SQL语句。 - * taos:调用taos_connect返回的数据库连接 - * sql:需要执行的SQL语句 - * fp:用户定义的回调函数,其第三个参数`code`用于指示操作是否成功,`0`表示成功,负数表示失败(调用`taos_errstr`获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数`TAOS_RES *`,该参数是查询返回的结果集 - * param:应用提供一个用于回调的参数 + * taos:调用taos_connect返回的数据库连接 + * sql:需要执行的SQL语句 + * fp:用户定义的回调函数,其第三个参数`code`用于指示操作是否成功,`0`表示成功,负数表示失败(调用`taos_errstr`获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数`TAOS_RES *`,该参数是查询返回的结果集 + * param:应用提供一个用于回调的参数 - `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);` 批量获取异步查询的结果集,只能与`taos_query_a`配合使用。其中: - * res:`taos_query_a`回调时返回的结果集 - * fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。 + * res:`taos_query_a`回调时返回的结果集 + * fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。 - `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);` 异步获取一条记录。其中: - * res:`taos_query_a`回调时返回的结果集 - * fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。 + * res:`taos_query_a`回调时返回的结果集 + * fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。 TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。 @@ -232,12 +232,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 - `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, 
void *param, void (*callback)(void *))` 该API用来创建数据流,其中: - * taos:已经建立好的数据库连接 - * sql:SQL查询语句(仅能使用查询语句) - * fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。 - * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数) - * param:是应用提供的用于回调的一个参数,回调时,提供给应用 - * callback: 第二个回调函数,会在连续查询自动停止时被调用。 + * taos:已经建立好的数据库连接 + * sql:SQL查询语句(仅能使用查询语句) + * fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。 + * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数) + * param:是应用提供的用于回调的一个参数,回调时,提供给应用 + * callback: 第二个回调函数,会在连续查询自动停止时被调用。 返回值为NULL,表示创建成功,返回值不为空,表示成功。 @@ -254,21 +254,21 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 * `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: - * taos:已经建立好的数据库连接 - * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 - * topic:订阅的主题(即名称),此参数是订阅的唯一标识 - * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 - * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` - * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 - * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 + * taos:已经建立好的数据库连接 + * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + * topic:订阅的主题(即名称),此参数是订阅的唯一标识 + * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 + * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` + * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 + * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 * `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` 异步模式下,回调函数的原型,其参数为: - * tsub:订阅对象 - * res:查询结果集,注意结果集中可能没有记录 - * param:调用 `taos_subscribe`时客户程序提供的附加参数 - * code:错误码 + * tsub:订阅对象 + * res:查询结果集,注意结果集中可能没有记录 + * param:调用 `taos_subscribe`时客户程序提供的附加参数 + * code:错误码 * `TAOS_RES *taos_consume(TAOS_SUB *tsub)` @@ -1025,7 +1025,7 @@ npm install td2.0-connector - Xcode - - 然后通过Xcode安装 + - 然后通过Xcode安装 ``` Command Line Tools From d7579e02a11dc654c8f587c2236276bfea8dfbc5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 14 Aug 2020 15:08:15 +0800 Subject: [PATCH 171/190] [td-225] fix the bugs in fill for interval query --- src/client/src/tscLocalMerge.c | 149 ++++++++++++++++++--------------- src/query/src/qFill.c | 43 +++++----- 2 files changed, 102 insertions(+), 90 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index cbc4caad1b..186c2871a1 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -822,7 +822,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource * } } -void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) { +void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) { // discard following dataset in the same group and reset the interpolation information STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -865,64 +865,66 @@ static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRe } } +static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) { + assert(pQueryInfo->intervalTime 
== 0 || pQueryInfo->fillType == TSDB_FILL_NONE); + + tFilePage * pBeforeFillData = pLocalReducer->pResultBuf; + + pRes->data = pLocalReducer->pFinalRes; + pRes->numOfRows = pBeforeFillData->num; + + if (pQueryInfo->limit.offset > 0) { + if (pQueryInfo->limit.offset < pRes->numOfRows) { + int32_t prevSize = (int32_t)pBeforeFillData->num; + tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); + + /* remove the hole in column model */ + tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + + pRes->numOfRows -= pQueryInfo->limit.offset; + pQueryInfo->limit.offset = 0; + } else { + pQueryInfo->limit.offset -= pRes->numOfRows; + pRes->numOfRows = 0; + } + } + + pRes->numOfRowsGroup += pRes->numOfRows; + + // impose the limitation of output rows on the final result + if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { + int32_t prevSize = (int32_t)pBeforeFillData->num; + int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); + assert(overflow < pRes->numOfRows); + + pRes->numOfRowsGroup = pQueryInfo->limit.limit; + pRes->numOfRows -= overflow; + pBeforeFillData->num -= overflow; + + tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + + // set remain data to be discarded, and reset the interpolation information + savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo); + } + + memcpy(pRes->data, pBeforeFillData->data, pRes->numOfRows * pLocalReducer->finalRowSize); + + pRes->numOfClauseTotal += pRes->numOfRows; + pBeforeFillData->num = 0; +} + /* * Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called * by "interuptHandler" function in shell */ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) { - SSqlCmd * pCmd = &pSql->cmd; - SSqlRes * pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; - tFilePage * pFinalDataPage = pLocalReducer->pResultBuf; + tFilePage *pBeforeFillData = pLocalReducer->pResultBuf; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SFillInfo *pFillInfo = pLocalReducer->pFillInfo; - // no interval query, no fill operation - if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { - pRes->data = pLocalReducer->pFinalRes; - pRes->numOfRows = pFinalDataPage->num; - - if (pQueryInfo->limit.offset > 0) { - if (pQueryInfo->limit.offset < pRes->numOfRows) { - int32_t prevSize = (int32_t)pFinalDataPage->num; - tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); - - /* remove the hole in column model */ - tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); - - pRes->numOfRows -= pQueryInfo->limit.offset; - pQueryInfo->limit.offset = 0; - } else { - pQueryInfo->limit.offset -= pRes->numOfRows; - pRes->numOfRows = 0; - } - } - - pRes->numOfRowsGroup += pRes->numOfRows; - - if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { - /* impose the limitation of output rows on the final result */ - int32_t prevSize = (int32_t)pFinalDataPage->num; - int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); - assert(overflow < pRes->numOfRows); - - pRes->numOfRowsGroup = pQueryInfo->limit.limit; - pRes->numOfRows -= overflow; - pFinalDataPage->num -= overflow; - - tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); 
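/* the remaining lines of the old inline no-fill branch are removed below; its
 * offset/limit handling now lives in genFinalResWithoutFill() added earlier in this patch */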
- - /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo); - } - - memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize); - - pRes->numOfClauseTotal += pRes->numOfRows; - pFinalDataPage->num = 0; - return; - } - - SFillInfo *pFillInfo = pLocalReducer->pFillInfo; int64_t actualETime = MAX(pQueryInfo->window.skey, pQueryInfo->window.ekey); tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput); @@ -960,7 +962,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO break; } - /* all output for current group are completed */ + // all output in current group are completed int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); if (totalRemainRows <= 0) { break; @@ -970,15 +972,16 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } if (pRes->numOfRows > 0) { - if (pQueryInfo->limit.limit >= 0 && pRes->numOfRows > pQueryInfo->limit.limit) { - int32_t overflow = (int32_t)(pRes->numOfRows - pQueryInfo->limit.limit); - pRes->numOfRows -= overflow; - pFinalDataPage->num -= overflow; + int32_t currentTotal = pRes->numOfRowsGroup + pRes->numOfRows; - assert(pRes->numOfRows >= 0 && pFinalDataPage->num > 0); + if (pQueryInfo->limit.limit >= 0 && currentTotal > pQueryInfo->limit.limit) { + int32_t overflow = (int32_t)(currentTotal - pQueryInfo->limit.limit); + + pRes->numOfRows -= overflow; + assert(pRes->numOfRows >= 0); /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pFillInfo); + savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo); } if (pQueryInfo->order.order == TSDB_ORDER_ASC) { @@ -995,7 +998,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO pRes->numOfClauseTotal += pRes->numOfRows; } - pFinalDataPage->num = 0; + pBeforeFillData->num = 0; for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { taosTFree(pResPages[i]); } @@ -1212,7 +1215,7 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { * @param noMoreCurrentGroupRes * @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups */ -bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { +bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -1246,13 +1249,21 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no // tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num); #endif - SFillInfo* pFillInfo = pLocalReducer->pFillInfo; - if (pFillInfo != NULL) { - taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey); - taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf); - } - doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes); + + // no interval query, no fill operation + if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { + genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo); + } else { + SFillInfo* pFillInfo = pLocalReducer->pFillInfo; + if (pFillInfo != NULL) { + taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey); + 
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf); + } + + doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes); + } + return true; } @@ -1337,7 +1348,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey; int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); - if (rows > 0) { // do interpo + if (rows > 0) { doFillResult(pSql, pLocalReducer, true); } } @@ -1502,7 +1513,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { */ if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) { // does not belong to the same group - bool notSkipped = doGenerateFinalResults(pSql, pLocalReducer, !sameGroup); + bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup); // this row needs to discard, since it belongs to the group of previous if (pLocalReducer->discard && sameGroup) { @@ -1571,7 +1582,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { } if (pLocalReducer->pResultBuf->num) { - doGenerateFinalResults(pSql, pLocalReducer, true); + genFinalResults(pSql, pLocalReducer, true); } assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS && pRes->row == 0); diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 998049fad6..d9fe67e1b7 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -248,17 +248,17 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi return 0; } -static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, SFillTagColInfo *pTags, int32_t start, int32_t num) { - for(int32_t j = 0; j < pColInfo->numOfCols; ++j) { - SFillColInfo* pCol = &pColInfo->pFillCol[j]; +static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) { + for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) { + SFillColInfo* pCol = &pFillInfo->pFillCol[j]; if (pCol->flag == TSDB_COL_NORMAL) { continue; } char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, num); - for(int32_t i = 0; i < pColInfo->numOfTags; ++i) { - SFillTagColInfo* pTag = &pColInfo->pTags[i]; + for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) { + SFillTagColInfo* pTag = &pFillInfo->pTags[i]; if (pTag->col.colId == pCol->col.colId) { assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); break; @@ -267,8 +267,8 @@ static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, SFillTagColInfo } } -static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, - int64_t ts, SFillTagColInfo* pTags, bool outOfBound) { +static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, int64_t ts, + bool outOfBound) { char* prevValues = pFillInfo->prevValues; char* nextValues = pFillInfo->nextValues; @@ -312,7 +312,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu } } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) { // TODO : linear interpolation supports NULL value if (prevValues != NULL && !outOfBound) { @@ -337,7 +337,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu taosDoLinearInterpolation(type, &point1, &point2, &point); } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } else { for (int32_t i = 1; i < numOfValCols; ++i) { @@ -352,7 +352,7 @@ static void 
doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu } } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } } else { /* fill the default value */ @@ -363,7 +363,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } pFillInfo->start += (pFillInfo->slidingTime * step); @@ -397,8 +397,6 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu char** nextValues = &pFillInfo->nextValues; int32_t numOfTags = pFillInfo->numOfTags; - SFillTagColInfo* pTags = pFillInfo->pTags; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); if (numOfRows == 0) { /* @@ -406,7 +404,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. */ while (num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, true); + doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, true); } pFillInfo->numOfTotal += pFillInfo->numOfCurrent; @@ -433,12 +431,11 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, ts, pTags, false); + doFillResultImpl(pFillInfo, data, &num, srcData, ts, false); } /* output buffer is full, abort */ - if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || - (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { + if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { pFillInfo->numOfTotal += pFillInfo->numOfCurrent; return outputRows; } @@ -447,10 +444,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu initBeforeAfterDataBuf(pFillInfo, prevValues); // assign rows to dst buffer - int32_t i = 0; - for (; i < pFillInfo->numOfCols - numOfTags; ++i) { + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - + if (pCol->flag == TSDB_COL_TAG) { + continue; + } + char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->rowIdx); @@ -472,10 +471,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu } // set the tag value for final result - setTagsValue(pFillInfo, data, pTags, pFillInfo->numOfCols - numOfTags, num); + setTagsValue(pFillInfo, data, num); pFillInfo->start += (pFillInfo->slidingTime * step); pFillInfo->rowIdx += 1; + + pFillInfo->numOfCurrent +=1; num += 1; } From 98f526e634fb3f001bcd4fc78b2252f1fd8cd19d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 14 Aug 2020 15:08:43 +0800 Subject: [PATCH 172/190] [td-225] add test cases. 
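These cases cover the interval fill plus group-by limit/offset paths reworked by the previous [td-225] patch, using the lastrow test data (lr_db0.lr_stb0). Outside the sim framework, the first limit case can be driven from the C client roughly as in the sketch below; the server address is a placeholder and the expected row count of 18 comes from the sim assertion, so treat this as an illustrative check rather than part of the patch.

```c
/* Sketch: run one of the regression queries below against a server that
 * already holds the lastrow test data (lr_db0.lr_stb0) and count the rows. */
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "lr_db0", 0);
  if (taos == NULL) return 1;

  const char *sql =
      "select t1,t1,count(*),t1,t1 from lr_stb0 "
      "where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' "
      "interval(1h) fill(NULL) group by t1 limit 9";

  TAOS_RES *res = taos_query(taos, sql);
  if (res == NULL || taos_errno(res) != 0) {
    fprintf(stderr, "query failed, code:%d\n", taos_errno(res));
  } else {
    int rows = 0;
    while (taos_fetch_row(res) != NULL) rows++;
    printf("rows=%d (the sim case expects 18)\n", rows);
  }

  if (res != NULL) taos_free_result(res);
  taos_close(taos);
  return 0;
}
```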
--- tests/script/general/parser/lastrow_query.sim | 68 ++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index 9f52e45b80..98eb5a8d6d 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -61,4 +61,70 @@ endi sql select count(*) from lr_db0.lr_stb0 where ts>'2018-9-18 8:00:00' and ts<'2018-9-18 14:00:00' interval(1s) fill(NULL); if $row != 21600 then return -1 -endi \ No newline at end of file +endi + +#regression test case 3 +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 1 +if $row != 2 then + return -1 +endi + +if $data01 != 7 then + return -1 +endi + +if $data02 != 7 then + return -1 +endi + +if $data03 != 59 then + print expect 59, actual: $data03 + return -1 +endi + +if $data04 != 7 then + return -1 +endi + +if $data11 != 8 then + return -1 +endi + +if $data12 != 8 then + return -1 +endi + +if $data13 != NULL then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 9 +if $rows != 18 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 12 +if $rows != 24 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25 +if $rows != 48 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25 offset 1 +if $rows != 46 then + return -1 +endi + +sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 2 soffset 0 limit 250000 offset 1 +if $rows != 172798 then + return -1 +endi + +sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 1 soffset 1 limit 250000 offset 1 +if $rows != 86399 then + return -1 +endi + From faabd6e5cbdf02564c4c62ba81ff6e82c6a8e69d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 14 Aug 2020 16:59:41 +0800 Subject: [PATCH 173/190] TD-1057 compile error in vs2019 --- src/client/src/TSDBJNIConnector.c | 2 +- src/client/src/tscSQLParser.c | 2 +- src/common/src/ttimezone.c | 2 +- src/kit/taosnetwork/client.c | 4 ++-- src/kit/taosnetwork/server.c | 6 +++--- src/os/inc/osSocket.h | 4 ++++ src/os/inc/osWindows.h | 22 +++++++++++++++++++--- src/os/src/detail/osSocket.c | 12 ++++++++++++ src/os/src/detail/osTime.c | 7 +++++++ src/os/src/windows/w64Socket.c | 22 +++++++++++++++++++++- src/plugins/http/src/httpServer.c | 6 +++--- src/rpc/src/rpcTcp.c | 4 ++-- src/util/src/tconfig.c | 2 +- 13 files changed, 77 insertions(+), 18 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 5036983424..34204f96bf 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -583,7 +583,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEn return 0l; } - 
return (long)res; + return (jlong)res; } JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index f933366711..e5bb516ee6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2390,7 +2390,7 @@ bool validateIpAddress(const char* ip, size_t size) { strncpy(tmp, ip, size); - in_addr_t epAddr = inet_addr(tmp); + in_addr_t epAddr = taosInetAddr(tmp); return epAddr != INADDR_NONE; } diff --git a/src/common/src/ttimezone.c b/src/common/src/ttimezone.c index edb6aea7f9..62b1e0bb5c 100644 --- a/src/common/src/ttimezone.c +++ b/src/common/src/ttimezone.c @@ -50,7 +50,7 @@ void tsSetTimeZone() { #endif #endif - int32_t tz = (-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR; + int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); tz += daylight; /* diff --git a/src/kit/taosnetwork/client.c b/src/kit/taosnetwork/client.c index 102f9b9d89..74c161bf67 100644 --- a/src/kit/taosnetwork/client.c +++ b/src/kit/taosnetwork/client.c @@ -91,7 +91,7 @@ int checkTcpPort(info_s *info) { serverAddr.sin_family = AF_INET; serverAddr.sin_port = htons(port); - serverAddr.sin_addr.s_addr = inet_addr(host); + serverAddr.sin_addr.s_addr = taosInetAddr(host); //printf("=================================\n"); if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) { @@ -157,7 +157,7 @@ int checkUdpPort(info_s *info) { serverAddr.sin_family = AF_INET; serverAddr.sin_port = htons(port); - serverAddr.sin_addr.s_addr = inet_addr(host); + serverAddr.sin_addr.s_addr = taosInetAddr(host); memset(sendbuf, 0, BUFFER_SIZE); memset(recvbuf, 0, BUFFER_SIZE); diff --git a/src/kit/taosnetwork/server.c b/src/kit/taosnetwork/server.c index 1c3bc6fa09..a2bb2b2b65 100644 --- a/src/kit/taosnetwork/server.c +++ b/src/kit/taosnetwork/server.c @@ -128,7 +128,7 @@ static void *bindTcpPort(void *sarg) { if (errno == EINTR) { continue; } else { - printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno)); + printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", taosInetNtoa(clientAddr.sin_addr), port, strerror(errno)); close(serverSocket); return NULL; } @@ -139,7 +139,7 @@ static void *bindTcpPort(void *sarg) { } } - printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); if (iDataNum > 0) { send(client, buffer, iDataNum, 0); break; @@ -188,7 +188,7 @@ static void *bindUdpPort(void *sarg) { continue; } if (iDataNum > 0) { - printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); //printf("Read msg from udp:%s ... 
%s\n", buffer, buffer+iDataNum-16); sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size); diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index 58f95c3c2d..e5c3806d1b 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -65,6 +65,10 @@ void taosBlockSIGPIPE(); // TAOS_OS_FUNC_SOCKET_SETSOCKETOPT int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int optlen); +// TAOS_OS_FUNC_SOCKET_INET +uint32_t taosInetAddr(char *ipAddr); +const char *taosInetNtoa(IN_ADDR ipInt); + #ifdef __cplusplus } #endif diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index 6665dcd920..994e3b991c 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -164,9 +164,25 @@ int gettimeofday(struct timeval *ptv, void *pTimeZone); #define MSG_NOSIGNAL 0 #define SO_NO_CHECK 0x1234 #define SOL_TCP 0x1234 -#define TCP_KEEPCNT 0x1234 -#define TCP_KEEPIDLE 0x1234 -#define TCP_KEEPINTVL 0x1234 + +#ifndef TCP_KEEPCNT + #define TCP_KEEPCNT 0x1234 +#endif + +#ifndef TCP_KEEPIDLE + #define TCP_KEEPIDLE 0x1234 +#endif + +#ifndef TCP_KEEPINTVL + #define TCP_KEEPINTVL 0x1234 +#endif + +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + #define TAOS_OS_FUNC_SOCKET_INET +#endif +#endif + #define SHUT_RDWR SD_BOTH #define SHUT_RD SD_RECEIVE #define SHUT_WR SD_SEND diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index c8ad6a5acc..bcacd8575a 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -57,4 +57,16 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } +#endif + +#ifndef TAOS_OS_FUNC_SOCKET_INET + +uint32_t taosInetAddr(char *ipAddr) { + return inet_addr(ipAddr); +} + +const char *taosInetNtoa(IN_ADDR ipInt) { + return inet_ntoa(ipInt); +} + #endif \ No newline at end of file diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index ced1643d2b..57634e468a 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -61,8 +61,15 @@ int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, res = res*24; res = ((res + hour) * 60 + min) * 60 + sec; +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + int64_t timezone = _timezone; +#endif +#endif + return (res + timezone); } + // ==== mktime() kernel code =================// static int64_t m_deltaUtc = 0; void deltaToUtcInitOnce() { diff --git a/src/os/src/windows/w64Socket.c b/src/os/src/windows/w64Socket.c index dd8961da40..6514c2f851 100644 --- a/src/os/src/windows/w64Socket.c +++ b/src/os/src/windows/w64Socket.c @@ -62,4 +62,24 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op } return setsockopt(socketfd, level, optname, optval, optlen); -} \ No newline at end of file +} + +#ifdef TAOS_OS_FUNC_SOCKET_INET + +uint32_t taosInetAddr(char *ipAddr) { + uint32_t value; + int ret = inet_pton(AF_INET, ipAddr, &value); + if (ret <= 0) { + return INADDR_NONE; + } else { + return value; + } +} + +const char *taosInetNtoa(IN_ADDR ipInt) { + // not thread safe, only for debug usage while print log + static char tmpDstStr[16]; + return inet_ntop(AF_INET, &ipInt, tmpDstStr, INET6_ADDRSTRLEN); +} + +#endif \ No newline at end of file diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 8ee92be31c..21949e93af 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -302,7 +302,7 @@ static void 
*httpAcceptHttpConnection(void *arg) { #if 0 if (totalFds > tsHttpCacheSessions * 100) { httpError("fd:%d, ip:%s:%u, totalFds:%d larger than httpCacheSessions:%d*100, refuse connection", connFd, - inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), totalFds, tsHttpCacheSessions); + taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port), totalFds, tsHttpCacheSessions); taosCloseSocket(connFd); continue; } @@ -316,14 +316,14 @@ static void *httpAcceptHttpConnection(void *arg) { pContext = httpCreateContext(connFd); if (pContext == NULL) { - httpError("fd:%d, ip:%s:%u, no enough resource to allocate http context", connFd, inet_ntoa(clientAddr.sin_addr), + httpError("fd:%d, ip:%s:%u, no enough resource to allocate http context", connFd, taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); taosCloseSocket(connFd); continue; } pContext->pThread = pThread; - sprintf(pContext->ipstr, "%s:%u", inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); + sprintf(pContext->ipstr, "%s:%u", taosInetAddr(clientAddr.sin_addr), htons(clientAddr.sin_port)); struct epoll_event event; event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP | EPOLLERR | EPOLLHUP | EPOLLRDHUP; diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 9da11831e5..3475e0f317 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -254,11 +254,11 @@ static void *taosAcceptTcpConnection(void *arg) { pFdObj->ip = caddr.sin_addr.s_addr; pFdObj->port = htons(caddr.sin_port); tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, - inet_ntoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); + taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); } else { taosCloseSocket(connFd); tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno), - inet_ntoa(caddr.sin_addr), htons(caddr.sin_port)); + taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); } // pick up next thread for next connection diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index ccf4ea7317..704af2017e 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -133,7 +133,7 @@ static void taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { } static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { - uint32_t value = inet_addr(input_value); + uint32_t value = taosInetAddr(input_value); char * option = (char *)cfg->ptr; if (value == INADDR_NONE) { uError("config option:%s, input value:%s, is not a valid ip address, use default value:%s", From 8ac389c675802a81e6048d5e77aaa640ca3e3f74 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 14 Aug 2020 17:08:04 +0800 Subject: [PATCH 174/190] TD-1057 compile error after merge feature/query --- src/client/src/tscLocalMerge.c | 2 +- tests/tsim/src/simExe.c | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 186c2871a1..99c3bc4fb3 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -972,7 +972,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } if (pRes->numOfRows > 0) { - int32_t currentTotal = pRes->numOfRowsGroup + pRes->numOfRows; + int32_t currentTotal = (int32_t)(pRes->numOfRowsGroup + pRes->numOfRows); if (pQueryInfo->limit.limit >= 0 && currentTotal > pQueryInfo->limit.limit) { int32_t overflow = (int32_t)(currentTotal - pQueryInfo->limit.limit); diff --git 
a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 50d1a9b5be..0529808b6b 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -748,11 +748,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { sprintf(value, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: -#ifdef _TD_ARM_32_ - sprintf(value, "%lld", *((int64_t *)row[i])); -#else - sprintf(value, "%ld", *((int64_t *)row[i])); -#endif + sprintf(value, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT:{ #ifdef _TD_ARM_32_ From 7f885cb3b3f137ef0b3a13eeb91f448793d59bdc Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 14 Aug 2020 09:39:56 +0000 Subject: [PATCH 175/190] TD-1057 --- src/kit/taosnetwork/client.c | 4 ++-- src/kit/taosnetwork/server.c | 6 +++--- src/os/inc/osSocket.h | 2 +- src/os/src/detail/osSocket.c | 2 +- src/os/src/windows/w64Socket.c | 2 +- src/plugins/http/src/httpServer.c | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/kit/taosnetwork/client.c b/src/kit/taosnetwork/client.c index 74c161bf67..102f9b9d89 100644 --- a/src/kit/taosnetwork/client.c +++ b/src/kit/taosnetwork/client.c @@ -91,7 +91,7 @@ int checkTcpPort(info_s *info) { serverAddr.sin_family = AF_INET; serverAddr.sin_port = htons(port); - serverAddr.sin_addr.s_addr = taosInetAddr(host); + serverAddr.sin_addr.s_addr = inet_addr(host); //printf("=================================\n"); if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) { @@ -157,7 +157,7 @@ int checkUdpPort(info_s *info) { serverAddr.sin_family = AF_INET; serverAddr.sin_port = htons(port); - serverAddr.sin_addr.s_addr = taosInetAddr(host); + serverAddr.sin_addr.s_addr = inet_addr(host); memset(sendbuf, 0, BUFFER_SIZE); memset(recvbuf, 0, BUFFER_SIZE); diff --git a/src/kit/taosnetwork/server.c b/src/kit/taosnetwork/server.c index a2bb2b2b65..1c3bc6fa09 100644 --- a/src/kit/taosnetwork/server.c +++ b/src/kit/taosnetwork/server.c @@ -128,7 +128,7 @@ static void *bindTcpPort(void *sarg) { if (errno == EINTR) { continue; } else { - printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", taosInetNtoa(clientAddr.sin_addr), port, strerror(errno)); + printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno)); close(serverSocket); return NULL; } @@ -139,7 +139,7 @@ static void *bindTcpPort(void *sarg) { } } - printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); if (iDataNum > 0) { send(client, buffer, iDataNum, 0); break; @@ -188,7 +188,7 @@ static void *bindUdpPort(void *sarg) { continue; } if (iDataNum > 0) { - printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); + printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum); //printf("Read msg from udp:%s ... 
%s\n", buffer, buffer+iDataNum-16); sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size); diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index e5c3806d1b..ecc69ec3d3 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -67,7 +67,7 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op // TAOS_OS_FUNC_SOCKET_INET uint32_t taosInetAddr(char *ipAddr); -const char *taosInetNtoa(IN_ADDR ipInt); +const char *taosInetNtoa(struct in_addr ipInt); #ifdef __cplusplus } diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index bcacd8575a..8a51c389e9 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -65,7 +65,7 @@ uint32_t taosInetAddr(char *ipAddr) { return inet_addr(ipAddr); } -const char *taosInetNtoa(IN_ADDR ipInt) { +const char *taosInetNtoa(struct in_addr ipInt) { return inet_ntoa(ipInt); } diff --git a/src/os/src/windows/w64Socket.c b/src/os/src/windows/w64Socket.c index 6514c2f851..8fd198ba80 100644 --- a/src/os/src/windows/w64Socket.c +++ b/src/os/src/windows/w64Socket.c @@ -76,7 +76,7 @@ uint32_t taosInetAddr(char *ipAddr) { } } -const char *taosInetNtoa(IN_ADDR ipInt) { +const char *taosInetNtoa(struct in_addr ipInt) { // not thread safe, only for debug usage while print log static char tmpDstStr[16]; return inet_ntop(AF_INET, &ipInt, tmpDstStr, INET6_ADDRSTRLEN); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 21949e93af..f4aca91cba 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -323,7 +323,7 @@ static void *httpAcceptHttpConnection(void *arg) { } pContext->pThread = pThread; - sprintf(pContext->ipstr, "%s:%u", taosInetAddr(clientAddr.sin_addr), htons(clientAddr.sin_port)); + sprintf(pContext->ipstr, "%s:%u", taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); struct epoll_event event; event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP | EPOLLERR | EPOLLHUP | EPOLLRDHUP; From 75b91d88164bf7c678cfa3b0ceb0d414907e5434 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 14 Aug 2020 18:06:02 +0800 Subject: [PATCH 176/190] udpate taos sql md --- documentation20/webdocs/markdowndocs/TAOS SQL-ch.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 6c6a3afb60..293aac8d23 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -31,7 +31,7 @@ taos> DESCRIBE meters; - 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` - 内部函数now是服务器的当前时间 -- 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间 +- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间 - Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 - 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据 - TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d) @@ -994,4 +994,4 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER - 列名最大长度为65,最多允许1024列,最少需要2列,第一列必须是时间戳 - 标签最多允许128个,可以0个,标签总长度不超过16k个字符 - SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为8M -- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 \ No newline at end of file +- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 From c4f1db3ec7f8dd5f3059549908ea526a683336a0 Mon Sep 17 
00:00:00 2001 From: Shengliang Guan Date: Fri, 14 Aug 2020 18:50:29 +0800 Subject: [PATCH 177/190] TD-1057 not show error if create file failed --- src/kit/shell/src/shellEngine.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index abc4f7a02c..692b5e49a0 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -751,7 +751,9 @@ void read_history() { FILE *f = fopen(f_history, "r"); if (f == NULL) { +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s\n", f_history); +#endif return; } @@ -776,7 +778,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s for write\n", f_history); +#endif return; } From aa33984c28770c477b655a5886888b04176da7a3 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 14 Aug 2020 18:51:06 +0800 Subject: [PATCH 178/190] add influxdbTestWriteLoop.sh --- .../perftest-scripts/influxdbTestWriteLoop.sh | 45 ++++++++++++++----- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/tests/perftest-scripts/influxdbTestWriteLoop.sh b/tests/perftest-scripts/influxdbTestWriteLoop.sh index a9b10ac45d..2cbb456a0f 100755 --- a/tests/perftest-scripts/influxdbTestWriteLoop.sh +++ b/tests/perftest-scripts/influxdbTestWriteLoop.sh @@ -4,7 +4,7 @@ DATA_DIR=/mnt/root/testdata NUM_LOOP=1 NUM_OF_FILES=100 -rowsPerRequest=(1 100 500 1000 2000) +rowsPerRequest=(1 100 1000 10000 20000 50000 100000) function printTo { if $verbose ; then @@ -13,17 +13,21 @@ function printTo { } function runTest { - printf "R/R, " - for c in `seq 1 $clients`; do - if [ "$c" == "1" ]; then - printf "$c client, " - else - printf "$c clients, " - fi + declare -A avgRPR + + for r in ${!rowsPerRequest[@]}; do + for c in `seq 1 $clients`; do + avgRPR[$r, $c]=0 + done done - printf "\n" for r in ${rowsPerRequest[@]}; do + if [ "$r" == "1" ] || [ "$r" == "100" ] || [ "$r" == "1000" ]; then + NUM_OF_FILES=$clients + else + NUM_OF_FILES=100 + fi + printf "$r, " for c in `seq 1 $clients`; do totalRPR=0 @@ -39,13 +43,30 @@ function runTest { -numOfFiles $NUM_OF_FILES \ -writeClients $c \ -rowsPerRequest $r 2>&1 \ - | tee $OUTPUT_FILE + > $OUTPUT_FILE RPR=`cat $OUTPUT_FILE | grep speed | awk '{print $(NF-1)}'` totalRPR=`echo "scale=4; $totalRPR + $RPR" | bc` printTo "rows:$r, clients:$c, i:$i RPR:$RPR" done - avgRPR=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc` - printf "$avgRPR, " + avgRPR[$r,$c]=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc` + printTo "r:$r c:$c avgRPR:${avgRPR[$r, $c]}" + done + done + + printf "R/R, " + for c in `seq 1 $clients`; do + if [ "$c" == "1" ]; then + printf "$c client, " + else + printf "$c clients, " + fi + done + printf "\n" + + for r in ${!rowsPerRequest[@]}; do + printf "${rowsPerRequest[$r]}, " + for c in `seq 1 $clients`; do + printf "${avgRPR[$r,$c]}, " done printf "\n" done From b13a1dda0080f1ff43a573dc0960f0a08c786278 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 15 Aug 2020 04:25:34 +0000 Subject: [PATCH 179/190] TD-1147 --- .../pytest/tag_lite/too_many_tag_condtions.py | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 tests/pytest/tag_lite/too_many_tag_condtions.py diff --git a/tests/pytest/tag_lite/too_many_tag_condtions.py b/tests/pytest/tag_lite/too_many_tag_condtions.py new file mode 100644 index 0000000000..a40de405ff --- /dev/null +++ b/tests/pytest/tag_lite/too_many_tag_condtions.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log 
import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('======================== dnode1 start') + tbPrefix = "ta_cr_tb" + mtPrefix = "ta_cr_mt" + tbNum = 2 + rowNum = 10 + totalNum = 200 + tagCondsLimit = 1024 + tdLog.info('=============== step1: create tbl and prepare data') + i = 0 + i = 2 + mt = "%s%d" % (mtPrefix, i) + tb = "%s%d" % (tbPrefix, i) + + sql ='create table %s (ts timestamp, tbcol int) TAGS(tgcol int)'% (mt) + tdLog.info(sql) + tdSql.execute(sql) + for i in range(0, tbNum): + tblName = "%s%d"%(tbPrefix, i) + sql = 'create table %s using %s TAGS(%d)'%(tblName, mt, i) + tdSql.execute(sql) + for j in range(0, rowNum): + sql = "insert into %s values(now, %d)"%(tblName, j) + tdSql.execute(sql) + + sqlPrefix = "select * from %s where "%(mt) + for i in range(2, 2048, 1): + conds = "tgcol=1 and "* (i - 1) + conds = "%stgcol=1"%(conds) + sql = "%s%s"%(sqlPrefix, conds) + if i >= tagCondsLimit: + tdSql.error(sql) + else: + tdSql.query(sql) + #tdSql.checkRows(1) + + for i in range(2, 2048, 1): + conds = "" + for j in range(0, i - 1): + conds = conds + "tgcol=%d or "%(j%tbNum) + conds += "tgcol=%d"%(i%tbNum) + sql = sqlPrefix + conds + if i >= tagCondsLimit: + tdSql.error(sql) + else: + tdSql.query(sql) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From fae6f1f97583e8c174ab1368c6ec0dcf424f1a03 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sat, 15 Aug 2020 12:51:34 +0800 Subject: [PATCH 180/190] update doc: add jdbc example for data subscribe --- .../markdowndocs/advanced features-ch.md | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md index 690f2a6268..6f8ceca071 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md @@ -295,6 +295,142 @@ $ taos 这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。 您可以继续插入一些数据观察示例程序的输出。 +### jdbc使用数据订阅功能 + +(1)使用订阅功能前的数据准备 + +```shell +# 创建power库 +taos> create database power; +# 切换库 +taos> use power; +# 创建超级表 +taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupI +d int); +# 创建表 +taos> create table d1001 using meters tags ("Beijing.Chaoyang",2); +taos> create table d1002 using meters tags ("Beijing.Haidian",2); +# 插入测试数据 +taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); +taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); +# 从超级表meters查询current大于10的数据 +taos> select * from meters where current > 10; + ts | current | voltage | phase| location | groupid | +=========================================================================================================== + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | + 2020-08-15 
12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | +Query OK, 5 row(s) in set (0.004896s) +``` + +(2)使用jdbc提供的订阅功能 + +```java +public class SubscribeDemo { + private static final String topic = "topic_meter_current_bg_10"; + private static final String sql = "select * from meters where current > 10"; + + public static void main(String[] args) { + Connection connection = null; + Statement statement = null; + TSDBSubscribe subscribe = null; + long subscribeId = 0; + + try { + // 加载驱动 + Class.forName("com.taosdata.jdbc.TSDBDriver"); + // 获取Connectin + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata"; + connection = DriverManager.getConnection(jdbcUrl, properties); + System.out.println("create the connection"); + // 创建Subscribe + subscribe = ((TSDBConnection) connection).createSubscribe(); + // subscribe订阅topic,topic为主题名称,sql为查询语句,restart代表是否每次订阅接受历史数据 + subscribeId = subscribe.subscribe(topic, sql, true); + System.out.println("create a subscribe topic: " + topic + "@[" + subscribeId + "]"); + int count = 0; + while (true) { + // 消费数据 + TSDBResultSet resultSet = subscribe.consume(subscribeId); + // 打印结果集 + if (resultSet != null) { + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + int columnCount = metaData.getColumnCount(); + for (int i = 1; i <= columnCount; i++) { + System.out.print(metaData.getColumnLabel(i) + " : " + resultSet.getString(i) + "\t"); + } + System.out.println("\n===================="); + count++; + } + } + if (count > 10) + break; + TimeUnit.SECONDS.sleep(1); + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (null != subscribe && subscribeId != 0) { + subscribe.unsubscribe(subscribeId, true); + System.out.println("unsubscribe the top@[" + subscribeId + "]"); + } + if (statement != null) { + statement.close(); + System.out.println("close the statement."); + } + if (connection != null) { + connection.close(); + System.out.println("close the connection."); + } + } catch (SQLException throwables) { + throwables.printStackTrace(); + } + } + } +} +``` + +(3)订阅功能演示 + +运行demo,首先,subscribe会将满足情况的历史数据消费 + +```shell +# java -jar subscribe.jar + +ts : 1597464000000 current : 12.0 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597464600000 current : 12.3 voltage : 220 phase : 2 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597465200000 current : 12.2 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597464600000 current : 10.3 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 +==================== +ts : 1597465200000 current : 11.2 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 +==================== +``` + +接着,使用taos客户端向表中新增数据 + +```shell +# taos +taos> use power; +taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1); +``` + +查看数据消费情况 + +```shell +ts : 1597466400000 current : 12.4 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +``` + ## 缓存(Cache) From a6f1583f8132c933b731916d472867590a5aee0d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 15 Aug 2020 13:33:58 +0800 Subject: [PATCH 181/190] move macro definition position for random test. 
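The change below only moves `#define TAOS_RANDOM_FILE_FAIL_TEST` above the `#include "os.h"` line in each affected file. Presumably the point is that the macro must already be defined when the os headers are preprocessed, so the random-failure wrappers they guard with `#ifdef` are actually enabled for these translation units; defining it after the include leaves the already-expanded headers untouched. A reduced, self-contained illustration of that ordering rule (the macro and names here are made up, not the real TDengine ones):

```c
/* Illustration only: a header-style #ifdef sees a feature macro only if the
 * macro is defined before that header section is processed. */
#include <stdio.h>

#define ENABLE_FAIL_TEST  /* must appear before the "header" section below */

/* --- stand-in for a header such as os.h --------------------------------- */
#ifdef ENABLE_FAIL_TEST
#define WRITE_DESC "write() wrapped with failure injection"
#else
#define WRITE_DESC "plain write()"
#endif
/* ------------------------------------------------------------------------- */

int main(void) {
  /* prints the wrapped variant; moving the #define after the header section
   * would silently select the plain variant instead */
  printf("%s\n", WRITE_DESC);
  return 0;
}
```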
--- src/tsdb/src/tsdbFile.c | 5 +++-- src/tsdb/src/tsdbRWHelper.c | 6 ++++-- src/util/src/tkvstore.c | 4 +++- src/wal/src/walMain.c | 4 +++- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 8bc4be6d67..d960dfb7ba 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -15,13 +15,14 @@ #define _DEFAULT_SOURCE #include +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "talgo.h" #include "tchecksum.h" #include "tsdbMain.h" #include "tutil.h" -#define TAOS_RANDOM_FILE_FAIL_TEST const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; @@ -529,4 +530,4 @@ static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; } } -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index ed851a08c2..12199f491f 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -14,13 +14,15 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "talgo.h" #include "tchecksum.h" #include "tcoding.h" #include "tscompression.h" #include "tsdbMain.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM)) #define TSDB_KEY_COL_OFFSET 0 @@ -1651,4 +1653,4 @@ static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, if (tsdbWriteBlockToFile(pHelper, pFile, pDataCols, pCompBlock, isLast, true) < 0) return -1; return 0; -} \ No newline at end of file +} diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index dd0600ec3c..9fab4a5936 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -14,6 +14,9 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "hash.h" #include "taoserror.h" @@ -21,7 +24,6 @@ #include "tcoding.h" #include "tkvstore.h" #include "tulog.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define TD_KVSTORE_HEADER_SIZE 512 #define TD_KVSTORE_MAJOR_VERSION 1 diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c index 4ac8a096c6..bebad69f32 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walMain.c @@ -14,6 +14,9 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "tlog.h" #include "tchecksum.h" @@ -22,7 +25,6 @@ #include "taoserror.h" #include "twal.h" #include "tqueue.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define walPrefix "wal" From c5ae6d2aafee5b6090f863eb5d636dbbad597559 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Sat, 15 Aug 2020 13:43:20 +0800 Subject: [PATCH 182/190] fix td-1150 --- src/client/src/tscStream.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 60517a2f5c..9dd47888d2 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -136,7 +136,11 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { } pQueryInfo->window.ekey = etime; if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) { - tscSetRetryTimer(pStream, pSql, pStream->slidingTime); + int64_t timer = pStream->slidingTime; + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { + timer /= 1000l; + } + tscSetRetryTimer(pStream, pSql, timer); return; } } From 6143e72b1c4d9c637f929e393d36acb814f784df Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 15 Aug 2020 14:20:20 +0800 Subject: [PATCH 183/190] TD-1047 
iconv compile options --- cmake/define.inc | 1 + tests/script/jenkins/basic.txt | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/define.inc b/cmake/define.inc index 2e3c639ecb..d4f3b3c8bf 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -50,6 +50,7 @@ IF (TD_ARM_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_ARM_64_) ADD_DEFINITIONS(-D_TD_ARM_) + ADD_DEFINITIONS(-DUSE_LIBICONV) SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index e0f6b7b197..a48584b0ed 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -135,7 +135,6 @@ cd ../../../debug; make ./test.sh -f general/parser/limit2.sim ./test.sh -f general/parser/fill.sim ./test.sh -f general/parser/fill_stb.sim -#./test.sh -f general/parser/fill_us.sim ./test.sh -f general/parser/where.sim ./test.sh -f general/parser/slimit.sim ./test.sh -f general/parser/select_with_tags.sim @@ -143,7 +142,6 @@ cd ../../../debug; make ./test.sh -f general/parser/tags_dynamically_specifiy.sim ./test.sh -f general/parser/groupby.sim ./test.sh -f general/parser/set_tag_vals.sim -#./test.sh -f general/parser/sliding.sim ./test.sh -f general/parser/tags_filter.sim ./test.sh -f general/parser/slimit_alter_tags.sim ./test.sh -f general/parser/join.sim @@ -151,6 +149,8 @@ cd ../../../debug; make ./test.sh -f general/parser/binary_escapeCharacter.sim ./test.sh -f general/parser/bug.sim ./test.sh -f general/parser/repeatAlter.sim +./test.sh -f general/parser/union.sim +./test.sh -f general/parser/topbot.sim ./test.sh -f general/stable/disk.sim ./test.sh -f general/stable/dnode3.sim From 22cb8287b54ba0c7b69bcffc16fc9db0d22c805e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 15 Aug 2020 14:29:35 +0800 Subject: [PATCH 184/190] TD-1047 minor changes for libiconv --- cmake/define.inc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cmake/define.inc b/cmake/define.inc index d4f3b3c8bf..c72995159f 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -33,11 +33,7 @@ IF (TD_LINUX_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_LINUX_64) SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") - - FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) - IF (ICONV_INCLUDE_EXIST) - ADD_DEFINITIONS(-DUSE_LIBICONV) - ENDIF () + ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () IF (TD_LINUX_32) @@ -134,6 +130,7 @@ ENDIF () IF (TD_WINDOWS_32) ADD_DEFINITIONS(-D_TD_WINDOWS_32) + ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) From acdcd47243de8d0e5ccde73fac3edb36f92e73db Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 15 Aug 2020 14:52:38 +0800 Subject: [PATCH 185/190] a crash generated by dn3_mn1_replica_change.sim --- src/tsdb/src/tsdbRead.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c89ae0698a..ac3a6dac07 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1795,19 +1795,22 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { if (pQueryHandle->checkFiles) { bool exists = true; + int32_t code = getDataBlocksInFiles(pQueryHandle, &exists); if (code != TSDB_CODE_SUCCESS) { - return code; + pQueryHandle->activeIndex = 0; + pQueryHandle->checkFiles = false; + + return false; } if (exists) { - 
elapsedTime = taosGetTimestampUs() - stime; - pQueryHandle->cost.checkForNextTime += elapsedTime; + pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime); return exists; } pQueryHandle->activeIndex = 0; - pQueryHandle->checkFiles = false; + pQueryHandle->checkFiles = false; } // TODO: opt by consider the scan order From 40772c1e1f3ce691b247c2a70cfa92df206bffd2 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sat, 15 Aug 2020 12:51:34 +0800 Subject: [PATCH 186/190] update doc: add jdbc example for data subscribe --- .../markdowndocs/advanced features-ch.md | 122 ++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md index 690f2a6268..858f92b604 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md @@ -295,6 +295,128 @@ $ taos 这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。 您可以继续插入一些数据观察示例程序的输出。 +### jdbc使用数据订阅功能 + +(1)使用订阅功能前的数据准备 + +```shell +# 创建power库 +taos> create database power; +# 切换库 +taos> use power; +# 创建超级表 +taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupI +d int); +# 创建表 +taos> create table d1001 using meters tags ("Beijing.Chaoyang",2); +taos> create table d1002 using meters tags ("Beijing.Haidian",2); +# 插入测试数据 +taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); +taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); +# 从超级表meters查询current大于10的数据 +taos> select * from meters where current > 10; + ts | current | voltage | phase| location | groupid | +=========================================================================================================== + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | +Query OK, 5 row(s) in set (0.004896s) +``` + +(2)使用jdbc提供的订阅功能 + +```java +public class SubscribeDemo { + private static final String topic = "topic-meter-current-bg-10"; + private static final String sql = "select * from meters where current > 10"; + + public static void main(String[] args) { + Connection connection = null; + TSDBSubscribe subscribe = null; + + try { + // 加载驱动 + Class.forName("com.taosdata.jdbc.TSDBDriver"); + // 获取Connectin + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata"; + connection = DriverManager.getConnection(jdbcUrl, properties); + // 创建Subscribe,topic为主题名称,sql为查询语句,restar为true代表每次订阅消费历史数据 + subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); + int count = 0; + while (true) { + // 消费数据 + TSDBResultSet resultSet = subscribe.consume(); + // 打印结果集 + if (resultSet != null) { + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + int columnCount = metaData.getColumnCount(); + for (int i = 1; i <= columnCount; i++) 
{ + System.out.print(metaData.getColumnLabel(i) + " : " + resultSet.getString(i) + "\t"); + } + System.out.println("\n===================="); + count++; + } + } + if (count > 10) + break; + TimeUnit.SECONDS.sleep(1); + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (null != subscribe) + subscribe.close(true); + if (connection != null) + connection.close(); + } catch (SQLException throwables) { + throwables.printStackTrace(); + } + } + } +} +``` + +(3)订阅功能演示 + +运行demo,首先,subscribe会将满足情况的历史数据消费 + +```shell +# java -jar subscribe.jar + +ts : 1597464000000 current : 12.0 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597464600000 current : 12.3 voltage : 220 phase : 2 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597465200000 current : 12.2 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +ts : 1597464600000 current : 10.3 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 +==================== +ts : 1597465200000 current : 11.2 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 +==================== +``` + +接着,使用taos客户端向表中新增数据 + +```shell +# taos +taos> use power; +taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1); +``` + +查看数据消费情况 + +```shell +ts : 1597466400000 current : 12.4 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 +==================== +``` + ## 缓存(Cache) From cef379d992b0bf6e708342e87894f6f1288a7b77 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Sat, 15 Aug 2020 16:24:20 +0800 Subject: [PATCH 187/190] update java document for subscription --- .../markdowndocs/advanced features-ch.md | 91 ++++++++----------- 1 file changed, 40 insertions(+), 51 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md index 858f92b604..b1d050c8cc 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md @@ -295,27 +295,30 @@ $ taos 这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。 您可以继续插入一些数据观察示例程序的输出。 -### jdbc使用数据订阅功能 +### Java 使用数据订阅功能 -(1)使用订阅功能前的数据准备 +订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation20/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 -```shell -# 创建power库 +下面以一个示例程序介绍其具体使用方法。它所完成的功能与前面介绍的 C 语言示例基本相同,也是订阅数据库中所有电流超过 10A 的记录。 + +#### 准备数据 + +```sql +# 创建 power 库 taos> create database power; # 切换库 taos> use power; # 创建超级表 -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupI -d int); +taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); # 创建表 -taos> create table d1001 using meters tags ("Beijing.Chaoyang",2); -taos> create table d1002 using meters tags ("Beijing.Haidian",2); +taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); +taos> create table d1002 using meters tags ("Beijing.Haidian", 2); # 插入测试数据 taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# 从超级表meters查询current大于10的数据 +# 从超级表 meters 查询电流大于 10A 的记录 taos> select * from meters where current > 10; - ts | current 
| voltage | phase| location | groupid | + ts | current | voltage | phase | location | groupid | =========================================================================================================== 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | @@ -325,7 +328,7 @@ taos> select * from meters where current > 10; Query OK, 5 row(s) in set (0.004896s) ``` -(2)使用jdbc提供的订阅功能 +#### 示例程序 ```java public class SubscribeDemo { @@ -337,42 +340,36 @@ public class SubscribeDemo { TSDBSubscribe subscribe = null; try { - // 加载驱动 Class.forName("com.taosdata.jdbc.TSDBDriver"); - // 获取Connectin Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata"; connection = DriverManager.getConnection(jdbcUrl, properties); - // 创建Subscribe,topic为主题名称,sql为查询语句,restar为true代表每次订阅消费历史数据 - subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); + subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); // 创建订阅 int count = 0; - while (true) { - // 消费数据 - TSDBResultSet resultSet = subscribe.consume(); - // 打印结果集 - if (resultSet != null) { - ResultSetMetaData metaData = resultSet.getMetaData(); - while (resultSet.next()) { - int columnCount = metaData.getColumnCount(); - for (int i = 1; i <= columnCount; i++) { - System.out.print(metaData.getColumnLabel(i) + " : " + resultSet.getString(i) + "\t"); - } - System.out.println("\n===================="); - count++; - } + while (count < 10) { + TimeUnit.SECONDS.sleep(1); // 等待1秒,避免频繁调用 consume,给服务端造成压力 + TSDBResultSet resultSet = subscribe.consume(); // 消费数据 + if (resultSet == null) { + continue; + } + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + int columnCount = metaData.getColumnCount(); + for (int i = 1; i <= columnCount; i++) { + System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t"); + } + System.out.println(); + count++; } - if (count > 10) - break; - TimeUnit.SECONDS.sleep(1); } } catch (Exception e) { e.printStackTrace(); } finally { try { if (null != subscribe) - subscribe.close(true); + subscribe.close(true); // 关闭订阅 if (connection != null) connection.close(); } catch (SQLException throwables) { @@ -383,38 +380,30 @@ public class SubscribeDemo { } ``` -(3)订阅功能演示 - -运行demo,首先,subscribe会将满足情况的历史数据消费 +运行示例程序,首先,它会消费符合查询条件的所有历史数据: ```shell # java -jar subscribe.jar -ts : 1597464000000 current : 12.0 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 -==================== -ts : 1597464600000 current : 12.3 voltage : 220 phase : 2 location : Beijing.Chaoyang groupid : 2 -==================== -ts : 1597465200000 current : 12.2 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 -==================== -ts : 1597464600000 current : 10.3 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 -==================== -ts : 1597465200000 current : 11.2 voltage : 220 phase : 1 location : Beijing.Haidian groupid : 2 -==================== +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 
location: Beijing.Haidian groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 ``` -接着,使用taos客户端向表中新增数据 +接着,使用 taos 客户端向表中新增一条数据: -```shell +```sql # taos taos> use power; taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1); ``` -查看数据消费情况 +因为这条数据的电流大于10A,示例程序会将其消费: ```shell -ts : 1597466400000 current : 12.4 voltage : 220 phase : 1 location : Beijing.Chaoyang groupid : 2 -==================== +ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 ``` From b0c9fc3201fefcaba6643f4ac2c77ffdc4e0155c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sun, 16 Aug 2020 15:32:09 +0800 Subject: [PATCH 188/190] fix bug --- src/tsdb/src/tsdbMain.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index dd647ddd9b..733bca6af5 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -246,22 +246,18 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ } } strcpy(name, fname + prefixLen); - } else { // get the named file at the specified index. If not there, return 0 + } else { // get the named file at the specified index. If not there, return 0 + fname = malloc(prefixLen + strlen(name) + 2); + sprintf(fname, "%s/%s", prefix, name); + if (access(fname, F_OK) != 0) return 0; if (*index == TSDB_META_FILE_INDEX) { // get meta file - fname = malloc(prefixLen + strlen(name) + 2); - sprintf(fname, "%s/%s", prefix, name); tsdbGetStoreInfo(fname, &magic, size); - taosFree(fname); - taosFree(sdup); - return magic; } else { - fname = malloc(prefixLen + strlen(name) + 2); - sprintf(fname, "%s/%s", prefix, name); tsdbGetFileInfoImpl(fname, &magic, size); - taosFree(fname); - taosFree(sdup); - return magic; } + taosFree(fname); + taosFree(sdup); + return magic; } if (stat(fname, &fState) < 0) { From df288e779e5e64b05d56878f4109a8bde03f1084 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sun, 16 Aug 2020 15:35:51 +0800 Subject: [PATCH 189/190] fix sync memory leak --- src/tsdb/src/tsdbMain.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 733bca6af5..e2d7d03eda 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -249,7 +249,11 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ } else { // get the named file at the specified index. If not there, return 0 fname = malloc(prefixLen + strlen(name) + 2); sprintf(fname, "%s/%s", prefix, name); - if (access(fname, F_OK) != 0) return 0; + if (access(fname, F_OK) != 0) { + taosFree(fname); + taosFree(sdup) + return 0; + } if (*index == TSDB_META_FILE_INDEX) { // get meta file tsdbGetStoreInfo(fname, &magic, size); } else { From 051d2c33c64b96b728ee60a01aa77ae1f2b6c8fe Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sun, 16 Aug 2020 15:37:12 +0800 Subject: [PATCH 190/190] fix compile error --- src/tsdb/src/tsdbMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index e2d7d03eda..bc979cca84 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -251,7 +251,7 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ sprintf(fname, "%s/%s", prefix, name); if (access(fname, F_OK) != 0) { taosFree(fname); - taosFree(sdup) + taosFree(sdup); return 0; } if (*index == TSDB_META_FILE_INDEX) { // get meta file
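The last three patches all land in the same early-return path of `tsdbGetFileInfo`: first the named-file branch is restructured around an `access()` check, then that early return is taught to release the buffers it had already allocated, and finally the missing semicolon after `taosFree(sdup)` is added. A standalone sketch of the cleanup pattern the series converges on — plain `malloc`/`free` and a placeholder magic value stand in for `taosFree`, `tsdbGetStoreInfo` and `tsdbGetFileInfoImpl`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Named-file lookup after the fixes: every exit path releases both buffers
 * that were allocated before the access() check. */
static unsigned int getNamedFileMagic(const char *prefix, const char *name) {
    char *sdup  = strdup(prefix);                             /* duplicated directory prefix */
    char *fname = malloc(strlen(prefix) + strlen(name) + 2);  /* "<prefix>/<name>" + NUL     */
    if (sdup == NULL || fname == NULL) {
        free(sdup);
        free(fname);
        return 0;
    }

    sprintf(fname, "%s/%s", prefix, name);

    if (access(fname, F_OK) != 0) {   /* file missing: free both buffers, then return 0
                                       * (skipping these frees is the leak fixed above) */
        free(fname);
        free(sdup);
        return 0;
    }

    unsigned int magic = 1;           /* placeholder for tsdbGetStoreInfo()/tsdbGetFileInfoImpl() */
    free(fname);
    free(sdup);
    return magic;
}

int main(void) {
    printf("magic = %u\n", getNamedFileMagic("/tmp", "no-such-file"));
    return 0;
}
```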