From 6cd9fac7da0592e784f150d9889ee1e1bdc020f5 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 16 May 2023 11:30:08 +0800 Subject: [PATCH 01/59] fix linear interpolation bugs --- source/libs/executor/src/timesliceoperator.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index d56595dae9..cc2fffae03 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -310,6 +310,11 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } + if (end.key != INT64_MIN && end.key < pSliceInfo->current) { + hasInterp = false; + break; + } + if (start.key == INT64_MIN || end.key == INT64_MIN) { colDataSetNULL(pDst, rows); break; From ace27326b0071200ffbabae7d50b1e5b04b0da09 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 16 May 2023 14:13:40 +0800 Subject: [PATCH 02/59] enable ignore_null value param in client --- source/libs/function/src/builtins.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index fe98a1dd53..36139f6d3c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1568,17 +1568,30 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); uint8_t dbPrec = pFunc->node.resType.precision; - // if (1 != numOfParams && 3 != numOfParams && 4 != numOfParams) { - if (1 != numOfParams) { + if (2 < numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 0)); uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - if ((!IS_NUMERIC_TYPE(paraType) && !IS_BOOLEAN_TYPE(paraType))|| QUERY_NODE_VALUE == nodeType) { + if ((!IS_NUMERIC_TYPE(paraType) && !IS_BOOLEAN_TYPE(paraType)) || QUERY_NODE_VALUE == nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + if (2 == numOfParams) { + nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 1)); + paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); + if (pValue->datum.i != 0 && pValue->datum.i != 1) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "INTERP function second parameter should be 0/1"); + } + } + #if 0 if (3 <= numOfParams) { int64_t timeVal[2] = {0}; From 6879a784ae62dc07ab63a7eb2fc933bbb11d413b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 16 May 2023 15:07:07 +0800 Subject: [PATCH 03/59] reserve value node --- source/libs/function/src/builtins.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 36139f6d3c..9529b91fa0 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1590,6 +1590,8 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "INTERP function second parameter should be 0/1"); } + + pValue->notReserved = true; } #if 0 From 
375aa2f26bf317d476ac37e725dac010d5be93b2 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 16 May 2023 16:33:28 +0800 Subject: [PATCH 04/59] feat(query): interp support ignore null value opition --- source/libs/executor/src/timesliceoperator.c | 50 ++++++++++++++++++-- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index cc2fffae03..e41310d95c 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -209,6 +209,45 @@ static bool isGroupKeyFunc(SExprInfo* pExprInfo) { return (functionType == FUNCTION_TYPE_GROUP_KEY); } +static bool getIgoreNullRes(SExprSupp* pExprSup) { + for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { + SExprInfo* pExprInfo = &pExprSup->pExprInfo[i]; + + if (isInterpFunc(pExprInfo)) { + for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { + SFunctParam *pFuncParam = &pExprInfo->base.pParam[j]; + if (pFuncParam->type == FUNC_PARAM_TYPE_VALUE) { + return pFuncParam->param.i ? true : false; + } + } + } + } + + return false; +} + +static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t index, bool ignoreNull) { + if (!ignoreNull) { + return false; + } + + for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { + SExprInfo* pExprInfo = &pExprSup->pExprInfo[j]; + + if (isInterpFunc(pExprInfo)) { + int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId; + SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, srcSlot); + + if (colDataIsNull_s(pSrc, index)) { + return true; + } + } + } + + return false; +} + + static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, SSDataBlock* pSrcBlock, int32_t index, bool beforeTs) { int32_t rows = pResBlock->info.rows; @@ -590,7 +629,7 @@ static int32_t resetKeeperInfo(STimeSliceOperatorInfo* pInfo) { } static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pSliceInfo, SSDataBlock* pBlock, - SExecTaskInfo* pTaskInfo) { + SExecTaskInfo* pTaskInfo, bool ignoreNull) { SSDataBlock* pResBlock = pSliceInfo->pRes; SInterval* pInterval = &pSliceInfo->interval; @@ -603,6 +642,10 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS T_LONG_JMP(pTaskInfo->env, TSDB_CODE_FUNC_DUP_TIMESTAMP); } + if (checkNullRow(&pOperator->exprSupp, pBlock, i, ignoreNull)) { + continue; + } + if (pSliceInfo->current > pSliceInfo->win.ekey) { break; } @@ -732,6 +775,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { STimeSliceOperatorInfo* pSliceInfo = pOperator->info; SSDataBlock* pResBlock = pSliceInfo->pRes; SExprSupp* pSup = &pOperator->exprSupp; + bool ignoreNull = getIgoreNullRes(pSup); int32_t order = TSDB_ORDER_ASC; SInterval* pInterval = &pSliceInfo->interval; @@ -742,7 +786,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { while (1) { if (pSliceInfo->pNextGroupRes != NULL) { setInputDataBlock(pSup, pSliceInfo->pNextGroupRes, order, MAIN_SCAN, true); - doTimesliceImpl(pOperator, pSliceInfo, pSliceInfo->pNextGroupRes, pTaskInfo); + doTimesliceImpl(pOperator, pSliceInfo, pSliceInfo->pNextGroupRes, pTaskInfo, ignoreNull); copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pSliceInfo->pNextGroupRes); pSliceInfo->pNextGroupRes = NULL; } @@ -776,7 +820,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, 
pBlock, order, MAIN_SCAN, true); - doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo); + doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull); copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock); } From 1a59ac6c0acee70d5637a1b2b53cec277295f0cf Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 14:56:19 +0800 Subject: [PATCH 05/59] add test cases --- tests/system-test/2-query/interp.py | 901 +++++++++++++++++++++++++++- 1 file changed, 900 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index cdbfa0de84..2bc856b0ef 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -26,6 +26,12 @@ class TDTestCase: ctbname3 = "ctb3" num_of_ctables = 3 + tbname_null = "tb_null" + ctbname1_null = "ctb1_null" + ctbname2_null = "ctb2_null" + ctbname3_null = "ctb3_null" + stbname_null = "stb_null" + tdSql.prepare() tdLog.printNoPrefix("==========step1:create table") @@ -2912,8 +2918,901 @@ class TDTestCase: tdSql.error(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(null)") tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(null)") + tdLog.printNoPrefix("======step 14: test interp ignore null values") + tdSql.execute( + f'''create table if not exists {dbname}.{tbname_null} + (ts timestamp, c0 int, c1 float, c2 bool) + ''' + ) - tdLog.printNoPrefix("======step 14: test interp pseudo columns") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:01', 1, 1.0, true)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:02', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:03', 3, 3.0, false)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:04', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:05', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:06', 6, 6.0, true)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:07', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:08', 8, 8.0, false)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:09', 9, 9.0, true)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:10', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{tbname_null} values ('2020-02-02 00:00:11', NULL, NULL, NULL)") + + tdSql.execute( + f'''create table if not exists {dbname}.{stbname_null} + (ts timestamp, c0 int, c1 float, c2 bool) tags (t0 int) + ''' + ) + + tdSql.execute( + f'''create table if not exists {dbname}.{ctbname1_null} using {dbname}.{stbname_null} tags(1) + ''' + ) + + tdSql.execute( + f'''create table if not exists {dbname}.{ctbname2_null} using {dbname}.{stbname_null} tags(2) + ''' + ) + + tdSql.execute( + f'''create table if not exists {dbname}.{ctbname3_null} using {dbname}.{stbname_null} tags(3) + ''' + ) + + tdSql.execute(f"insert into {dbname}.{ctbname1_null} values ('2020-02-01 00:00:01', 1, 1.0, true)") + tdSql.execute(f"insert into {dbname}.{ctbname1_null} values ('2020-02-01 00:00:07', NULL, NULL, NULL)") + tdSql.execute(f"insert into 
{dbname}.{ctbname1_null} values ('2020-02-01 00:00:13', 13, 13.0, false)") + + tdSql.execute(f"insert into {dbname}.{ctbname2_null} values ('2020-02-01 00:00:03', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{ctbname2_null} values ('2020-02-01 00:00:09', 9, 9.0, true)") + tdSql.execute(f"insert into {dbname}.{ctbname2_null} values ('2020-02-01 00:00:15', 15, 15.0, false)") + + tdSql.execute(f"insert into {dbname}.{ctbname3_null} values ('2020-02-01 00:00:05', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{ctbname3_null} values ('2020-02-01 00:00:11', NULL, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.{ctbname3_null} values ('2020-02-01 00:00:17', NULL, NULL, NULL)") + + # fill null + # normal table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(NULL)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(NULL)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(NULL)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(NULL)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) 
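        # Note on the variants above (descriptive comment, not part of the patch): interp(c0) and
        # interp(c0, 0) keep NULL source rows as data points, so those timestamps return NULL and
        # _isfilled stays False; interp(c0, 1) skips NULL rows, so those timestamps are produced by
        # the FILL clause instead and _isfilled becomes True. The "where c0 is not null" query is
        # asserted to behave the same as interp(c0, 1).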
+ + # super table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + # fill value + # normal table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(value, 0)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(value, 0)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + 
tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(value, 0)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 0) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 0) + tdSql.checkData(4, 2, 0) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 0) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, 0) + tdSql.checkData(10, 2, 0) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(value, 0)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 0) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 0) + tdSql.checkData(4, 2, 0) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 0) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, 0) + tdSql.checkData(10, 2, 0) + + # super table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 0) + tdSql.checkData(2, 2, 0) + tdSql.checkData(3, 2, 0) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 0) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, 0) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from 
{dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 0) + tdSql.checkData(2, 2, 0) + tdSql.checkData(3, 2, 0) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 0) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, 0) + + # fill prev + # normal table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(prev)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(prev)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(prev)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 1) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 3) + tdSql.checkData(4, 2, 3) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 6) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, 9) + tdSql.checkData(10, 2, 9) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(prev)") + + tdSql.checkRows(11) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + tdSql.checkData(9, 1, True) + tdSql.checkData(10, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 1) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 3) + tdSql.checkData(4, 2, 3) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 6) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, 9) + tdSql.checkData(10, 2, 9) + + # super table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from 
{dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 1) + tdSql.checkData(2, 2, 1) + tdSql.checkData(3, 2, 1) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 9) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, 15) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, True) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 1) + tdSql.checkData(2, 2, 1) + tdSql.checkData(3, 2, 1) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 9) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, 15) + + # fill next + # normal table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, 
_isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 3) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 6) + tdSql.checkData(4, 2, 6) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 8) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 3) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 6) + tdSql.checkData(4, 2, 6) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 8) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + + # super table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(8) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 9) + tdSql.checkData(2, 2, 9) + tdSql.checkData(3, 2, 9) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 13) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(8) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + + tdSql.checkData(0, 2, 1) + 
tdSql.checkData(1, 2, 9) + tdSql.checkData(2, 2, 9) + tdSql.checkData(3, 2, 9) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 13) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + + # fill linear + # normal table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(linear)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(linear)") + + tdSql.checkRows(11) + for i in range (11): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, None) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + tdSql.checkData(9, 2, None) + tdSql.checkData(10, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(linear)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 4) + tdSql.checkData(4, 2, 5) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 7) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(linear)") + + tdSql.checkRows(9) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, False) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, True) + tdSql.checkData(5, 1, False) + tdSql.checkData(6, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 4) + tdSql.checkData(4, 2, 5) + tdSql.checkData(5, 2, 6) + tdSql.checkData(6, 2, 7) + tdSql.checkData(7, 2, 8) + tdSql.checkData(8, 2, 9) + + # super table + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(9) + for i in range (9): + tdSql.checkData(i, 1, False) + + tdSql.checkData(0, 
2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, None) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + tdSql.checkData(8, 2, None) + + tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(8) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 3) + tdSql.checkData(2, 2, 5) + tdSql.checkData(3, 2, 7) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 11) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + + + tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(8) + tdSql.checkData(0, 1, False) + tdSql.checkData(1, 1, True) + tdSql.checkData(2, 1, True) + tdSql.checkData(3, 1, True) + tdSql.checkData(4, 1, False) + tdSql.checkData(5, 1, True) + tdSql.checkData(6, 1, False) + tdSql.checkData(7, 1, False) + + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 3) + tdSql.checkData(2, 2, 5) + tdSql.checkData(3, 2, 7) + tdSql.checkData(4, 2, 9) + tdSql.checkData(5, 2, 11) + tdSql.checkData(6, 2, 13) + tdSql.checkData(7, 2, 15) + + + tdLog.printNoPrefix("======step 15: test interp pseudo columns") tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}") def stop(self): From 18530e8b33758bf0b867a478dea663deec02a86b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 15:57:44 +0800 Subject: [PATCH 06/59] add test cases --- tests/system-test/2-query/interp.py | 162 ++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 2bc856b0ef..8d654dd9e1 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -3143,6 +3143,42 @@ class TDTestCase: tdSql.checkData(7, 2, 15) tdSql.checkData(8, 2, None) + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(27) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 18): + tdSql.checkData(i, 0, 'ctb2_null') + + for i in range(18, 27): + tdSql.checkData(i, 0, 'ctb3_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(18, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(26, 1, '2020-02-01 00:00:17.000') + + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(NULL)") + + tdSql.checkRows(18) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 17): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(17, 1, 
'2020-02-01 00:00:17.000') + # fill value # normal table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(value, 0)") @@ -3316,6 +3352,42 @@ class TDTestCase: tdSql.checkData(7, 2, 15) tdSql.checkData(8, 2, 0) + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(27) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 18): + tdSql.checkData(i, 0, 'ctb2_null') + + for i in range(18, 27): + tdSql.checkData(i, 0, 'ctb3_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(18, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(26, 1, '2020-02-01 00:00:17.000') + + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(value, 0)") + + tdSql.checkRows(18) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 18): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') + # fill prev # normal table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(prev)") @@ -3489,6 +3561,36 @@ class TDTestCase: tdSql.checkData(7, 2, 15) tdSql.checkData(8, 2, 15) + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(14) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 13): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:09.000') + tdSql.checkData(13, 1, '2020-02-01 00:00:17.000') + + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(prev)") + + tdSql.checkRows(14) + for i in range(0, 9): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(9, 13): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') + + tdSql.checkData(9, 1, '2020-02-01 00:00:09.000') + tdSql.checkData(13, 1, '2020-02-01 00:00:17.000') + # fill next # normal table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") @@ -3650,6 +3752,36 @@ class TDTestCase: tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(15) + for i in range(0, 7): + tdSql.checkData(i, 0, 'ctb1_null') + + for i 
in range(7, 15): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') + + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') + + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") + + tdSql.checkRows(15) + for i in range(0, 7): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(7, 15): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') + + tdSql.checkData(7, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(14, 1, '2020-02-01 00:00:15.000') + # fill linear # normal table tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(linear)") @@ -3811,6 +3943,36 @@ class TDTestCase: tdSql.checkData(6, 2, 13) tdSql.checkData(7, 2, 15) + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(11) + for i in range(0, 7): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(7, 11): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') + + tdSql.checkData(7, 1, '2020-02-01 00:00:09.000') + tdSql.checkData(10, 1, '2020-02-01 00:00:15.000') + + tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.checkRows(11) + for i in range(0, 7): + tdSql.checkData(i, 0, 'ctb1_null') + + for i in range(7, 11): + tdSql.checkData(i, 0, 'ctb2_null') + + tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') + tdSql.checkData(6, 1, '2020-02-01 00:00:13.000') + + tdSql.checkData(7, 1, '2020-02-01 00:00:09.000') + tdSql.checkData(10, 1, '2020-02-01 00:00:15.000') + tdLog.printNoPrefix("======step 15: test interp pseudo columns") tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}") From 79d8b8c3d4ebc19c6e42a8c34fa16a007fb5dabd Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 16:06:12 +0800 Subject: [PATCH 07/59] add zh/en docs --- docs/en/12-taos-sql/10-function.md | 10 ++++++++-- docs/zh/12-taos-sql/10-function.md | 9 +++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 7e45ffa1df..a204415d65 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -867,10 +867,16 @@ FIRST(expr) ### INTERP ```sql -INTERP(expr) +INTERP(expr [, ignore_null_values]) + +ignore_null_values: { + 0 + | 1 +} ``` -**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. +**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0. 
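For illustration, a minimal sketch of the two modes (not part of the original patch), assuming a hypothetical table `t1` with a TIMESTAMP column `ts` and a nullable INT column `c0`; the query shape mirrors the test cases added earlier in this series:

```sql
-- Default (ignore_null_values = 0): rows whose c0 is NULL are kept as data points,
-- so INTERP returns NULL at those timestamps and _isfilled stays false.
SELECT _irowts, _isfilled, INTERP(c0)
FROM t1
RANGE('2020-02-02 00:00:01', '2020-02-02 00:00:11') EVERY(1s) FILL(LINEAR);

-- ignore_null_values = 1: NULL rows are skipped, so those timestamps are filled by
-- the FILL clause (linear interpolation here) and reported with _isfilled = true.
SELECT _irowts, _isfilled, INTERP(c0, 1)
FROM t1
RANGE('2020-02-02 00:00:01', '2020-02-02 00:00:11') EVERY(1s) FILL(LINEAR);
```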
+ **Return value type**: Same as the column being operated upon diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 458fc9c7a2..248554f931 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -869,10 +869,15 @@ FIRST(expr) ### INTERP ```sql -INTERP(expr) +INTERP(expr [, ignore_null_values]) + +ignore_null_values: { + 0 + | 1 +} ``` -**功能说明**:返回指定时间截面指定列的记录值或插值。 +**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 **返回数据类型**:同字段类型。 From 90ef52f40d9c1bbb48cbb9ff3617550b1e86fefe Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 17:11:02 +0800 Subject: [PATCH 08/59] fix fill(value, string) issue --- source/libs/executor/src/timesliceoperator.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index c59669fc53..e54057551d 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -271,15 +271,27 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) { float v = 0; - GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); + if (!IS_VAR_DATA_TYPE(pVar->nType)) { + GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); + } else { + v = taosStr2Float(varDataVal(pVar->pz), NULL); + } colDataSetVal(pDst, rows, (char*)&v, false); } else if (pDst->info.type == TSDB_DATA_TYPE_DOUBLE) { double v = 0; - GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); + if (!IS_VAR_DATA_TYPE(pVar->nType)) { + GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); + } else { + v = taosStr2Double(varDataVal(pVar->pz), NULL); + } colDataSetVal(pDst, rows, (char*)&v, false); } else if (IS_SIGNED_NUMERIC_TYPE(pDst->info.type)) { int64_t v = 0; - GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); + if (!IS_VAR_DATA_TYPE(pVar->nType)) { + GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); + } else { + v = taosStr2int64(varDataVal(pVar->pz)); + } colDataSetVal(pDst, rows, (char*)&v, false); } else if (IS_BOOLEAN_TYPE(pDst->info.type)) { bool v = false; From 30b152c434b1b1031afe7e5550431e49ac1cd318 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 17:17:36 +0800 Subject: [PATCH 09/59] add test cases --- tests/system-test/2-query/interp.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index cdbfa0de84..4cdaa2fa7d 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -182,6 +182,35 @@ class TDTestCase: tdSql.checkData(2, 0, 1) tdSql.checkData(3, 0, 1) + ## test fill value with string + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 'abc')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 0) + tdSql.checkData(2, 0, 0) + tdSql.checkData(3, 0, 0) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '123')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 123) + tdSql.checkData(1, 0, 123) + tdSql.checkData(2, 0, 123) + tdSql.checkData(3, 0, 123) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '123.123')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 
123) + tdSql.checkData(1, 0, 123) + tdSql.checkData(2, 0, 123) + tdSql.checkData(3, 0, 123) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '12abc')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 12) + tdSql.checkData(1, 0, 12) + tdSql.checkData(2, 0, 12) + tdSql.checkData(3, 0, 12) + tdLog.printNoPrefix("==========step5:fill prev") ## {. . .} From 20684f873ea7791abbdd5ec1a3f994d94a8dd2a8 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 17:49:07 +0800 Subject: [PATCH 10/59] fix interp in nested query report error --- source/libs/parser/src/parTranslater.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c5fe3a1f73..486d7ab26c 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1523,9 +1523,9 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; SNode* pTable = pSelect->pFromTable; - if ((NULL != pTable && QUERY_NODE_REAL_TABLE != nodeType(pTable))) { + if (NULL != pTable && QUERY_NODE_REAL_TABLE != nodeType(pTable) && QUERY_NODE_TEMP_TABLE != nodeType(pTable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, - "%s is only supported in single table query", pFunc->functionName); + "unsupported table type for %s query", pFunc->functionName); } if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) { From 312e77f7c2b49118ee6a9737d38a5c6056cafb7c Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 17 May 2023 17:53:36 +0800 Subject: [PATCH 11/59] add test cases --- tests/system-test/2-query/interp.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 4cdaa2fa7d..a4b81b1e3b 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2945,6 +2945,13 @@ class TDTestCase: tdLog.printNoPrefix("======step 14: test interp pseudo columns") tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}") + tdLog.printNoPrefix("======step 14: test interp in nested query") + tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{stbname}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{stbname}) partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") From cf0d3ddde3f3ea052d84f1b78a4cfa11ac0b5f85 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 17 May 2023 20:27:12 +0800 Subject: [PATCH 12/59] feat: support update active code by SQL --- source/common/src/tglobal.c | 12 ++++ source/dnode/mnode/impl/src/mndDnode.c | 88 +++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 
237a52efe5..ac90d4773c 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1444,6 +1444,18 @@ void taosCfgDynamicOptions(const char *option, const char *value) { return; } + if (strcasecmp(option, "activeCode") == 0) { + // TODO + printf("%s:%d set activeCode: %s\n", __func__, __LINE__, value); + return; + } + + if (strcasecmp(option, "cActiveCode") == 0) { + // TODO + printf("%s:%d set cActiveCode: %s\n", __func__, __LINE__, value); + return; + } + const char *options[] = { "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag", diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index d4cbcaaacd..3034b3b30a 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -635,6 +635,70 @@ _OVER: return code; } +static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq) { + int32_t code = -1; + int32_t action = -1; + SSdbRaw *pRaw = NULL; + STrans *pTrans = NULL; + + if (strncasecmp(pCfgReq->config, "activeCode", 10) == 0) { + action = 0; + } else if (strncasecmp(pCfgReq->config, "cActiveCode", 11) == 0) { + action = 1; + } + + if (action == -1) { + code = TSDB_CODE_INVALID_CFG; + mWarn("dnode:%d, config dnode, app:%p config:%s value:%s, failed since %s", pCfgReq->dnodeId, pReq->info.ahandle, + pCfgReq->config, pCfgReq->value, tstrerror(code)); + + return code; + } + + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + while (1) { + SDnodeObj *pDnode = NULL; + pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); + if (pIter == NULL) break; + + if (pDnode->id != pCfgReq->dnodeId && pCfgReq->dnodeId != -1) { + continue; + } + + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "cfg-dnode"); + if (pTrans == NULL) goto _OVER; + if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER; + + SDnodeObj tmpDnode = *pDnode; + if (action == 0) { + strncpy(tmpDnode.active, pCfgReq->value, TSDB_ACTIVE_KEY_LEN); + } else if (action == 1) { + strncpy(tmpDnode.connActive, pCfgReq->value, TSDB_CONN_ACTIVE_KEY_LEN); + } else { + ASSERT(0); + } + + pRaw = mndDnodeActionEncode(&tmpDnode); + if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER; + (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); + pRaw = NULL; + + mInfo("dnode:%d, config dnode, cfg:%d, app:%p config:%s value:%s", pDnode->id, pCfgReq->dnodeId, pReq->info.ahandle, + pCfgReq->config, pCfgReq->value); + + sdbRelease(pSdb, pDnode); + } + + if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; + code = 0; + +_OVER: + mndTransDrop(pTrans); + sdbFreeRaw(pRaw); + return code; +} + static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -970,6 +1034,28 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { strcpy(dcfgReq.config, "monitor"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); +#ifdef TD_ENTERPRISE + } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0 || strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { + int32_t idx = (strncasecmp(cfgReq.config, "a", 1) == 0) ? 
10 : 11; + if (' ' != cfgReq.config[idx] && 0 != cfgReq.config[idx]) { + mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + + if ((idx == 10 && strlen(cfgReq.value) > TSDB_ACTIVE_KEY_LEN) || + (idx == 11 && strlen(cfgReq.value) > TSDB_CONN_ACTIVE_KEY_LEN)) { + mError("dnode:%d, failed to config activeCode since value out of range. conf:%s, val:%s", cfgReq.dnodeId, + cfgReq.config, cfgReq.value); + terrno = TSDB_CODE_INVALID_OPTION; + return -1; + } + + strcpy(dcfgReq.config, idx == 10 ? "activeCode" : "cActiveCode"); + snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); + + mndConfigDnode(pMnode, pReq, &cfgReq); +#endif } else { bool findOpt = false; for (int32_t d = 0; d < optionSize; ++d) { @@ -1023,7 +1109,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq); mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle, dcfgReq.config, dcfgReq.value); - SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen, .info = pReq->info}; + SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen}; tmsgSendReq(&epSet, &rpcMsg); code = 0; } From 5098d440687fea2a96dcb286e6841e0ec40821e4 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 12:59:44 +0800 Subject: [PATCH 13/59] feat: support update active code in sdb --- source/dnode/mnode/impl/src/mndDnode.c | 45 +++++++++++++++++--------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 3034b3b30a..0042206bc5 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -227,6 +227,14 @@ static int32_t mndDnodeActionDelete(SSdb *pSdb, SDnodeObj *pDnode) { static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew) { mTrace("dnode:%d, perform update action, old row:%p new row:%p", pOld->id, pOld, pNew); pOld->updateTime = pNew->updateTime; +#ifdef TD_ENTERPRISE + if (strncmp(pOld->active, pNew->active, TSDB_ACTIVE_KEY_LEN) != 0) { + strncpy(pOld->active, pNew->active, TSDB_ACTIVE_KEY_LEN); + } + if (strncmp(pOld->connActive, pNew->connActive, TSDB_CONN_ACTIVE_KEY_LEN) != 0) { + strncpy(pOld->connActive, pNew->connActive, TSDB_CONN_ACTIVE_KEY_LEN); + } +#endif return 0; } @@ -636,7 +644,6 @@ _OVER: } static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq) { - int32_t code = -1; int32_t action = -1; SSdbRaw *pRaw = NULL; STrans *pTrans = NULL; @@ -648,11 +655,11 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg } if (action == -1) { - code = TSDB_CODE_INVALID_CFG; + terrno = TSDB_CODE_INVALID_CFG; mWarn("dnode:%d, config dnode, app:%p config:%s value:%s, failed since %s", pCfgReq->dnodeId, pReq->info.ahandle, - pCfgReq->config, pCfgReq->value, tstrerror(code)); + pCfgReq->config, pCfgReq->value, terrstr()); - return code; + return -1; } SSdb *pSdb = pMnode->pSdb; @@ -666,9 +673,11 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg continue; } - pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "cfg-dnode"); - if (pTrans == NULL) goto _OVER; - if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER; + if (!pTrans) { + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, 
pReq, "cfg-dnode"); + if (!pTrans) goto _OVER; + if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER; + } SDnodeObj tmpDnode = *pDnode; if (action == 0) { @@ -690,13 +699,14 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg sdbRelease(pSdb, pDnode); } - if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; - code = 0; + if (pTrans && mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; + + terrno = 0; _OVER: mndTransDrop(pTrans); sdbFreeRaw(pRaw); - return code; + return terrno; } static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) { @@ -1042,11 +1052,11 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_CFG; return -1; } - - if ((idx == 10 && strlen(cfgReq.value) > TSDB_ACTIVE_KEY_LEN) || - (idx == 11 && strlen(cfgReq.value) > TSDB_CONN_ACTIVE_KEY_LEN)) { - mError("dnode:%d, failed to config activeCode since value out of range. conf:%s, val:%s", cfgReq.dnodeId, - cfgReq.config, cfgReq.value); + int32_t vlen = strlen(cfgReq.value); + if ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || + (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN -1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))) { + mError("dnode:%d, failed to config activeCode since invalid vlen. conf:%s, val:%s", cfgReq.dnodeId, cfgReq.config, + cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; return -1; } @@ -1054,7 +1064,10 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { strcpy(dcfgReq.config, idx == 10 ? "activeCode" : "cActiveCode"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); - mndConfigDnode(pMnode, pReq, &cfgReq); + if (mndConfigDnode(pMnode, pReq, &cfgReq) != 0) { + mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr()); + return -1; + } #endif } else { bool findOpt = false; From ab06dd04f4da10b45eb34d8f116d337d88b9d161 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 13:01:28 +0800 Subject: [PATCH 14/59] feat: support update active code in sdb --- source/dnode/mnode/impl/src/mndDnode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 0042206bc5..bafcd0bdee 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1054,7 +1054,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { } int32_t vlen = strlen(cfgReq.value); if ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || - (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN -1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))) { + (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN -1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1)))) { mError("dnode:%d, failed to config activeCode since invalid vlen. 
conf:%s, val:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; From 0e891c5c1457cb46e07177149002a0eb3d40b8ab Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 13:30:45 +0800 Subject: [PATCH 15/59] chore: more code --- source/dnode/mnode/impl/src/mndDnode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index bafcd0bdee..cd7cefc2e4 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1053,8 +1053,8 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { return -1; } int32_t vlen = strlen(cfgReq.value); - if ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || - (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN -1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1)))) { + if (vlen > 0 && ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || + (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))))) { mError("dnode:%d, failed to config activeCode since invalid vlen. conf:%s, val:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; From 1de90208f7418debcd0e708205bd9478b111dfaf Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 18:02:48 +0800 Subject: [PATCH 16/59] test: test case for alter dnode active code --- include/util/tdef.h | 4 +- source/dnode/mnode/impl/src/mndDnode.c | 4 +- .../0-others/information_schema.py | 44 ++++++++++++++++++- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/include/util/tdef.h b/include/util/tdef.h index 427a49fd4e..925b2ed520 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -267,8 +267,8 @@ typedef enum ELogicConditionType { #define TSDB_DNODE_CONFIG_LEN 128 #define TSDB_DNODE_VALUE_LEN 256 -#define TSDB_ACTIVE_KEY_LEN 109 // history 109:? -#define TSDB_CONN_ACTIVE_KEY_LEN 257 // history 257:? +#define TSDB_ACTIVE_KEY_LEN 109 +#define TSDB_CONN_ACTIVE_KEY_LEN 255 #define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index cd7cefc2e4..fa9552e08f 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1055,8 +1055,8 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { int32_t vlen = strlen(cfgReq.value); if (vlen > 0 && ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))))) { - mError("dnode:%d, failed to config activeCode since invalid vlen. conf:%s, val:%s", cfgReq.dnodeId, cfgReq.config, - cfgReq.value); + mError("dnode:%d, failed to config activeCode since invalid vlen:%d. 
conf:%s, val:%s", cfgReq.dnodeId, vlen, + cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; return -1; } diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 3c4a71c3e4..ef631ee8d9 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -162,7 +162,48 @@ class TDTestCase: for t in range (2): tdSql.query(f'select * from information_schema.ins_columns where db_name="db2" and table_type=="NORMAL_TABLE"') tdSql.checkEqual(20470,len(tdSql.queryResult)) - + + def ins_dnodes_check(self): + tdSql.execute('drop database if exists db2') + tdSql.execute('create database if not exists db2 vgroups 1 replica 1') + tdSql.query(f'select * from information_schema.ins_dnodes') + result = tdSql.queryResult + tdSql.checkEqual(result[0][0],1) + tdSql.checkEqual(result[0][8],"") + tdSql.checkEqual(result[0][9],"") + self.str107 = 'Hc7VCc+' + for t in range (10): + self.str107 += 'tP+2soIXpP' + self.str108 = self.str107 + '=' + self.str109 = self.str108 + '+' + self.str254 = self.str108 + self.str108 + '0123456789001234567890012345678901234567' + self.str255 = self.str254 + '=' + self.str510 = self.str255 + self.str255 + tdSql.error('alter dnode 1 "activeCode" "a"') + tdSql.error('alter dnode 1 "activeCode" "' + self.str107 + '"') + tdSql.execute('alter all dnodes "activeCode" "' + self.str108 + '"') + tdSql.error('alter dnode 1 "activeCode" "' + self.str109 + '"') + tdSql.error('alter all dnodes "activeCode" "' + self.str510 + '"') + tdSql.query(f'select * from information_schema.ins_dnodes') + tdSql.checkEqual(tdSql.queryResult[0][8],self.str108) + tdSql.execute('alter dnode 1 "activeCode" ""') + tdSql.query(f'select active_code,c_active_code from information_schema.ins_dnodes') + tdSql.checkEqual(tdSql.queryResult[0][0],"") + tdSql.checkEqual(tdSql.queryResult[0][1],'') + tdSql.error('alter dnode 1 "cActiveCode" "a"') + tdSql.error('alter dnode 1 "cActiveCode" "' + self.str107 + '"') + tdSql.execute('alter dnode 1 "cActiveCode" "' + self.str254 + '"') + tdSql.error('alter all dnodes "cActiveCode" "' + self.str255 + '"') + tdSql.error('alter dnode 1 "cActiveCode" "' + self.str510 + '"') + tdSql.query(f'select active_code,c_active_code from information_schema.ins_dnodes') + tdSql.checkEqual(tdSql.queryResult[0][0],"") + tdSql.checkEqual(tdSql.queryResult[0][1],self.str254) + tdSql.execute('alter dnode 1 "cActiveCode" "' + self.str109 + '"') + tdSql.query(f'show dnodes') + tdSql.checkEqual(tdSql.queryResult[0][0],self.str109) + tdSql.execute('alter all dnodes "activeCode" ""') + tdSql.query(f'select c_active_code from information_schema.ins_dnodes') + tdSql.checkEqual(tdSql.queryResult[0][0],'') def run(self): self.prepare_data() @@ -170,6 +211,7 @@ class TDTestCase: self.ins_columns_check() # self.ins_col_check_4096() self.ins_stable_check() + self.ins_dnodes_check() def stop(self): From 0a41ba157386e8ef08dff818d6b012a017ce686d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 18 May 2023 18:48:00 +0800 Subject: [PATCH 17/59] add test cases --- source/libs/parser/src/parTranslater.c | 5 ----- tests/system-test/2-query/interp.py | 11 +++++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 486d7ab26c..3a172b1b8a 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1523,11 +1523,6 @@ static int32_t 
translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; SNode* pTable = pSelect->pFromTable; - if (NULL != pTable && QUERY_NODE_REAL_TABLE != nodeType(pTable) && QUERY_NODE_TEMP_TABLE != nodeType(pTable)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, - "unsupported table type for %s query", pFunc->functionName); - } - if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index a4b81b1e3b..fc68aff4a3 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2952,6 +2952,17 @@ class TDTestCase: tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{stbname}) partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + + tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from (select * from {ctbname1}, {ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") From e3f194ed9bb3e64d6dbd557bf4258ba302056502 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 19:13:04 +0800 Subject: [PATCH 18/59] chore: code optimization --- source/dnode/mnode/impl/src/mndDnode.c | 66 +++++++++++++++----------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index fa9552e08f..23580c17e1 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -44,6 +44,11 @@ static const char *offlineReason[] = { "unknown", }; +enum { + DND_ACTIVE_CODE, + DND_CONN_ACTIVE_CODE, +}; + static int32_t mndCreateDefaultDnode(SMnode *pMnode); static 
SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode); static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw); @@ -643,25 +648,10 @@ _OVER: return code; } -static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq) { - int32_t action = -1; +static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq, int8_t action) { SSdbRaw *pRaw = NULL; STrans *pTrans = NULL; - if (strncasecmp(pCfgReq->config, "activeCode", 10) == 0) { - action = 0; - } else if (strncasecmp(pCfgReq->config, "cActiveCode", 11) == 0) { - action = 1; - } - - if (action == -1) { - terrno = TSDB_CODE_INVALID_CFG; - mWarn("dnode:%d, config dnode, app:%p config:%s value:%s, failed since %s", pCfgReq->dnodeId, pReq->info.ahandle, - pCfgReq->config, pCfgReq->value, terrstr()); - - return -1; - } - SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; while (1) { @@ -674,18 +664,19 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg } if (!pTrans) { - pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "cfg-dnode"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "config-dnode"); if (!pTrans) goto _OVER; if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER; } SDnodeObj tmpDnode = *pDnode; - if (action == 0) { + if (action == DND_ACTIVE_CODE) { strncpy(tmpDnode.active, pCfgReq->value, TSDB_ACTIVE_KEY_LEN); - } else if (action == 1) { + } else if (action == DND_CONN_ACTIVE_CODE) { strncpy(tmpDnode.connActive, pCfgReq->value, TSDB_CONN_ACTIVE_KEY_LEN); } else { - ASSERT(0); + terrno = TSDB_CODE_INVALID_CFG; + goto _OVER; } pRaw = mndDnodeActionEncode(&tmpDnode); @@ -1045,27 +1036,46 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { strcpy(dcfgReq.config, "monitor"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); #ifdef TD_ENTERPRISE - } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0 || strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { - int32_t idx = (strncasecmp(cfgReq.config, "a", 1) == 0) ? 10 : 11; - if (' ' != cfgReq.config[idx] && 0 != cfgReq.config[idx]) { + } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0) { + if (' ' != cfgReq.config[10] && 0 != cfgReq.config[10]) { mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); terrno = TSDB_CODE_INVALID_CFG; return -1; } int32_t vlen = strlen(cfgReq.value); - if (vlen > 0 && ((idx == 10 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || - (idx == 11 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))))) { + if (vlen > 0 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) { mError("dnode:%d, failed to config activeCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen, cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; return -1; } - strcpy(dcfgReq.config, idx == 10 ? 
"activeCode" : "cActiveCode"); + strcpy(dcfgReq.config, "activeCode"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); - if (mndConfigDnode(pMnode, pReq, &cfgReq) != 0) { - mError("dnode:%d, failed to config since %s", cfgReq.dnodeId, terrstr()); + if (mndConfigDnode(pMnode, pReq, &cfgReq, DND_ACTIVE_CODE) != 0) { + mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr()); + return -1; + } + } else if (strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { + if (' ' != cfgReq.config[11] && 0 != cfgReq.config[11]) { + mError("dnode:%d, failed to config cActiveCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + int32_t vlen = strlen(cfgReq.value); + if (vlen > 0 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))) { + mError("dnode:%d, failed to config cActiveCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen, + cfgReq.config, cfgReq.value); + terrno = TSDB_CODE_INVALID_OPTION; + return -1; + } + + strcpy(dcfgReq.config, "cActiveCode"); + snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); + + if (mndConfigDnode(pMnode, pReq, &cfgReq, DND_CONN_ACTIVE_CODE) != 0) { + mError("dnode:%d, failed to config cActiveCode since %s", cfgReq.dnodeId, terrstr()); return -1; } #endif From 24f368c71a33c1700ab0a348257983e8790e05f0 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 20:22:12 +0800 Subject: [PATCH 19/59] enh: read active code from cfg --- include/common/tglobal.h | 4 ++++ source/common/src/tglobal.c | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 8509d39793..c16d82d801 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -77,6 +77,10 @@ extern int32_t tsElectInterval; extern int32_t tsHeartbeatInterval; extern int32_t tsHeartbeatTimeout; +// dnode +extern char tsActive[]; +extern char tsConnActive[]; + // vnode extern int64_t tsVndCommitMaxIntervalMs; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index ac90d4773c..57a29d2db9 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -66,6 +66,10 @@ int32_t tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; int32_t tsHeartbeatTimeout = 20 * 1000; +// dnode +char tsActive[TSDB_ACTIVE_KEY_LEN] = {0}; +char tsConnActive[TSDB_CONN_ACTIVE_KEY_LEN] = {0}; + // vnode int64_t tsVndCommitMaxIntervalMs = 600 * 1000; @@ -490,6 +494,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1; + if (cfgAddString(pCfg, "activeCode", tsActive, 0) != 0) return -1; + if (cfgAddString(pCfg, "cActiveCode", tsConnActive, 0) != 0) return -1; + if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1; if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1; if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1; @@ -885,6 +892,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64; tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64; + tstrncpy(tsActive, cfgGetItem(pCfg, "activeCode")->str, sizeof(tsActive)); + tstrncpy(tsConnActive, cfgGetItem(pCfg, "cActiveCode")->str, sizeof(tsConnActive)); + tsStartUdfd = cfgGetItem(pCfg, 
"udf")->bval; tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs)); tstrncpy(tsUdfdLdLibPath, cfgGetItem(pCfg, "udfdLdLibPath")->str, sizeof(tsUdfdLdLibPath)); @@ -927,6 +937,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsAsyncLog = cfgGetItem(pCfg, "asyncLog")->bval; } else if (strcasecmp("assert", name) == 0) { tsAssert = cfgGetItem(pCfg, "assert")->bval; + } else if (strcasecmp("activeCode", name) == 0) { + SConfigItem *pActiveItem = cfgGetItem(pCfg, "activeCode"); + snprintf(tsActive, sizeof(tsActive), "%s", pActiveItem->str); + cfgSetItem(pCfg, "activeCode", tsActive, pActiveItem->stype); } break; } @@ -946,7 +960,12 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32; } else if (strcasecmp("crashReporting", name) == 0) { tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; + } else if (strcasecmp("cActiveCode", name) == 0) { + SConfigItem *pActiveItem = cfgGetItem(pCfg, "cActiveCode"); + snprintf(tsConnActive, sizeof(tsConnActive), "%s", pActiveItem->str); + cfgSetItem(pCfg, "cActiveCode", tsActive, pActiveItem->stype); } + break; } case 'd': { From 5fc17e874d3cef07369408d87d3875e3d700e611 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 18 May 2023 20:24:46 +0800 Subject: [PATCH 20/59] chore: remove unused code --- source/common/src/tglobal.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 57a29d2db9..699b6f6428 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1463,18 +1463,6 @@ void taosCfgDynamicOptions(const char *option, const char *value) { return; } - if (strcasecmp(option, "activeCode") == 0) { - // TODO - printf("%s:%d set activeCode: %s\n", __func__, __LINE__, value); - return; - } - - if (strcasecmp(option, "cActiveCode") == 0) { - // TODO - printf("%s:%d set cActiveCode: %s\n", __func__, __LINE__, value); - return; - } - const char *options[] = { "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag", From 8eb4367d460b95c237b97476e6d1292c7f9a6ab1 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 06:50:12 +0800 Subject: [PATCH 21/59] fix: range check and test case adjust --- source/libs/parser/src/parUtil.c | 1 + tests/system-test/0-others/information_schema.py | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 14da6f8aab..5597bd3df8 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -305,6 +305,7 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen) { dst[j] = src[k]; j++; } + if (j >= dlen) j = dlen - 1; dst[j] = '\0'; return j; } diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index ef631ee8d9..e502b22773 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -176,8 +176,10 @@ class TDTestCase: self.str107 += 'tP+2soIXpP' self.str108 = self.str107 + '=' self.str109 = self.str108 + '+' - self.str254 = self.str108 + self.str108 + '0123456789001234567890012345678901234567' + self.str254 = self.str108 + self.str108 + '01234567890123456789012345678901234567' self.str255 = self.str254 + '=' + self.str256 = 
self.str254 + '=(' + self.str257 = self.str254 + '=()' self.str510 = self.str255 + self.str255 tdSql.error('alter dnode 1 "activeCode" "a"') tdSql.error('alter dnode 1 "activeCode" "' + self.str107 + '"') @@ -192,16 +194,17 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[0][1],'') tdSql.error('alter dnode 1 "cActiveCode" "a"') tdSql.error('alter dnode 1 "cActiveCode" "' + self.str107 + '"') - tdSql.execute('alter dnode 1 "cActiveCode" "' + self.str254 + '"') + tdSql.error('alter dnode 1 "cActiveCode" "' + self.str256 + '"') tdSql.error('alter all dnodes "cActiveCode" "' + self.str255 + '"') + tdSql.execute('alter all dnodes "cActiveCode" "' + self.str254 + '"') tdSql.error('alter dnode 1 "cActiveCode" "' + self.str510 + '"') tdSql.query(f'select active_code,c_active_code from information_schema.ins_dnodes') tdSql.checkEqual(tdSql.queryResult[0][0],"") tdSql.checkEqual(tdSql.queryResult[0][1],self.str254) tdSql.execute('alter dnode 1 "cActiveCode" "' + self.str109 + '"') tdSql.query(f'show dnodes') - tdSql.checkEqual(tdSql.queryResult[0][0],self.str109) - tdSql.execute('alter all dnodes "activeCode" ""') + tdSql.checkEqual(tdSql.queryResult[0][9],self.str109) + tdSql.execute('alter all dnodes "cActiveCode" ""') tdSql.query(f'select c_active_code from information_schema.ins_dnodes') tdSql.checkEqual(tdSql.queryResult[0][0],'') From 0fc1eadeedeb236f4bd82efa90323bb7ce82ba2d Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 06:52:07 +0800 Subject: [PATCH 22/59] chore: revert --- include/common/tglobal.h | 4 ---- source/common/src/tglobal.c | 19 ------------------- 2 files changed, 23 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index c16d82d801..8509d39793 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -77,10 +77,6 @@ extern int32_t tsElectInterval; extern int32_t tsHeartbeatInterval; extern int32_t tsHeartbeatTimeout; -// dnode -extern char tsActive[]; -extern char tsConnActive[]; - // vnode extern int64_t tsVndCommitMaxIntervalMs; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 699b6f6428..237a52efe5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -66,10 +66,6 @@ int32_t tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; int32_t tsHeartbeatTimeout = 20 * 1000; -// dnode -char tsActive[TSDB_ACTIVE_KEY_LEN] = {0}; -char tsConnActive[TSDB_CONN_ACTIVE_KEY_LEN] = {0}; - // vnode int64_t tsVndCommitMaxIntervalMs = 600 * 1000; @@ -494,9 +490,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1; - if (cfgAddString(pCfg, "activeCode", tsActive, 0) != 0) return -1; - if (cfgAddString(pCfg, "cActiveCode", tsConnActive, 0) != 0) return -1; - if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1; if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1; if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1; @@ -892,9 +885,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64; tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64; - tstrncpy(tsActive, cfgGetItem(pCfg, "activeCode")->str, sizeof(tsActive)); - tstrncpy(tsConnActive, cfgGetItem(pCfg, "cActiveCode")->str, sizeof(tsConnActive)); - tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; 
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs)); tstrncpy(tsUdfdLdLibPath, cfgGetItem(pCfg, "udfdLdLibPath")->str, sizeof(tsUdfdLdLibPath)); @@ -937,10 +927,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsAsyncLog = cfgGetItem(pCfg, "asyncLog")->bval; } else if (strcasecmp("assert", name) == 0) { tsAssert = cfgGetItem(pCfg, "assert")->bval; - } else if (strcasecmp("activeCode", name) == 0) { - SConfigItem *pActiveItem = cfgGetItem(pCfg, "activeCode"); - snprintf(tsActive, sizeof(tsActive), "%s", pActiveItem->str); - cfgSetItem(pCfg, "activeCode", tsActive, pActiveItem->stype); } break; } @@ -960,12 +946,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32; } else if (strcasecmp("crashReporting", name) == 0) { tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; - } else if (strcasecmp("cActiveCode", name) == 0) { - SConfigItem *pActiveItem = cfgGetItem(pCfg, "cActiveCode"); - snprintf(tsConnActive, sizeof(tsConnActive), "%s", pActiveItem->str); - cfgSetItem(pCfg, "cActiveCode", tsActive, pActiveItem->stype); } - break; } case 'd': { From 573e16a2a7c5e128598c3eb0e4824e6e9a7cdcf6 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 06:54:09 +0800 Subject: [PATCH 23/59] chore: test case --- tests/system-test/0-others/information_schema.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index e502b22773..8245407ade 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -196,6 +196,8 @@ class TDTestCase: tdSql.error('alter dnode 1 "cActiveCode" "' + self.str107 + '"') tdSql.error('alter dnode 1 "cActiveCode" "' + self.str256 + '"') tdSql.error('alter all dnodes "cActiveCode" "' + self.str255 + '"') + tdSql.error('alter all dnodes "cActiveCode" "' + self.str256 + '"') + tdSql.error('alter all dnodes "cActiveCode" "' + self.str257 + '"') tdSql.execute('alter all dnodes "cActiveCode" "' + self.str254 + '"') tdSql.error('alter dnode 1 "cActiveCode" "' + self.str510 + '"') tdSql.query(f'select active_code,c_active_code from information_schema.ins_dnodes') From c770a5c30691f504cf971657802edca008aa3715 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 06:59:28 +0800 Subject: [PATCH 24/59] chore: code optimization --- source/dnode/mnode/impl/src/mndDnode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 23580c17e1..d9c1247a93 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1057,6 +1057,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr()); return -1; } + return 0; } else if (strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { if (' ' != cfgReq.config[11] && 0 != cfgReq.config[11]) { mError("dnode:%d, failed to config cActiveCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); @@ -1078,6 +1079,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { mError("dnode:%d, failed to config cActiveCode since %s", cfgReq.dnodeId, terrstr()); return -1; } + return 0; #endif } else { bool findOpt = false; From de9389585995bb8828011b2e642d3d461e9a3f56 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 09:25:26 +0800 Subject: [PATCH 25/59] chore: code optimization --- 
source/dnode/mnode/impl/src/mndDnode.c | 36 +++++++------------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index d9c1247a93..587f78f300 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -1036,50 +1036,32 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { strcpy(dcfgReq.config, "monitor"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); #ifdef TD_ENTERPRISE - } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0) { - if (' ' != cfgReq.config[10] && 0 != cfgReq.config[10]) { + } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0 || strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { + int8_t opt = strncasecmp(cfgReq.config, "a", 1) == 0 ? DND_ACTIVE_CODE : DND_CONN_ACTIVE_CODE; + int8_t index = opt == DND_ACTIVE_CODE ? 10 : 11; + if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) { mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); terrno = TSDB_CODE_INVALID_CFG; return -1; } int32_t vlen = strlen(cfgReq.value); - if (vlen > 0 && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) { + if (vlen > 0 && ((opt == DND_ACTIVE_CODE && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) || + (opt == DND_CONN_ACTIVE_CODE && + (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))))) { mError("dnode:%d, failed to config activeCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen, cfgReq.config, cfgReq.value); terrno = TSDB_CODE_INVALID_OPTION; return -1; } - strcpy(dcfgReq.config, "activeCode"); + strcpy(dcfgReq.config, opt == DND_ACTIVE_CODE ? "activeCode" : "cActiveCode"); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); - if (mndConfigDnode(pMnode, pReq, &cfgReq, DND_ACTIVE_CODE) != 0) { + if (mndConfigDnode(pMnode, pReq, &cfgReq, opt) != 0) { mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr()); return -1; } return 0; - } else if (strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) { - if (' ' != cfgReq.config[11] && 0 != cfgReq.config[11]) { - mError("dnode:%d, failed to config cActiveCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); - terrno = TSDB_CODE_INVALID_CFG; - return -1; - } - int32_t vlen = strlen(cfgReq.value); - if (vlen > 0 && (vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))) { - mError("dnode:%d, failed to config cActiveCode since invalid vlen:%d. 
conf:%s, val:%s", cfgReq.dnodeId, vlen, - cfgReq.config, cfgReq.value); - terrno = TSDB_CODE_INVALID_OPTION; - return -1; - } - - strcpy(dcfgReq.config, "cActiveCode"); - snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); - - if (mndConfigDnode(pMnode, pReq, &cfgReq, DND_CONN_ACTIVE_CODE) != 0) { - mError("dnode:%d, failed to config cActiveCode since %s", cfgReq.dnodeId, terrstr()); - return -1; - } - return 0; #endif } else { bool findOpt = false; From 807ec0b967ade95aef75ed17f35c43a5e6591566 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 19 May 2023 10:39:33 +0800 Subject: [PATCH 26/59] fix test case --- tests/system-test/2-query/interp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index fc68aff4a3..0f0220fff7 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2961,7 +2961,7 @@ class TDTestCase: tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") - tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from (select * from {ctbname1}, {ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select {ctbname1}.ts,{ctbname1}.c0 from {ctbname1}, {ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") def stop(self): tdSql.close() From d8525123a2b03c54d35564d179b3077061a32f67 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 11:03:41 +0800 Subject: [PATCH 27/59] enh(stream): add API to retrieve last ts for multi-tables. 
--- source/dnode/vnode/inc/vnode.h | 1 + source/dnode/vnode/src/tq/tq.c | 3 +- source/dnode/vnode/src/tsdb/tsdbRead.c | 55 ++++++++++++++++++++++++++ source/libs/stream/src/streamRecover.c | 17 ++++---- 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index da5d6f8b3c..18fa893fa4 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -198,6 +198,7 @@ void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta); uint64_t tsdbGetReaderMaxVersion(STsdbReader *pReader); void tsdbReaderSetCloseFlag(STsdbReader *pReader); +int64_t tsdbGetLastTimestamp(SVnode* pVnode, void* pTableList, int32_t numOfTables, const char* pIdStr); int32_t tsdbReuseCacherowsReader(void* pReader, void* pTableIdList, int32_t numOfTables); int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols, diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a8c2a09319..7dc7f999fb 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -760,6 +760,7 @@ void freePtr(void *ptr) { int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { int32_t vgId = TD_VID(pTq->pVnode); + pTask->id.idStr = createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId); pTask->refCnt = 1; pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; @@ -837,7 +838,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { } if (pTask->taskLevel == TASK_LEVEL__SOURCE) { - SWalFilterCond cond = {.deleteMsg = 1}; + SWalFilterCond cond = {.deleteMsg = 1}; // delete msg also extract from wal files pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond); } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 36b18ce9f4..cde2672541 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -5506,3 +5506,58 @@ void tsdbReaderSetId(STsdbReader* pReader, const char* idstr) { } void tsdbReaderSetCloseFlag(STsdbReader* pReader) { pReader->code = TSDB_CODE_TSC_QUERY_CANCELLED; } + +/*-------------todo:refactor the implementation of those APIs in this file to seperate the API into two files------*/ +// opt perf, do NOT create so many readers +int64_t tsdbGetLastTimestamp(SVnode* pVnode, void* pTableList, int32_t numOfTables, const char* pIdStr) { + SQueryTableDataCond cond = {.type = TIMEWINDOW_RANGE_CONTAINED, .numOfCols = 1, .order = TSDB_ORDER_DESC, + .startVersion = -1, .endVersion = -1}; + cond.twindows.skey = INT64_MIN; + cond.twindows.ekey = INT64_MAX; + + cond.colList = taosMemoryCalloc(1, sizeof(SColumnInfo)); + cond.pSlotList = taosMemoryMalloc(sizeof(int32_t) * cond.numOfCols); + if (cond.colList == NULL || cond.pSlotList == NULL) { + // todo + } + + cond.colList[0].colId = 1; + cond.colList[0].slotId = 0; + cond.colList[0].type = TSDB_DATA_TYPE_TIMESTAMP; + + cond.pSlotList[0] = 0; + + STableKeyInfo* pTableKeyInfo = pTableList; + STsdbReader* pReader = NULL; + SSDataBlock* pBlock = createDataBlock(); + + SColumnInfoData data = {0}; + data.info = (SColumnInfo) {.type = TSDB_DATA_TYPE_TIMESTAMP, .colId = 1, .bytes = TSDB_KEYSIZE}; + blockDataAppendColInfo(pBlock, &data); + + int64_t key = INT64_MIN; + + for(int32_t i = 0; i < numOfTables; ++i) { + int32_t code = tsdbReaderOpen(pVnode, &cond, &pTableKeyInfo[i], 1, pBlock, &pReader, pIdStr, false, NULL); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + bool 
hasData = false; + code = tsdbNextDataBlock(pReader, &hasData); + if (!hasData || code != TSDB_CODE_SUCCESS) { + continue; + } + + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, 0); + int64_t k = *(int64_t*)pCol->pData; + + if (key < k) { + key = k; + } + + tsdbReaderClose(pReader); + } + + return 0; +} diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index f71d078a3d..7236c6c4b9 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -20,7 +20,7 @@ int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version) { if (pTask->taskLevel == TASK_LEVEL__SOURCE) { atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__RECOVER_PREPARE); - qDebug("s-task:%s set task status:%d and start recover", pTask->id.idStr, pTask->status.taskStatus); + qDebug("s-task:%s set task status:%d and start to recover", pTask->id.idStr, pTask->status.taskStatus); streamSetParamForRecover(pTask); streamSourceRecoverPrepareStep1(pTask, version); @@ -46,6 +46,7 @@ int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version) { streamSetParamForRecover(pTask); streamAggRecoverPrepare(pTask); } else if (pTask->taskLevel == TASK_LEVEL__SINK) { + // sink nodes has no specified operation for fill history atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__NORMAL); } @@ -71,23 +72,23 @@ int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version) { req.downstreamTaskId = pTask->fixedEpDispatcher.taskId; pTask->checkReqId = req.reqId; - qDebug("s-task:%s at node %d check downstream task %d at node %d", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, + qDebug("s-task:%s at node %d check downstream task:%d at node %d", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, req.downstreamNodeId); streamDispatchCheckMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet); } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; - int32_t vgSz = taosArrayGetSize(vgInfo); - pTask->recoverTryingDownstream = vgSz; - pTask->checkReqIds = taosArrayInit(vgSz, sizeof(int64_t)); + int32_t numOfVgs = taosArrayGetSize(vgInfo); + pTask->recoverTryingDownstream = numOfVgs; + pTask->checkReqIds = taosArrayInit(numOfVgs, sizeof(int64_t)); - for (int32_t i = 0; i < vgSz; i++) { + for (int32_t i = 0; i < numOfVgs; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); req.reqId = tGenIdPI64(); taosArrayPush(pTask->checkReqIds, &req.reqId); req.downstreamNodeId = pVgInfo->vgId; req.downstreamTaskId = pVgInfo->taskId; - qDebug("s-task:%s at node %d check downstream task %d at node %d (shuffle)", pTask->id.idStr, pTask->nodeId, + qDebug("s-task:%s at node %d check downstream task:%d at node %d (shuffle)", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, req.downstreamNodeId); streamDispatchCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet); } @@ -161,7 +162,7 @@ int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* taosArrayDestroy(pTask->checkReqIds); pTask->checkReqIds = NULL; - qDebug("s-task:%s all downstream tasks:%d are ready, now enter into recover stage", pTask->id.idStr, numOfReqs); + qDebug("s-task:%s all %d downstream tasks are ready, now enter into recover stage", pTask->id.idStr, numOfReqs); streamTaskLaunchRecover(pTask, version); } } else if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) { From 0de1c61cea47771b01688990b87fdf4371f664ec Mon Sep 17 00:00:00 2001 From: Ganlin 
Zhao Date: Fri, 19 May 2023 15:09:30 +0800 Subject: [PATCH 28/59] add test cases --- tests/system-test/2-query/interp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 0f0220fff7..12e8f19295 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -38,6 +38,8 @@ class TDTestCase: tdLog.printNoPrefix("==========step2:insert data") + tdSql.execute(f"use db") + tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')") tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") From 90687bae1c48cc80d9217fa541bd5b4df9463910 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 15:30:50 +0800 Subject: [PATCH 29/59] refactor: do some internal refactor. --- source/libs/stream/inc/streamInc.h | 2 +- source/libs/stream/src/stream.c | 35 ++++++++++--------------- source/libs/stream/src/streamData.c | 3 ++- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamExec.c | 1 + 5 files changed, 19 insertions(+), 24 deletions(-) diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index c471bc2bd8..7e1b4dcf2d 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -34,7 +34,7 @@ typedef struct { static SStreamGlobalEnv streamEnv; int32_t streamDispatch(SStreamTask* pTask); -int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); +int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 26e1c2ab43..1a5ef2e007 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -121,24 +121,22 @@ int32_t streamSchedExec(SStreamTask* pTask) { int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) { SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); - int8_t status; - // enqueue data block - if (pData != NULL) { + int8_t status = 0; + if (pData == NULL) { + streamTaskInputFail(pTask); + status = TASK_INPUT_STATUS__FAILED; + qDebug("vgId:%d, s-task:%s failed to received dispatch msg, reason: out of memory", pTask->pMeta->vgId, pTask->id.idStr); + } else { pData->type = STREAM_INPUT__DATA_BLOCK; pData->srcVgId = pReq->dataSrcVgId; - // decode - /*pData->blocks = pReq->data;*/ - /*pBlock->sourceVer = pReq->sourceVer;*/ - streamDispatchReqToData(pReq, pData); + + streamConvertDispatchMsgToData(pReq, pData); if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pData) == 0) { status = TASK_INPUT_STATUS__NORMAL; } else { // input queue is full, upstream is blocked now status = TASK_INPUT_STATUS__BLOCKED; } - } else { - streamTaskInputFail(pTask); - status = TASK_INPUT_STATUS__FAILED; } // rsp by input status @@ -219,7 +217,7 @@ int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock) { } int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { - 
qDebug("s-task:%s receive dispatch msg from taskId:%d(vgId:%d)", pTask->id.idStr, pReq->upstreamTaskId, + qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d)", pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId); // todo add the input queue buffer limitation @@ -294,16 +292,13 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue); if (type == STREAM_INPUT__DATA_SUBMIT) { - int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1; - double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue); - SStreamDataSubmit* px = (SStreamDataSubmit*)pItem; qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr, - px->submit.msgLen, px->submit.ver, numOfBlocks, size); + px->submit.msgLen, px->submit.ver, total, size); if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && tInputQueueIsFull(pTask)) { qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) abort", - pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, numOfBlocks, + pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, total, size); streamDataSubmitDestroy(px); taosFreeQitem(pItem); @@ -312,22 +307,20 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || type == STREAM_INPUT__REF_DATA_BLOCK) { - int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1; - double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue); - if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && (tInputQueueIsFull(pTask))) { qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort", - pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, numOfBlocks, + pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, total, size); return -1; } - qDebug("s-task:%s data block enqueue, total in queue:%d", pTask->id.idStr, numOfBlocks); + qDebug("s-task:%s data block enqueue, total in queue:%d", pTask->id.idStr, total); taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (type == STREAM_INPUT__GET_RES) { taosWriteQitem(pTask->inputQueue->queue, pItem); + qDebug("s-task:%s data block enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); } if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index b70a8c93a9..2233e21f60 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -15,7 +15,7 @@ #include "streamInc.h" -int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { +int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { int32_t blockNum = pReq->blockNum; SArray* pArray = taosArrayInit_s(sizeof(SSDataBlock), blockNum); if (pArray == NULL) { @@ -39,6 +39,7 @@ int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock pDataBlock->info.type = pRetrieve->streamBlockType; pDataBlock->info.childId = 
pReq->upstreamChildId; } + pData->blocks = pArray; return 0; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index a757d39d3f..7e0fbd30b1 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -320,7 +320,7 @@ int32_t streamDispatchOneDataReq(SStreamTask* pTask, const SStreamDispatchReq* p msg.pCont = buf; msg.msgType = pTask->dispatchMsgType; - qDebug("dispatch from s-task:%s to taskId:%d vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId); + qDebug("dispatch from s-task:%s to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId); tmsgSendReq(pEpSet, &msg); code = 0; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index d14e8102d8..ec2738558a 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -272,6 +272,7 @@ int32_t streamExecForAll(SStreamTask* pTask) { if (streamTaskShouldPause(&pTask->status)) { return 0; } + SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue); if (qItem == NULL) { if (pTask->taskLevel == TASK_LEVEL__SOURCE && batchSize < MIN_STREAM_EXEC_BATCH_NUM && times < 5) { From 8571e6f6c5bd010e61628767b351e0dd30c2bb24 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 16:06:20 +0800 Subject: [PATCH 30/59] chore: code optimization --- source/dnode/mnode/impl/src/mndDnode.c | 29 ++++++++++++++++++-------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index d9c1247a93..e344dca0a0 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -649,18 +649,19 @@ _OVER: } static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq, int8_t action) { - SSdbRaw *pRaw = NULL; - STrans *pTrans = NULL; + SSdbRaw *pRaw = NULL; + STrans *pTrans = NULL; + SDnodeObj *pDnode = NULL; + bool cfgAll = pCfgReq->dnodeId == -1; SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; while (1) { - SDnodeObj *pDnode = NULL; - pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); - if (pIter == NULL) break; - - if (pDnode->id != pCfgReq->dnodeId && pCfgReq->dnodeId != -1) { - continue; + if (cfgAll) { + pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); + if (pIter == NULL) break; + } else if(!(pDnode = mndAcquireDnode(pMnode, pCfgReq->dnodeId)) { + goto _OVER; } if (!pTrans) { @@ -687,7 +688,12 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg mInfo("dnode:%d, config dnode, cfg:%d, app:%p config:%s value:%s", pDnode->id, pCfgReq->dnodeId, pReq->info.ahandle, pCfgReq->config, pCfgReq->value); - sdbRelease(pSdb, pDnode); + if (cfgAll) { + sdbRelease(pSdb, pDnode); + pDnode = NULL; + } else { + break; + } } if (pTrans && mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; @@ -695,6 +701,11 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg terrno = 0; _OVER: + if (cfgAll) { + sdbRelease(pSdb, pDnode); + } else { + mndReleaseDnode(pMnode, pDnode); + } mndTransDrop(pTrans); sdbFreeRaw(pRaw); return terrno; From 8fd4b27c436186736cf8a9abab786e6b013328e6 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 19 May 2023 16:10:44 +0800 Subject: [PATCH 31/59] chore: code optimization --- source/dnode/mnode/impl/src/mndDnode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 
029d63cb9d..99ec7970ad 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -660,7 +660,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg if (cfgAll) { pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); if (pIter == NULL) break; - } else if(!(pDnode = mndAcquireDnode(pMnode, pCfgReq->dnodeId)) { + } else if(!(pDnode = mndAcquireDnode(pMnode, pCfgReq->dnodeId))) { goto _OVER; } From a3ca23b6887b7cfecccdc40858c17e58bee52155 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 19 May 2023 16:43:39 +0800 Subject: [PATCH 32/59] forbid interp ignoring null value used for multiple cols --- source/libs/function/src/builtins.c | 12 +++++++++++- source/libs/parser/src/parTranslater.c | 9 +++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 9529b91fa0..752b84b5ad 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1639,6 +1639,15 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return TSDB_CODE_SUCCESS; } +static EFuncReturnRows interpEstReturnRows(SFunctionNode* pFunc) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (1 < numOfParams && 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i) { + return FUNC_RETURN_ROWS_INDEFINITE; + } else { + return FUNC_RETURN_ROWS_N; + } +} + static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // forbid null as first/last input, since first(c0, null, 1) may have different number of input int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); @@ -2499,7 +2508,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, .processFunc = NULL, - .finalizeFunc = NULL + .finalizeFunc = NULL, + .estimateReturnRowsFunc = interpEstReturnRows }, { .name = "derivative", diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c5fe3a1f73..2ec32a8bcb 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1531,6 +1531,12 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } + + if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + } + if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function is not supported in window query or group query", pFunc->functionName); @@ -1731,7 +1737,10 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) { if (fmIsIndefiniteRowsFunc(pFunc->funcId)) { pSelect->hasIndefiniteRowsFunc = true; pSelect->returnRows = fmGetFuncReturnRows(pFunc); + } else if (fmIsInterpFunc(pFunc->funcId)) { + pSelect->returnRows = fmGetFuncReturnRows(pFunc); } + pSelect->hasMultiRowsFunc = pSelect->hasMultiRowsFunc ? 
true : fmIsMultiRowsFunc(pFunc->funcId); if (fmIsSelectFunc(pFunc->funcId)) { pSelect->hasSelectFunc = true; From 836efcac9587dca2ac3431bb41b4b6cdfc5e3e55 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 19 May 2023 16:44:08 +0800 Subject: [PATCH 33/59] add test cases --- tests/system-test/2-query/interp.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 8d654dd9e1..37df8a0e34 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -3973,6 +3973,18 @@ class TDTestCase: tdSql.checkData(7, 1, '2020-02-01 00:00:09.000') tdSql.checkData(10, 1, '2020-02-01 00:00:15.000') + # multiple column with ignoring null value is not allowed + + tdSql.query(f"select _irowts, _isfilled, interp(c0), interp(c1), interp(c2) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + tdSql.query(f"select _irowts, _isfilled, interp(c0, 0), interp(c1, 0), interp(c2, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + tdSql.query(f"select _irowts, _isfilled, interp(c0), interp(c1, 0), interp(c2) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdSql.error(f"select _irowts, _isfilled, interp(c0), interp(c1, 1), interp(c2) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + tdSql.error(f"select _irowts, _isfilled, interp(c0, 1), interp(c1, 0), interp(c2, 0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + tdSql.error(f"select _irowts, _isfilled, interp(c0), interp(c1, 0), interp(c2, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + tdSql.error(f"select _irowts, _isfilled, interp(c0, 1), interp(c1, 1), interp(c2, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(linear)") + + tdLog.printNoPrefix("======step 15: test interp pseudo columns") tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}") From 91414ed8e3a5ffb96360be289c0295c3c391afac Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 19 May 2023 16:54:40 +0800 Subject: [PATCH 34/59] fix test case --- tests/system-test/2-query/interp.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 12e8f19295..6630713f73 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2947,7 +2947,7 @@ class TDTestCase: tdLog.printNoPrefix("======step 14: test interp pseudo columns") tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}") - tdLog.printNoPrefix("======step 14: test interp in nested query") + tdLog.printNoPrefix("======step 15: test interp in nested query") tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{stbname}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") @@ -2963,7 +2963,7 @@ class TDTestCase: tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 
00:00:14') every(1s) fill(null)") tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") - tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select {ctbname1}.ts,{ctbname1}.c0 from {ctbname1}, {ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") + tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select {ctbname1}.ts,{ctbname1}.c0 from {dbname}.{ctbname1}, {dbname}.{ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)") def stop(self): tdSql.close() From 709b59a3a60d7bc63a2559f9dd5cb6f35e893159 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 19 May 2023 17:27:15 +0800 Subject: [PATCH 35/59] fix: fix assert crash in tsort.c --- include/common/tdatablock.h | 2 +- source/common/src/tdatablock.c | 7 +++---- source/libs/executor/src/groupoperator.c | 3 ++- source/libs/executor/src/tsort.c | 3 ++- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 99fffa2cf1..33c571fc1b 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -215,7 +215,7 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows); void blockDataCleanup(SSDataBlock* pDataBlock); void blockDataEmpty(SSDataBlock* pDataBlock); -size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize); +size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize); int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n); int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5f7e43668a..2a125a5831 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -445,12 +445,11 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd size_t headerSize = sizeof(int32_t); size_t colHeaderSize = sizeof(int32_t) * numOfCols; - size_t payloadSize = pageSize - (headerSize + colHeaderSize); // TODO speedup by checking if the whole page can fit in firstly. 
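/*
 * Illustrative sketch (editor-added, not part of this patch): the hunk that
 * follows swaps the inline capacity formula for the reworked
 * blockDataGetCapacityInRow(pBlock, pageSize, extraSize), with the caller now
 * passing the header overhead (headerSize + colHeaderSize) explicitly instead
 * of the helper re-deriving it. For fixed-width rows the math is essentially
 * "payload bytes / bytes per row", with the per-row NULL-bitmap cost taken
 * from the removed inline expression (roughly numOfCols / 8 bytes per row).
 * A hypothetical standalone version, using plain ints so it compiles alone:
 */
static int rowsPerPageSketch(int pageSize, int extraSize, int rowSize, int numOfCols) {
  double payload = (double)(pageSize - extraSize);   /* page minus serialized meta          */
  double perRow  = rowSize + numOfCols / 8.0;        /* row payload + 1 bitmap bit per col  */
  int    rows    = (int)(payload / perRow);
  return rows > 0 ? rows : 0;                        /* <= 0 means one row does not fit     */
}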
if (!hasVarCol) { size_t rowSize = blockDataGetRowSize(pBlock); - int32_t capacity = payloadSize / (rowSize + numOfCols * bitmapChar / 8.0); + int32_t capacity = blockDataGetCapacityInRow(pBlock, pageSize, headerSize + colHeaderSize); if (capacity <= 0) { return TSDB_CODE_FAILED; } @@ -1532,10 +1531,10 @@ SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index) { return taosArrayGet(pBlock->pDataBlock, index); } -size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { +size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize) { size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); - int32_t payloadSize = pageSize - blockDataGetSerialMetaSize(numOfCols); + int32_t payloadSize = pageSize - extraSize; int32_t rowSize = pBlock->info.rowSize; int32_t nRows = payloadSize / rowSize; ASSERT(nRows >= 1); diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index f2eee82579..936030fa57 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -885,7 +885,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition goto _error; } - pInfo->rowCapacity = blockDataGetCapacityInRow(pInfo->binfo.pRes, getBufPageSize(pInfo->pBuf)); + pInfo->rowCapacity = blockDataGetCapacityInRow(pInfo->binfo.pRes, getBufPageSize(pInfo->pBuf), + blockDataGetSerialMetaSize(taosArrayGetSize(pInfo->binfo.pRes->pDataBlock))); pInfo->columnOffset = setupColumnOffset(pInfo->binfo.pRes, pInfo->rowCapacity); code = initGroupOptrInfo(&pInfo->pGroupColVals, &pInfo->groupKeyLen, &pInfo->keyBuf, pInfo->pGroupCols); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 211f78b981..783597df67 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -533,7 +533,8 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { pHandle->numOfPages); } - int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize); + int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize, + blockDataGetSerialMetaSize(taosArrayGetSize(pHandle->pDataBlock->pDataBlock))); blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows); // the initial pass + sortPass + final mergePass From e2ec8d738c2f36f60612d135d8a557ef00cb922b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 18:08:01 +0800 Subject: [PATCH 36/59] other: add some logs. 
--- source/dnode/vnode/src/tq/tqRestore.c | 4 ++-- source/libs/stream/src/streamExec.c | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index 9acf2454af..ff9f95d5fa 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -119,7 +119,7 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) { int32_t status = pTask->status.taskStatus; if (pTask->taskLevel != TASK_LEVEL__SOURCE) { - tqDebug("s-task:%s level:%d not source task, no need to start", pTask->id.idStr, pTask->taskLevel); +// tqTrace("s-task:%s level:%d not source task, no need to start", pTask->id.idStr, pTask->taskLevel); streamMetaReleaseTask(pStreamMeta, pTask); continue; } @@ -132,7 +132,7 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) { } if (tInputQueueIsFull(pTask)) { - tqDebug("s-task:%s input queue is full, do nothing", pTask->id.idStr); + tqTrace("s-task:%s input queue is full, do nothing", pTask->id.idStr); streamMetaReleaseTask(pStreamMeta, pTask); continue; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index ec2738558a..6391a02ace 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -105,9 +105,10 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* block.info.childId = pTask->selfChildId; taosArrayPush(pRes, &block); - qDebug("task %d(child %d) processed retrieve, reqId %" PRId64, pTask->id.taskId, pTask->selfChildId, + qDebug("s-task:%s(child %d) processed retrieve, reqId:0x%" PRIx64, pTask->id.idStr, pTask->selfChildId, pRetrieveBlock->reqId); } + break; } @@ -118,12 +119,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* continue; } - qDebug("s-task:%s (child %d) executed and get block", pTask->id.idStr, pTask->selfChildId); - SSDataBlock block = {0}; assignOneDataBlock(&block, output); block.info.childId = pTask->selfChildId; taosArrayPush(pRes, &block); + + qDebug("s-task:%s (child %d) executed and get block, total blocks:%d", pTask->id.idStr, pTask->selfChildId, (int32_t)taosArrayGetSize(pRes)); } return 0; From a9b7b8a5fdf4fec59477161ea753496eba71536a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 19:26:15 +0800 Subject: [PATCH 37/59] other: add some logs. 
--- source/libs/stream/src/stream.c | 4 ++-- source/libs/stream/src/streamExec.c | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 1a5ef2e007..128d26bfb1 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -314,13 +314,13 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { return -1; } - qDebug("s-task:%s data block enqueue, total in queue:%d", pTask->id.idStr, total); + qDebug("s-task:%s data block enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (type == STREAM_INPUT__GET_RES) { taosWriteQitem(pTask->inputQueue->queue, pItem); - qDebug("s-task:%s data block enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); + qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); } if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 6391a02ace..2d950723bc 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -29,9 +29,10 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus) { return (status == TASK_STATUS__PAUSE); } -static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes) { +static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes, int64_t* resSize) { int32_t code = TSDB_CODE_SUCCESS; void* pExecutor = pTask->exec.pExecutor; + *resSize = 0; while (pTask->taskLevel == TASK_LEVEL__SOURCE) { int8_t status = atomic_load_8(&pTask->status.taskStatus); @@ -122,9 +123,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* SSDataBlock block = {0}; assignOneDataBlock(&block, output); block.info.childId = pTask->selfChildId; + + (*resSize) += blockDataGetSize(output); taosArrayPush(pRes, &block); - qDebug("s-task:%s (child %d) executed and get block, total blocks:%d", pTask->id.idStr, pTask->selfChildId, (int32_t)taosArrayGetSize(pRes)); + qDebug("s-task:%s (child %d) executed and get block, total blocks:%d, size:%.2fMiB", pTask->id.idStr, pTask->selfChildId, (int32_t)taosArrayGetSize(pRes), + (*resSize)/1048576.0); } return 0; @@ -330,10 +334,12 @@ int32_t streamExecForAll(SStreamTask* pTask) { continue; } + int64_t resSize = 0; + int64_t st = taosGetTimestampMs(); SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); qDebug("s-task:%s start to execute, block batches:%d", pTask->id.idStr, batchSize); - streamTaskExecImpl(pTask, pInput, pRes); + streamTaskExecImpl(pTask, pInput, pRes, &resSize); int64_t ckId = 0; int64_t dataVer = 0; @@ -356,10 +362,11 @@ int32_t streamExecForAll(SStreamTask* pTask) { taosWUnLockLatch(&pTask->pMeta->lock); qDebug("s-task:%s update checkpoint ver succeed", pTask->id.idStr); } - } else { - qDebug("s-task:%s exec end", pTask->id.idStr); } + double el = (taosGetTimestampMs() - st) / 1000.0; + qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB", pTask->id.idStr, el, resSize/1048576.0); + if (taosArrayGetSize(pRes) != 0) { SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); if (qRes == NULL) { From 56e5a483dd3d5dd98432dd2a43b13988cb260326 Mon Sep 17 
00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 20:00:32 +0800 Subject: [PATCH 38/59] other: update log. --- source/libs/stream/src/streamExec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 2d950723bc..d77d6c21ce 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -124,7 +124,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* assignOneDataBlock(&block, output); block.info.childId = pTask->selfChildId; - (*resSize) += blockDataGetSize(output); + (*resSize) += blockDataGetSize(output) + sizeof(SSDataBlock) + sizeof(SColumnInfoData) * blockDataGetNumOfCols(&block); taosArrayPush(pRes, &block); qDebug("s-task:%s (child %d) executed and get block, total blocks:%d, size:%.2fMiB", pTask->id.idStr, pTask->selfChildId, (int32_t)taosArrayGetSize(pRes), From 8ab065d01ecf6c50aae8e46ddb27a8830c5378dd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 19 May 2023 23:53:23 +0800 Subject: [PATCH 39/59] other: update some logs. --- source/common/src/tdatablock.c | 3 --- source/libs/stream/src/streamExec.c | 6 ++++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5f7e43668a..ff583baaa6 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2514,9 +2514,6 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { *actualLen = dataLen; *groupId = pBlock->info.id.groupId; ASSERT(dataLen > 0); - - uDebug("build data block, actualLen:%d, rows:%d, cols:%d", dataLen, *rows, *cols); - return dataLen; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index d77d6c21ce..e888faf4d1 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -365,9 +365,11 @@ int32_t streamExecForAll(SStreamTask* pTask) { } double el = (taosGetTimestampMs() - st) / 1000.0; - qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB", pTask->id.idStr, el, resSize/1048576.0); - if (taosArrayGetSize(pRes) != 0) { + int32_t numOfBlocks = taosArrayGetSize(pRes); + qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", pTask->id.idStr, el, resSize/1048576.0, numOfBlocks); + + if (numOfBlocks > 0) { SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); if (qRes == NULL) { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); From 6ded3284da6a5e6adc6f35b943d32d3b86373d9c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 11:37:17 +0800 Subject: [PATCH 40/59] refactor: do some internal refactor. 
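Editor's note before the refactor below: patch 38 above tweaked the result-size bookkeeping so that each produced block counts its serialized payload (blockDataGetSize()) plus the fixed struct overheads, one SSDataBlock header and one SColumnInfoData per column, presumably so the MiB figures in the logs better approximate memory actually held rather than payload bytes alone. A hedged, self-contained sketch of that estimate with stand-in types (the real structs are larger):

#include <stdint.h>

/* Stand-ins for the real structures; only their sizes matter for the estimate. */
typedef struct { int64_t rows; int32_t cols; } BlockHdr;     /* ~ SSDataBlock      */
typedef struct { int16_t type; int32_t bytes; void *pData; } ColInfo;  /* ~ SColumnInfoData */

/* payloadBytes plays the role of blockDataGetSize(): the column data itself. */
static int64_t blockMemFootprint(int64_t payloadBytes, int32_t numOfCols) {
  return payloadBytes + (int64_t)sizeof(BlockHdr) + (int64_t)sizeof(ColInfo) * numOfCols;
}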
--- include/libs/stream/tstream.h | 2 +- source/libs/stream/src/stream.c | 7 +- source/libs/stream/src/streamData.c | 4 +- source/libs/stream/src/streamExec.c | 261 ++++++++++++++++------------ source/libs/stream/src/streamMeta.c | 2 + 5 files changed, 165 insertions(+), 111 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8f5d6c38f3..7c7039c1c0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -536,7 +536,7 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S void streamTaskInputFail(SStreamTask* pTask); int32_t streamTryExec(SStreamTask* pTask); int32_t streamSchedExec(SStreamTask* pTask); -int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock); +int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock); bool streamTaskShouldStop(const SStreamStatus* pStatus); bool streamTaskShouldPause(const SStreamStatus* pStatus); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 128d26bfb1..7f19529d11 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -195,7 +195,8 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1; } -int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock) { +// todo add log +int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) { int32_t code = 0; if (pTask->outputType == TASK_OUTPUT__TABLE) { pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks); @@ -209,10 +210,14 @@ int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock) { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); code = taosWriteQitem(pTask->outputQueue->queue, pBlock); if (code != 0) { + taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); + taosFreeQitem(pBlock); return code; } + streamDispatch(pTask); } + return 0; } diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 2233e21f60..ca24414db8 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -185,11 +185,13 @@ void streamFreeQitem(SStreamQueueItem* data) { taosFreeQitem(data); } else if (type == STREAM_INPUT__MERGED_SUBMIT) { SStreamMergedSubmit* pMerge = (SStreamMergedSubmit*)data; - int32_t sz = taosArrayGetSize(pMerge->submits); + + int32_t sz = taosArrayGetSize(pMerge->submits); for (int32_t i = 0; i < sz; i++) { int32_t* pRef = taosArrayGetP(pMerge->dataRefs, i); int32_t ref = atomic_sub_fetch_32(pRef, 1); ASSERT(ref >= 0); + if (ref == 0) { SPackedData* pSubmit = (SPackedData*)taosArrayGet(pMerge->submits, i); taosMemoryFree(pSubmit->msgStr); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index e888faf4d1..122dbc47f5 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -19,7 +19,10 @@ #define MAX_STREAM_EXEC_BATCH_NUM 128 #define MIN_STREAM_EXEC_BATCH_NUM 16 -bool streamTaskShouldStop(const SStreamStatus* pStatus) { +static int32_t updateCheckPointInfo (SStreamTask* pTask); +static SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); + + bool streamTaskShouldStop(const SStreamStatus* pStatus) { int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus); return (status == 
TASK_STATUS__STOP) || (status == TASK_STATUS__DROPPING); } @@ -29,55 +32,17 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus) { return (status == TASK_STATUS__PAUSE); } -static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes, int64_t* resSize) { +static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize, int32_t* totalBlocks) { int32_t code = TSDB_CODE_SUCCESS; void* pExecutor = pTask->exec.pExecutor; - *resSize = 0; - while (pTask->taskLevel == TASK_LEVEL__SOURCE) { - int8_t status = atomic_load_8(&pTask->status.taskStatus); - if (status != TASK_STATUS__NORMAL && status != TASK_STATUS__PAUSE) { - qError("stream task wait for the end of fill history, s-task:%s, status:%d", pTask->id.idStr, - atomic_load_8(&pTask->status.taskStatus)); - taosMsleep(2); - } else { - break; - } - } + *totalBlocks = 0; + *totalSize = 0; - // set input - const SStreamQueueItem* pItem = (const SStreamQueueItem*)data; - if (pItem->type == STREAM_INPUT__GET_RES) { - const SStreamTrigger* pTrigger = (const SStreamTrigger*)data; - qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK); - } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { - ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE); - const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)data; - qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT); - qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, pTask->id.idStr, pSubmit, - pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver); - } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { - const SStreamDataBlock* pBlock = (const SStreamDataBlock*)data; + SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); + int32_t size = 0; + int32_t numOfBlocks = 0; - SArray* pBlockList = pBlock->blocks; - int32_t numOfBlocks = taosArrayGetSize(pBlockList); - qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, pTask->id.idStr, numOfBlocks, pBlock->sourceVer); - qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK); - } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) { - const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)data; - - SArray* pBlockList = pMerged->submits; - int32_t numOfBlocks = taosArrayGetSize(pBlockList); - qDebug("s-task:%s %p set submit input (merged), batch num:%d", pTask->id.idStr, pTask, numOfBlocks); - qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT); - } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) { - const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)data; - qSetMultiStreamInput(pExecutor, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK); - } else { - ASSERT(0); - } - - // pExecutor while (1) { if (streamTaskShouldStop(&pTask->status)) { return 0; @@ -98,7 +63,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) { SSDataBlock block = {0}; - const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*)data; + const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*) pItem; ASSERT(taosArrayGetSize(pRetrieveBlock->blocks) == 1); assignOneDataBlock(&block, taosArrayGet(pRetrieveBlock->blocks, 0)); @@ -124,11 +89,49 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* assignOneDataBlock(&block, output); 
block.info.childId = pTask->selfChildId; - (*resSize) += blockDataGetSize(output) + sizeof(SSDataBlock) + sizeof(SColumnInfoData) * blockDataGetNumOfCols(&block); + size += blockDataGetSize(output) + sizeof(SSDataBlock) + sizeof(SColumnInfoData) * blockDataGetNumOfCols(&block); + numOfBlocks += 1; + taosArrayPush(pRes, &block); - qDebug("s-task:%s (child %d) executed and get block, total blocks:%d, size:%.2fMiB", pTask->id.idStr, pTask->selfChildId, (int32_t)taosArrayGetSize(pRes), - (*resSize)/1048576.0); + qDebug("s-task:%s (child %d) executed and get block, total blocks:%d, size:%.2fMiB", pTask->id.idStr, + pTask->selfChildId, numOfBlocks, size / 1048576.0); + + // current output should be dispatched to down stream nodes + if (numOfBlocks > 1000) { + code = updateCheckPointInfo(pTask); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + ASSERT(numOfBlocks == taosArrayGetSize(pRes)); + + if (numOfBlocks > 0) { + SStreamDataBlock* pStreamBlocks = createStreamDataBlockFromResults(pItem, pTask, size, pRes); + if (pStreamBlocks == NULL) { + return -1; + } + + qDebug("s-task:%s output exec stream data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks, size/1048576.0); + + code = streamTaskOutputResultBlock(pTask, pStreamBlocks); + if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + taosFreeQitem(pStreamBlocks); + return -1; + } + } else { + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + } + } + + *totalSize += size; + *totalBlocks += numOfBlocks; + + size = 0; + numOfBlocks = 0; + + ASSERT(taosArrayGetSize(pRes) == 0); } return 0; @@ -205,7 +208,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { qRes->type = STREAM_INPUT__DATA_BLOCK; qRes->blocks = pRes; - code = streamTaskOutput(pTask, qRes); + code = streamTaskOutputResultBlock(pTask, qRes); if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); taosFreeQitem(qRes); @@ -251,7 +254,7 @@ int32_t streamBatchExec(SStreamTask* pTask, int32_t batchLimit) { if (pTask->taskLevel == TASK_LEVEL__SINK) { ASSERT(((SStreamQueueItem*)pItem)->type == STREAM_INPUT__DATA_BLOCK); - streamTaskOutput(pTask, (SStreamDataBlock*)pItem); + streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pItem); } // exec impl @@ -262,6 +265,57 @@ int32_t streamBatchExec(SStreamTask* pTask, int32_t batchLimit) { } #endif +int32_t updateCheckPointInfo (SStreamTask* pTask) { + int64_t ckId = 0; + int64_t dataVer = 0; + qGetCheckpointVersion(pTask->exec.pExecutor, &dataVer, &ckId); + + SCheckpointInfo* pCkInfo = &pTask->chkInfo; + if (ckId > pCkInfo->id) { // save it since the checkpoint is updated + qDebug("s-task:%s exec end, start to update check point, ver from %" PRId64 " to %" PRId64 + ", checkPoint id:%" PRId64 " -> %" PRId64, pTask->id.idStr, pCkInfo->version, dataVer, pCkInfo->id, ckId); + + pTask->chkInfo = (SCheckpointInfo){.version = dataVer, .id = ckId, .currentVer = pCkInfo->currentVer}; + + taosWLockLatch(&pTask->pMeta->lock); + + streamMetaSaveTask(pTask->pMeta, pTask); + if (streamMetaCommit(pTask->pMeta) < 0) { + taosWUnLockLatch(&pTask->pMeta->lock); + qError("s-task:%s failed to commit stream meta, since %s", pTask->id.idStr, terrstr()); + return -1; + } else { + taosWUnLockLatch(&pTask->pMeta->lock); + qDebug("s-task:%s update checkpoint ver succeed", pTask->id.idStr); + } + } + + return TSDB_CODE_SUCCESS; +} + +SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* 
pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes) { + SStreamDataBlock* pStreamBlocks = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, resultSize); + if (pStreamBlocks == NULL) { + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + return NULL; + } + + pStreamBlocks->type = STREAM_INPUT__DATA_BLOCK; + pStreamBlocks->blocks = pRes; + + if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { + SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pItem; + pStreamBlocks->childId = pTask->selfChildId; + pStreamBlocks->sourceVer = pSubmit->ver; + } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) { + SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pItem; + pStreamBlocks->childId = pTask->selfChildId; + pStreamBlocks->sourceVer = pMerged->ver; + } + + return pStreamBlocks; +} + int32_t streamExecForAll(SStreamTask* pTask) { int32_t code = 0; while (1) { @@ -330,79 +384,70 @@ int32_t streamExecForAll(SStreamTask* pTask) { if (pTask->taskLevel == TASK_LEVEL__SINK) { ASSERT(pInput->type == STREAM_INPUT__DATA_BLOCK); qDebug("s-task:%s sink node start to sink result. numOfBlocks:%d", pTask->id.idStr, batchSize); - streamTaskOutput(pTask, (SStreamDataBlock*)pInput); + streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pInput); continue; } - int64_t resSize = 0; + // wait for the task to be ready to go + while (pTask->taskLevel == TASK_LEVEL__SOURCE) { + int8_t status = atomic_load_8(&pTask->status.taskStatus); + if (status != TASK_STATUS__NORMAL && status != TASK_STATUS__PAUSE) { + qError("stream task wait for the end of fill history, s-task:%s, status:%d", pTask->id.idStr, + atomic_load_8(&pTask->status.taskStatus)); + taosMsleep(2); + } else { + break; + } + } + int64_t st = taosGetTimestampMs(); - SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); qDebug("s-task:%s start to execute, block batches:%d", pTask->id.idStr, batchSize); - streamTaskExecImpl(pTask, pInput, pRes, &resSize); + { + // set input + void* pExecutor = pTask->exec.pExecutor; - int64_t ckId = 0; - int64_t dataVer = 0; - qGetCheckpointVersion(pTask->exec.pExecutor, &dataVer, &ckId); - if (ckId > pTask->chkInfo.id) { // save it since the checkpoint is updated - qDebug("s-task:%s exec end, start to update check point, ver from %" PRId64 " to %" PRId64 - ", checkPoint id:%" PRId64 " -> %" PRId64, - pTask->id.idStr, pTask->chkInfo.version, dataVer, pTask->chkInfo.id, ckId); + const SStreamQueueItem* pItem = pInput; + if (pItem->type == STREAM_INPUT__GET_RES) { + const SStreamTrigger* pTrigger = (const SStreamTrigger*)pInput; + qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK); + } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { + ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE); + const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput; + qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT); + qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, pTask->id.idStr, pSubmit, + pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver); + } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { + const SStreamDataBlock* pBlock = (const SStreamDataBlock*)pInput; - pTask->chkInfo = (SCheckpointInfo){.version = dataVer, .id = ckId, .currentVer = pTask->chkInfo.currentVer}; + SArray* pBlockList = pBlock->blocks; + int32_t numOfBlocks = taosArrayGetSize(pBlockList); + qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, pTask->id.idStr, numOfBlocks, 
pBlock->sourceVer); + qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK); + } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) { + const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)pInput; - taosWLockLatch(&pTask->pMeta->lock); - - streamMetaSaveTask(pTask->pMeta, pTask); - if (streamMetaCommit(pTask->pMeta) < 0) { - taosWUnLockLatch(&pTask->pMeta->lock); - qError("s-task:%s failed to commit stream meta, since %s", pTask->id.idStr, terrstr()); - return -1; + SArray* pBlockList = pMerged->submits; + int32_t numOfBlocks = taosArrayGetSize(pBlockList); + qDebug("s-task:%s %p set submit input (merged), batch num:%d", pTask->id.idStr, pTask, numOfBlocks); + qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT); + } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) { + const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)pInput; + qSetMultiStreamInput(pExecutor, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK); } else { - taosWUnLockLatch(&pTask->pMeta->lock); - qDebug("s-task:%s update checkpoint ver succeed", pTask->id.idStr); + ASSERT(0); } } - double el = (taosGetTimestampMs() - st) / 1000.0; + int64_t resSize = 0; + int32_t totalBlocks = 0; + streamTaskExecImpl(pTask, pInput, &resSize, &totalBlocks); - int32_t numOfBlocks = taosArrayGetSize(pRes); - qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", pTask->id.idStr, el, resSize/1048576.0, numOfBlocks); - - if (numOfBlocks > 0) { - SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); - if (qRes == NULL) { - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - streamFreeQitem(pInput); - return -1; - } - - qRes->type = STREAM_INPUT__DATA_BLOCK; - qRes->blocks = pRes; - - if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__DATA_SUBMIT) { - SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pInput; - qRes->childId = pTask->selfChildId; - qRes->sourceVer = pSubmit->ver; - } else if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__MERGED_SUBMIT) { - SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pInput; - qRes->childId = pTask->selfChildId; - qRes->sourceVer = pMerged->ver; - } - - code = streamTaskOutput(pTask, qRes); - if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { - // backpressure and record position - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - streamFreeQitem(pInput); - taosFreeQitem(qRes); - return -1; - } - } else { - taosArrayDestroy(pRes); - } + double el = (taosGetTimestampMs() - st) / 1000.0; + qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", pTask->id.idStr, el, resSize / 1048576.0, totalBlocks); streamFreeQitem(pInput); } + return 0; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 682ce08c7f..e091b6864b 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -296,6 +296,7 @@ int32_t streamMetaBegin(SStreamMeta* pMeta) { return 0; } +// todo add error log int32_t streamMetaCommit(SStreamMeta* pMeta) { if (tdbCommit(pMeta->db, pMeta->txn) < 0) { qError("failed to commit stream meta"); @@ -311,6 +312,7 @@ int32_t streamMetaCommit(SStreamMeta* pMeta) { TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } + return 0; } From 5cbad1da3ef4355b861c167cbcff0d1643b5cf95 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 12:20:04 +0800 Subject: [PATCH 41/59] refactor: do some internal refactor. 
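Editor's sketch: the refactor above, together with the follow-up whose diff starts below, turns streamTaskExecImpl() into an accumulate-then-flush loop. Output blocks pile up in pRes and are pushed downstream via streamTaskOutputResultBlock() once roughly 1000 of them have accumulated (the follow-up names this MAX_STREAM_RESULT_DUMP_THRESHOLD), so checkpoint updates and dispatch happen per batch rather than per block. A condensed, generic version of that pattern; the names and callbacks here are hypothetical, not the TDengine API:

#include <stddef.h>

#define DUMP_THRESHOLD 1000   /* mirrors MAX_STREAM_RESULT_DUMP_THRESHOLD in the follow-up patch */

typedef struct { void *items[DUMP_THRESHOLD]; int count; } Batch;

/* next() yields results until it returns NULL; flush() hands a full batch downstream. */
static int runAndBatch(void *(*next)(void *ctx), int (*flush)(Batch *b), void *ctx) {
  Batch batch = { .count = 0 };
  for (void *res = next(ctx); res != NULL; res = next(ctx)) {
    batch.items[batch.count++] = res;
    if (batch.count >= DUMP_THRESHOLD) {          /* flush mid-run to bound memory usage */
      if (flush(&batch) != 0) return -1;
      batch.count = 0;
    }
  }
  return (batch.count > 0) ? flush(&batch) : 0;   /* flush whatever is left at the end */
}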
--- source/libs/stream/src/streamExec.c | 73 +++++++++++++++++++---------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 122dbc47f5..038bbbd35b 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -18,6 +18,7 @@ // maximum allowed processed block batches. One block may include several submit blocks #define MAX_STREAM_EXEC_BATCH_NUM 128 #define MIN_STREAM_EXEC_BATCH_NUM 16 +#define MAX_STREAM_RESULT_DUMP_THRESHOLD 1000 static int32_t updateCheckPointInfo (SStreamTask* pTask); static SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); @@ -32,6 +33,39 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus) { return (status == TASK_STATUS__PAUSE); } +static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* pRes, + int32_t size, int64_t* totalSize, int32_t* totalBlocks) { + int32_t code = updateCheckPointInfo(pTask); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + int32_t numOfBlocks = taosArrayGetSize(pRes); + if (numOfBlocks > 0) { + SStreamDataBlock* pStreamBlocks = createStreamDataBlockFromResults(pItem, pTask, size, pRes); + if (pStreamBlocks == NULL) { + return -1; + } + + qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks, size/1048576.0); + + code = streamTaskOutputResultBlock(pTask, pStreamBlocks); + if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + taosFreeQitem(pStreamBlocks); + return -1; + } + } else { + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + } + + *totalSize += size; + *totalBlocks += numOfBlocks; + + ASSERT(taosArrayGetSize(pRes) == 0); + return TSDB_CODE_SUCCESS; +} + static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize, int32_t* totalBlocks) { int32_t code = TSDB_CODE_SUCCESS; void* pExecutor = pTask->exec.pExecutor; @@ -98,38 +132,25 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i pTask->selfChildId, numOfBlocks, size / 1048576.0); // current output should be dispatched to down stream nodes - if (numOfBlocks > 1000) { - code = updateCheckPointInfo(pTask); + if (numOfBlocks >= MAX_STREAM_RESULT_DUMP_THRESHOLD) { + ASSERT(numOfBlocks == taosArrayGetSize(pRes)); + code = doDumpResult(pTask, pItem, pRes, size, totalSize, totalBlocks); if (code != TSDB_CODE_SUCCESS) { return code; } - ASSERT(numOfBlocks == taosArrayGetSize(pRes)); - - if (numOfBlocks > 0) { - SStreamDataBlock* pStreamBlocks = createStreamDataBlockFromResults(pItem, pTask, size, pRes); - if (pStreamBlocks == NULL) { - return -1; - } - - qDebug("s-task:%s output exec stream data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks, size/1048576.0); - - code = streamTaskOutputResultBlock(pTask, pStreamBlocks); - if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position - taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); - taosFreeQitem(pStreamBlocks); - return -1; - } - } else { - taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); - } + size = 0; + numOfBlocks = 0; + ASSERT(taosArrayGetSize(pRes) == 0); } + } - *totalSize += size; - *totalBlocks += numOfBlocks; - - size = 0; - numOfBlocks = 0; + if (numOfBlocks > 0) { + ASSERT(numOfBlocks == taosArrayGetSize(pRes)); + code = doDumpResult(pTask, 
pItem, pRes, size, totalSize, totalBlocks); + if (code != TSDB_CODE_SUCCESS) { + return code; + } ASSERT(taosArrayGetSize(pRes) == 0); } From 336102b8c52905d47d455cea4a3352f1fe47d14a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 13:34:49 +0800 Subject: [PATCH 42/59] refactor: do some internal refactor. --- source/libs/stream/inc/streamInc.h | 3 +- source/libs/stream/src/stream.c | 14 +++------ source/libs/stream/src/streamDispatch.c | 19 ++++++------ source/libs/stream/src/streamExec.c | 41 ++++++++++++++++--------- 4 files changed, 43 insertions(+), 34 deletions(-) diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 7e1b4dcf2d..bd13d9ec37 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -33,7 +33,8 @@ typedef struct { static SStreamGlobalEnv streamEnv; -int32_t streamDispatch(SStreamTask* pTask); +void destroyStreamDataBlock(SStreamDataBlock* pBlock); +int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock); int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 7f19529d11..80511cd491 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -200,22 +200,16 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock int32_t code = 0; if (pTask->outputType == TASK_OUTPUT__TABLE) { pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks); - taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); - taosFreeQitem(pBlock); } else if (pTask->outputType == TASK_OUTPUT__SMA) { pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks); - taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); - taosFreeQitem(pBlock); } else { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); code = taosWriteQitem(pTask->outputQueue->queue, pBlock); if (code != 0) { - taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); - taosFreeQitem(pBlock); return code; } - streamDispatch(pTask); + streamDispatch(pTask, NULL); } return 0; @@ -260,8 +254,10 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i return 0; } - // continue dispatch - streamDispatch(pTask); + SStreamDataBlock* pBlock = NULL; + // continue dispatch one block to down stream in pipeline + streamDispatch(pTask, &pBlock); + destroyStreamDataBlock(pBlock); return 0; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 7e0fbd30b1..82b3f9a2f2 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -501,7 +501,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat return code; } -int32_t streamDispatch(SStreamTask* pTask) { +int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock) { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); int32_t numOfElems = taosQueueItemSize(pTask->outputQueue->queue); @@ -517,23 +517,24 @@ int32_t streamDispatch(SStreamTask* pTask) { return 0; } - SStreamDataBlock* pBlock = streamQueueNextItem(pTask->outputQueue); - if 
(pBlock == NULL) { + SStreamDataBlock* pDispatchedBlock = streamQueueNextItem(pTask->outputQueue); + if (pDispatchedBlock == NULL) { qDebug("s-task:%s stop dispatching since no output in output queue", pTask->id.idStr); atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); return 0; } - ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK); + ASSERT(pDispatchedBlock->type == STREAM_INPUT__DATA_BLOCK); - int32_t code = 0; - if (streamDispatchAllBlocks(pTask, pBlock) < 0) { - code = -1; + int32_t code = streamDispatchAllBlocks(pTask, *pBlock); + if (code != TSDB_CODE_SUCCESS) { streamQueueProcessFail(pTask->outputQueue); atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); } - taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); - taosFreeQitem(pBlock); + if (pBlock != NULL) { + *pBlock = pDispatchedBlock; + } + return code; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 038bbbd35b..18afc367bd 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -23,7 +23,7 @@ static int32_t updateCheckPointInfo (SStreamTask* pTask); static SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); - bool streamTaskShouldStop(const SStreamStatus* pStatus) { +bool streamTaskShouldStop(const SStreamStatus* pStatus) { int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus); return (status == TASK_STATUS__STOP) || (status == TASK_STATUS__DROPPING); } @@ -33,10 +33,11 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus) { return (status == TASK_STATUS__PAUSE); } -static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* pRes, - int32_t size, int64_t* totalSize, int32_t* totalBlocks) { +static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* pRes, int32_t size, int64_t* totalSize, + int32_t* totalBlocks) { int32_t code = updateCheckPointInfo(pTask); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); return code; } @@ -44,6 +45,7 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* if (numOfBlocks > 0) { SStreamDataBlock* pStreamBlocks = createStreamDataBlockFromResults(pItem, pTask, size, pRes); if (pStreamBlocks == NULL) { + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); return -1; } @@ -51,18 +53,19 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* code = streamTaskOutputResultBlock(pTask, pStreamBlocks); if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position - taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); - taosFreeQitem(pStreamBlocks); + destroyStreamDataBlock(pStreamBlocks); return -1; } + + *totalSize += size; + *totalBlocks += numOfBlocks; + + ASSERT(taosArrayGetSize(pRes) == 0); + destroyStreamDataBlock(pStreamBlocks); } else { - taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); } - *totalSize += size; - *totalBlocks += numOfBlocks; - - ASSERT(taosArrayGetSize(pRes) == 0); return TSDB_CODE_SUCCESS; } @@ -73,12 +76,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i *totalBlocks = 0; *totalSize = 0; - SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); int32_t size = 0; int32_t numOfBlocks = 0; + SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); while (1) { if (streamTaskShouldStop(&pTask->status)) { + 
taosArrayDestroy(pRes); // memory leak return 0; } @@ -141,7 +145,6 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i size = 0; numOfBlocks = 0; - ASSERT(taosArrayGetSize(pRes) == 0); } } @@ -151,8 +154,8 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i if (code != TSDB_CODE_SUCCESS) { return code; } - - ASSERT(taosArrayGetSize(pRes) == 0); + } else { + taosArrayDestroy(pRes); } return 0; @@ -238,7 +241,10 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { qDebug("s-task:%s scan exec dispatch blocks:%d", pTask->id.idStr, batchCnt); - streamDispatch(pTask); + + SStreamDataBlock* pBlock = NULL; + streamDispatch(pTask, &pBlock); + destroyStreamDataBlock(pBlock); } if (finished) { @@ -337,6 +343,11 @@ SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStr return pStreamBlocks; } +void destroyStreamDataBlock(SStreamDataBlock* pBlock) { + taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); + taosFreeQitem(pBlock); +} + int32_t streamExecForAll(SStreamTask* pTask) { int32_t code = 0; while (1) { From 4974ac78f7bc95188d5ccf45d53c951948236a26 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 13:42:09 +0800 Subject: [PATCH 43/59] fix: fix invalid ref. --- source/libs/stream/src/streamDispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 82b3f9a2f2..62b734da4e 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -526,7 +526,7 @@ int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock) { ASSERT(pDispatchedBlock->type == STREAM_INPUT__DATA_BLOCK); - int32_t code = streamDispatchAllBlocks(pTask, *pBlock); + int32_t code = streamDispatchAllBlocks(pTask, pDispatchedBlock); if (code != TSDB_CODE_SUCCESS) { streamQueueProcessFail(pTask->outputQueue); atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); From 484788a60f1cacd700ea21e12d8b3e86c4ad8f70 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 14:29:51 +0800 Subject: [PATCH 44/59] fix:add null ptr check. 
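Editor's sketch: patch 43 above is worth pausing on. streamDispatch() had grown an out-parameter, and the dispatch call used *pBlock, which at that point still held whatever the caller had put there (NULL in the callers shown) instead of the block just popped from the output queue; the fix is simply to dispatch the local pDispatchedBlock. The follow-up whose diff starts below additionally NULL-initializes the out-parameter and tolerates a NULL pBlock. A minimal sketch of that defensive shape, with hypothetical names:

#include <stddef.h>

typedef struct Block Block;

/* Hypothetical helpers: pop the next block (NULL when empty) and send one block. */
extern Block *queuePop(void);
extern int    dispatchOne(Block *b);

/* outBlock is optional: callers that do not care may pass NULL. */
static int dispatchNext(Block **outBlock) {
  if (outBlock != NULL) {
    *outBlock = NULL;               /* initialize the out-param before any early return */
  }

  Block *b = queuePop();
  if (b == NULL) {
    return 0;                       /* nothing to dispatch */
  }

  int code = dispatchOne(b);        /* use the local, never *outBlock, inside the function */
  if (outBlock != NULL) {
    *outBlock = b;                  /* publish to the caller only once it is valid */
  }
  return code;
}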
--- source/libs/stream/src/streamDispatch.c | 3 +++ source/libs/stream/src/streamExec.c | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 62b734da4e..33e7b949f9 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -503,6 +503,9 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock) { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); + if (pBlock != NULL) { + *pBlock = NULL; + } int32_t numOfElems = taosQueueItemSize(pTask->outputQueue->queue); if (numOfElems > 0) { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 18afc367bd..eedf5fe90a 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -344,6 +344,10 @@ SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStr } void destroyStreamDataBlock(SStreamDataBlock* pBlock) { + if (pBlock == NULL) { + return; + } + taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); taosFreeQitem(pBlock); } From afc384bead58c2db5526415092c9f424c1da1f90 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 14:44:34 +0800 Subject: [PATCH 45/59] fix:remove invalid assert. --- source/libs/stream/src/streamExec.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index eedf5fe90a..77bac6cee7 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -60,7 +60,6 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* *totalSize += size; *totalBlocks += numOfBlocks; - ASSERT(taosArrayGetSize(pRes) == 0); destroyStreamDataBlock(pStreamBlocks); } else { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); From 10692fde5e7375b63fcf106d69b9088db84e2684 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 14:53:28 +0800 Subject: [PATCH 46/59] other: merge main. --- source/libs/stream/src/streamExec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 77bac6cee7..a9c8ffca0c 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -77,9 +77,11 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i int32_t size = 0; int32_t numOfBlocks = 0; - SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); + SArray* pRes = NULL; while (1) { + pRes = taosArrayInit(4, sizeof(SSDataBlock)); + if (streamTaskShouldStop(&pTask->status)) { taosArrayDestroy(pRes); // memory leak return 0; From 6932c59510b070e6f053d577f06ace86d2806eb4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 15:09:30 +0800 Subject: [PATCH 47/59] other: merge main. 
--- source/libs/stream/src/streamExec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index a9c8ffca0c..b325a8a961 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -80,7 +80,9 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i SArray* pRes = NULL; while (1) { - pRes = taosArrayInit(4, sizeof(SSDataBlock)); + if (pRes == NULL) { + pRes = taosArrayInit(4, sizeof(SSDataBlock)); + } if (streamTaskShouldStop(&pTask->status)) { taosArrayDestroy(pRes); // memory leak From 84146e6aaaed7ebdfafd8be87a3e4200789cf6cc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 15:16:51 +0800 Subject: [PATCH 48/59] other: merge main. --- source/libs/stream/src/streamExec.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index b325a8a961..2f019529e4 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -146,6 +146,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i return code; } + pRes = NULL; size = 0; numOfBlocks = 0; } From 0ffb0cb4a01001a99f93e00229f82cb18d5e5a16 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 20 May 2023 15:17:14 +0800 Subject: [PATCH 49/59] docs: update wechat tdengine1 (#21395) * docs: update readme with tdengin1 wechat * docs: change wechat account from eco to tdengine1 --- CONTRIBUTING-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING-CN.md b/CONTRIBUTING-CN.md index efaa2077fe..887cff0978 100644 --- a/CONTRIBUTING-CN.md +++ b/CONTRIBUTING-CN.md @@ -18,7 +18,7 @@ 注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。 4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。 -如遇任何问题,请添加官方微信TDengineECO。我们的团队会帮忙解决。 +如遇任何问题,请添加官方微信 tdengine1。我们的团队会帮忙解决。 ## 给贡献者的礼品 @@ -48,4 +48,4 @@ TDengine 社区致力于让更多的开发者理解和使用它。 ## 联系我们 -如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:TDengineECO +如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:tdengine1。 From aa7ea60bca6217d589ca1453959e6cb6f6a63b86 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 15:43:48 +0800 Subject: [PATCH 50/59] fix(stream): update the free strategy. 
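Editor's sketch: patches 46-48 above make pRes lazily allocated inside the execution loop: the array is created only when needed, handed off wholesale by doDumpResult(), and the local pointer is then reset to NULL so the next iteration allocates a fresh array instead of touching one whose ownership has already moved on. The patch whose diff follows tightens the other side of that contract (who frees a dispatched block). A generic sketch of the hand-off idiom, with hypothetical names and the failure-handling policy assumed here (producer frees on a failed hand-off):

#include <stdlib.h>

typedef struct { int *data; int len; } Buf;

extern int handOff(Buf *b);          /* consumer takes ownership of b on success */

static int produceLoop(int rounds) {
  Buf *buf = NULL;
  for (int i = 0; i < rounds; ++i) {
    if (buf == NULL) {               /* lazily allocate only when actually needed */
      buf = calloc(1, sizeof(Buf));
      if (buf == NULL) return -1;
    }

    /* ... fill buf with this round's results ... */

    if (handOff(buf) != 0) {         /* assumed policy: on failure we still own buf */
      free(buf);
      return -1;
    }
    buf = NULL;                      /* ownership moved: forget the pointer to avoid reuse/double free */
  }
  return 0;
}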
--- source/libs/stream/inc/streamInc.h | 2 +- source/libs/stream/src/stream.c | 8 ++++---- source/libs/stream/src/streamDispatch.c | 12 +++--------- source/libs/stream/src/streamExec.c | 7 ++----- 4 files changed, 10 insertions(+), 19 deletions(-) diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index bd13d9ec37..049aac6214 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -34,7 +34,7 @@ typedef struct { static SStreamGlobalEnv streamEnv; void destroyStreamDataBlock(SStreamDataBlock* pBlock); -int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock); +int32_t streamDispatch(SStreamTask* pTask); int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 80511cd491..7987490bf5 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -200,8 +200,10 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock int32_t code = 0; if (pTask->outputType == TASK_OUTPUT__TABLE) { pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks); + destroyStreamDataBlock(pBlock); } else if (pTask->outputType == TASK_OUTPUT__SMA) { pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks); + destroyStreamDataBlock(pBlock); } else { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); code = taosWriteQitem(pTask->outputQueue->queue, pBlock); @@ -209,7 +211,7 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock return code; } - streamDispatch(pTask, NULL); + streamDispatch(pTask); } return 0; @@ -254,10 +256,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i return 0; } - SStreamDataBlock* pBlock = NULL; // continue dispatch one block to down stream in pipeline - streamDispatch(pTask, &pBlock); - destroyStreamDataBlock(pBlock); + streamDispatch(pTask); return 0; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 33e7b949f9..5723802d34 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -501,12 +501,8 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat return code; } -int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock) { +int32_t streamDispatch(SStreamTask* pTask) { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); - if (pBlock != NULL) { - *pBlock = NULL; - } - int32_t numOfElems = taosQueueItemSize(pTask->outputQueue->queue); if (numOfElems > 0) { qDebug("s-task:%s try to dispatch intermediate result block to downstream, elem in outputQ:%d", pTask->id.idStr, @@ -535,9 +531,7 @@ int32_t streamDispatch(SStreamTask* pTask, SStreamDataBlock** pBlock) { atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); } - if (pBlock != NULL) { - *pBlock = pDispatchedBlock; - } - + // this block can be freed only when it has been pushed to down stream. 
+ destroyStreamDataBlock(pDispatchedBlock); return code; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 2f019529e4..077d011900 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -60,7 +60,7 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* *totalSize += size; *totalBlocks += numOfBlocks; - destroyStreamDataBlock(pStreamBlocks); +// destroyStreamDataBlock(pStreamBlocks); } else { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); } @@ -245,10 +245,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { qDebug("s-task:%s scan exec dispatch blocks:%d", pTask->id.idStr, batchCnt); - - SStreamDataBlock* pBlock = NULL; - streamDispatch(pTask, &pBlock); - destroyStreamDataBlock(pBlock); + streamDispatch(pTask); } if (finished) { From 3cef1e7c42b4b2629445b454649edd24479543c8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 15:45:07 +0800 Subject: [PATCH 51/59] fix(stream): add some logs. --- source/libs/stream/src/stream.c | 2 +- source/libs/stream/src/streamExec.c | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 7987490bf5..00ce5aa40e 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -207,7 +207,7 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock } else { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); code = taosWriteQitem(pTask->outputQueue->queue, pBlock); - if (code != 0) { + if (code != 0) { // todo failed to add it into the output queue, free it. return code; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 077d011900..65643af963 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -59,8 +59,6 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* *totalSize += size; *totalBlocks += numOfBlocks; - -// destroyStreamDataBlock(pStreamBlocks); } else { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); } From de44c9160c2ef1b0c7715c4d13676ceabaa55d76 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 18:05:39 +0800 Subject: [PATCH 52/59] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 3 +- source/dnode/snode/src/snode.c | 10 ++--- source/dnode/vnode/src/tq/tq.c | 40 ++++++++++------- source/dnode/vnode/src/vnd/vnodeSync.c | 4 +- source/libs/stream/inc/streamInc.h | 7 ++- source/libs/stream/src/stream.c | 44 +++++++++--------- source/libs/stream/src/streamData.c | 50 ++++++++++++++++++--- source/libs/stream/src/streamDispatch.c | 60 +++++++++++++------------ source/libs/stream/src/streamExec.c | 37 +-------------- source/libs/stream/src/streamRecover.c | 20 +++++---- 10 files changed, 149 insertions(+), 126 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 7c7039c1c0..8e7dd0bb0d 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -372,6 +372,7 @@ typedef struct { int32_t upstreamChildId; int32_t upstreamNodeId; int32_t blockNum; + int64_t totalLen; SArray* dataLen; // SArray SArray* data; // SArray } SStreamDispatchReq; @@ -527,7 +528,7 @@ void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq); int32_t streamSetupTrigger(SStreamTask* pTask); int32_t streamProcessRunReq(SStreamTask* pTask); -int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec); +int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec); int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code); int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg); diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 2ff338242f..ec4500aee6 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -43,7 +43,7 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) { .info = pMsg->info, .code = 0, }; - streamProcessDispatchReq(pTask, &req, &rsp, false); + streamProcessDispatchMsg(pTask, &req, &rsp, false); streamMetaReleaseTask(pSnode->pMeta, pTask); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); @@ -203,17 +203,13 @@ int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) { SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); if (pTask) { - SRpcMsg rsp = { - .info = pMsg->info, - .code = 0, - }; - streamProcessDispatchReq(pTask, &req, &rsp, exec); + SRpcMsg rsp = { .info = pMsg->info, .code = 0 }; + streamProcessDispatchMsg(pTask, &req, &rsp, exec); streamMetaReleaseTask(pSnode->pMeta, pTask); return 0; } else { return -1; } - return 0; } int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 7dc7f999fb..c9749906d0 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -853,14 +853,17 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { } int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { - char* msgStr = pMsg->pCont; - char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); - int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); + char* msgStr = pMsg->pCont; + char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); + int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); + SStreamTaskCheckReq req; SDecoder decoder; + tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen); tDecodeSStreamTaskCheckReq(&decoder, &req); tDecoderClear(&decoder); + int32_t taskId = req.downstreamTaskId; SStreamTaskCheckRsp rsp = { .reqId = req.reqId, @@ -874,18 +877,18 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* 
pMsg) { SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); - if (pTask) { + if (pTask != NULL) { rsp.status = streamTaskCheckStatus(pTask); streamMetaReleaseTask(pTq->pStreamMeta, pTask); - tqDebug("tq recv task check req(reqId:0x%" PRIx64 + tqDebug("s-task:%s recv task check req(reqId:0x%" PRIx64 ") %d at node %d task status:%d, check req from task %d at node %d, rsp status %d", - rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, pTask->status.taskStatus, rsp.upstreamTaskId, - rsp.upstreamNodeId, rsp.status); + pTask->id.idStr, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, pTask->status.taskStatus, + rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); } else { rsp.status = 0; - tqDebug("tq recv task check(taskId:%d not built yet) req(reqId:0x%" PRIx64 - ") %d at node %d, check req from task %d at node %d, rsp status %d", + tqDebug("tq recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 + ") %d at node %d, check req from task:0x%x at node %d, rsp status %d", taskId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); } @@ -893,9 +896,10 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { SEncoder encoder; int32_t code; int32_t len; + tEncodeSize(tEncodeSStreamTaskCheckRsp, &rsp, len, code); if (code < 0) { - tqError("unable to encode rsp %d", __LINE__); + tqError("vgId:%d failed to encode task check rsp, task:0x%x", pTq->pStreamMeta->vgId, taskId); return -1; } @@ -908,6 +912,7 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { tEncoderClear(&encoder); SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = pMsg->info}; + tmsgSendRsp(&rspMsg); return 0; } @@ -919,17 +924,20 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, char* msg, int32 SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msg, msgLen); code = tDecodeSStreamTaskCheckRsp(&decoder, &rsp); + if (code < 0) { tDecoderClear(&decoder); return -1; } tDecoderClear(&decoder); - tqDebug("tq recv task check rsp(reqId:0x%" PRIx64 ") %d at node %d check req from task %d at node %d, status %d", + tqDebug("tq recv task check rsp(reqId:0x%" PRIx64 ") %d at node %d check req from task:0x%x at node %d, status %d", rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.upstreamTaskId); if (pTask == NULL) { + tqError("tq failed to locate the stream task:0x%x vgId:%d, it may have been destroyed", rsp.upstreamTaskId, + pTq->pStreamMeta->vgId); return -1; } @@ -1230,15 +1238,17 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { char* msgStr = pMsg->pCont; char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); - SStreamDispatchReq req; - SDecoder decoder; + + SStreamDispatchReq req = {0}; + + SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen); tDecodeStreamDispatchReq(&decoder, &req); SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId); if (pTask) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; - streamProcessDispatchReq(pTask, &req, &rsp, exec); + streamProcessDispatchMsg(pTask, &req, &rsp, exec); streamMetaReleaseTask(pTq->pStreamMeta, pTask); return 0; } else { @@ -1357,7 +1367,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); if (pTask) { SRpcMsg rsp = 
{.info = pMsg->info, .code = 0}; - streamProcessDispatchReq(pTask, &req, &rsp, false); + streamProcessDispatchMsg(pTask, &req, &rsp, false); streamMetaReleaseTask(pTq->pStreamMeta, pTask); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 4ea5e3c6ec..29f1ddc50f 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -551,9 +551,9 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) // start to restore all stream tasks if (tsDisableStream) { - vInfo("vgId:%d, not restore stream tasks, since disabled", pVnode->config.vgId); + vInfo("vgId:%d, not launch stream tasks, since stream tasks are disabled", pVnode->config.vgId); } else { - vInfo("vgId:%d start to restore stream tasks", pVnode->config.vgId); + vInfo("vgId:%d start to launch stream tasks", pVnode->config.vgId); tqStartStreamTasks(pVnode->pTq); } } diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 049aac6214..2c1956998a 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -33,9 +33,12 @@ typedef struct { static SStreamGlobalEnv streamEnv; +int32_t streamDispatchStreamBlock(SStreamTask* pTask); + +SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg); +SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); void destroyStreamDataBlock(SStreamDataBlock* pBlock); -int32_t streamDispatch(SStreamTask* pTask); -int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); + int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 00ce5aa40e..e33e7845bb 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -120,19 +120,16 @@ int32_t streamSchedExec(SStreamTask* pTask) { } int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) { - SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); - int8_t status = 0; - if (pData == NULL) { + + SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, STREAM_INPUT__DATA_BLOCK, pReq->dataSrcVgId); + if (pBlock == NULL) { streamTaskInputFail(pTask); status = TASK_INPUT_STATUS__FAILED; - qDebug("vgId:%d, s-task:%s failed to received dispatch msg, reason: out of memory", pTask->pMeta->vgId, pTask->id.idStr); + qDebug("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, + pTask->id.idStr); } else { - pData->type = STREAM_INPUT__DATA_BLOCK; - pData->srcVgId = pReq->dataSrcVgId; - - streamConvertDispatchMsgToData(pReq, pData); - if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pData) == 0) { + if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock) == 0) { status = TASK_INPUT_STATUS__NORMAL; } else { // input queue is full, upstream is blocked now status = TASK_INPUT_STATUS__BLOCKED; @@ -142,15 +139,16 @@ int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pR // rsp by input status void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); ((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId); - 
SStreamDispatchRsp* pCont = POINTER_SHIFT(buf, sizeof(SMsgHead)); - pCont->inputStatus = status; - pCont->streamId = htobe64(pReq->streamId); - pCont->upstreamNodeId = htonl(pReq->upstreamNodeId); - pCont->upstreamTaskId = htonl(pReq->upstreamTaskId); - pCont->downstreamNodeId = htonl(pTask->nodeId); - pCont->downstreamTaskId = htonl(pTask->id.taskId); - pRsp->pCont = buf; + SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT(buf, sizeof(SMsgHead)); + pDispatchRsp->inputStatus = status; + pDispatchRsp->streamId = htobe64(pReq->streamId); + pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId); + pDispatchRsp->upstreamTaskId = htonl(pReq->upstreamTaskId); + pDispatchRsp->downstreamNodeId = htonl(pTask->nodeId); + pDispatchRsp->downstreamTaskId = htonl(pTask->id.taskId); + + pRsp->pCont = buf; pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); tmsgSendRsp(pRsp); @@ -211,15 +209,15 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock return code; } - streamDispatch(pTask); + streamDispatchStreamBlock(pTask); } return 0; } -int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { - qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d)", pTask->id.idStr, pReq->upstreamTaskId, - pReq->upstreamNodeId); +int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { + qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, + pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); // todo add the input queue buffer limitation streamTaskEnqueueBlocks(pTask, pReq, pRsp); @@ -257,7 +255,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i } // continue dispatch one block to down stream in pipeline - streamDispatch(pTask); + streamDispatchStreamBlock(pTask); return 0; } @@ -267,7 +265,7 @@ int32_t streamProcessRunReq(SStreamTask* pTask) { } /*if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {*/ - /*streamDispatch(pTask);*/ + /*streamDispatchStreamBlock(pTask);*/ /*}*/ return 0; } diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index ca24414db8..7c06e7deb3 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -15,20 +15,28 @@ #include "streamInc.h" -int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { +SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) { + SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen); + if (pData == NULL) { + return NULL; + } + + pData->type = blockType; + pData->srcVgId = srcVg; + int32_t blockNum = pReq->blockNum; SArray* pArray = taosArrayInit_s(sizeof(SSDataBlock), blockNum); if (pArray == NULL) { - return -1; + return NULL; } - ASSERT(pReq->blockNum == taosArrayGetSize(pReq->data)); - ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen)); + ASSERT((pReq->blockNum == taosArrayGetSize(pReq->data)) && (pReq->blockNum == taosArrayGetSize(pReq->dataLen))); for (int32_t i = 0; i < blockNum; i++) { SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*) taosArrayGetP(pReq->data, i); SSDataBlock* pDataBlock = taosArrayGet(pArray, i); blockDecode(pDataBlock, pRetrieve->data); + // TODO: refactor pDataBlock->info.window.skey = be64toh(pRetrieve->skey); 
pDataBlock->info.window.ekey = be64toh(pRetrieve->ekey); @@ -41,7 +49,39 @@ int32_t streamConvertDispatchMsgToData(const SStreamDispatchReq* pReq, SStreamDa } pData->blocks = pArray; - return 0; + return pData; +} + +SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes) { + SStreamDataBlock* pStreamBlocks = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, resultSize); + if (pStreamBlocks == NULL) { + taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); + return NULL; + } + + pStreamBlocks->type = STREAM_INPUT__DATA_BLOCK; + pStreamBlocks->blocks = pRes; + + if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { + SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pItem; + pStreamBlocks->childId = pTask->selfChildId; + pStreamBlocks->sourceVer = pSubmit->ver; + } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) { + SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pItem; + pStreamBlocks->childId = pTask->selfChildId; + pStreamBlocks->sourceVer = pMerged->ver; + } + + return pStreamBlocks; +} + +void destroyStreamDataBlock(SStreamDataBlock* pBlock) { + if (pBlock == NULL) { + return; + } + + taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); + taosFreeQitem(pBlock); } int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 5723802d34..1473345730 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -24,6 +24,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->blockNum) < 0) return -1; + if (tEncodeI64(pEncoder, pReq->totalLen) < 0) return -1; ASSERT(taosArrayGetSize(pReq->data) == pReq->blockNum); ASSERT(taosArrayGetSize(pReq->dataLen) == pReq->blockNum); for (int32_t i = 0; i < pReq->blockNum; i++) { @@ -45,6 +46,8 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->blockNum) < 0) return -1; + if (tDecodeI64(pDecoder, &pReq->totalLen) < 0) return -1; + ASSERT(pReq->blockNum > 0); pReq->data = taosArrayInit(pReq->blockNum, sizeof(void*)); pReq->dataLen = taosArrayInit(pReq->blockNum, sizeof(int32_t)); @@ -178,7 +181,7 @@ CLEAR: return code; } -static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { +static int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); void* buf = taosMemoryCalloc(1, dataStrLen); if (buf == NULL) return -1; @@ -205,6 +208,7 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis taosArrayPush(pReq->dataLen, &actualLen); taosArrayPush(pReq->data, &buf); + pReq->totalLen += dataStrLen; return 0; } @@ -291,7 +295,7 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov return 0; } -int32_t streamDispatchOneDataReq(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) { +int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) { void* buf = 
NULL; int32_t code = -1; SRpcMsg msg = {0}; @@ -325,6 +329,7 @@ int32_t streamDispatchOneDataReq(SStreamTask* pTask, const SStreamDispatchReq* p code = 0; return 0; + FAIL: if (buf) rpcFreeCont(buf); return code; @@ -360,7 +365,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j); ASSERT(pVgInfo->vgId > 0); if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) { - if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) { + if (streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) { return -1; } if (pReqs[j].blockNum == 0) { @@ -376,9 +381,9 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S } int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { - int32_t code = -1; - int32_t blockNum = taosArrayGetSize(pData->blocks); - ASSERT(blockNum != 0); + int32_t code = 0; + int32_t numOfBlocks = taosArrayGetSize(pData->blocks); + ASSERT(numOfBlocks != 0); if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) { SStreamDispatchReq req = { @@ -387,19 +392,23 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat .upstreamTaskId = pTask->id.taskId, .upstreamChildId = pTask->selfChildId, .upstreamNodeId = pTask->nodeId, - .blockNum = blockNum, + .blockNum = numOfBlocks, }; - req.data = taosArrayInit(blockNum, sizeof(void*)); - req.dataLen = taosArrayInit(blockNum, sizeof(int32_t)); + req.data = taosArrayInit(numOfBlocks, sizeof(void*)); + req.dataLen = taosArrayInit(numOfBlocks, sizeof(int32_t)); if (req.data == NULL || req.dataLen == NULL) { - goto FAIL_FIXED_DISPATCH; + taosArrayDestroyP(req.data, taosMemoryFree); + taosArrayDestroy(req.dataLen); + return code; } - for (int32_t i = 0; i < blockNum; i++) { + for (int32_t i = 0; i < numOfBlocks; i++) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); - if (streamAddBlockToDispatchMsg(pDataBlock, &req) < 0) { - goto FAIL_FIXED_DISPATCH; + if (streamAddBlockIntoDispatchMsg(pDataBlock, &req) < 0) { + taosArrayDestroyP(req.data, taosMemoryFree); + taosArrayDestroy(req.dataLen); + return code; } } @@ -410,19 +419,14 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat req.taskId = downstreamTaskId; qDebug("s-task:%s (child taskId:%d) fix-dispatch blocks:%d to down stream s-task:%d in vgId:%d", pTask->id.idStr, - pTask->selfChildId, blockNum, downstreamTaskId, vgId); + pTask->selfChildId, numOfBlocks, downstreamTaskId, vgId); - if (streamDispatchOneDataReq(pTask, &req, vgId, pEpSet) < 0) { - goto FAIL_FIXED_DISPATCH; + if (doSendDispatchMsg(pTask, &req, vgId, pEpSet) < 0) { + taosArrayDestroyP(req.data, taosMemoryFree); + taosArrayDestroy(req.dataLen); + return code; } - code = 0; - - FAIL_FIXED_DISPATCH: - taosArrayDestroyP(req.data, taosMemoryFree); - taosArrayDestroy(req.dataLen); - return code; - } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { int32_t rspCnt = atomic_load_32(&pTask->shuffleDispatcher.waitingRspCnt); ASSERT(rspCnt == 0); @@ -452,13 +456,13 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat pReqs[i].taskId = pVgInfo->taskId; } - for (int32_t i = 0; i < blockNum; i++) { + for (int32_t i = 0; i < numOfBlocks; i++) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); // TODO: do not use broadcast if (pDataBlock->info.type == STREAM_DELETE_RESULT) { for (int32_t j = 0; j < vgSz; j++) { - if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) { + if 
(streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) { goto FAIL_SHUFFLE_DISPATCH; } if (pReqs[j].blockNum == 0) { @@ -475,7 +479,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat } qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr, pTask->selfChildId, - blockNum, vgSz); + numOfBlocks, vgSz); for (int32_t i = 0; i < vgSz; i++) { if (pReqs[i].blockNum > 0) { @@ -483,7 +487,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr, pTask->selfChildId, pReqs[i].blockNum, pVgInfo->vgId); - if (streamDispatchOneDataReq(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet) < 0) { + if (doSendDispatchMsg(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet) < 0) { goto FAIL_SHUFFLE_DISPATCH; } } @@ -501,7 +505,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat return code; } -int32_t streamDispatch(SStreamTask* pTask) { +int32_t streamDispatchStreamBlock(SStreamTask* pTask) { ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH); int32_t numOfElems = taosQueueItemSize(pTask->outputQueue->queue); if (numOfElems > 0) { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 65643af963..c7420b0bef 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -21,7 +21,6 @@ #define MAX_STREAM_RESULT_DUMP_THRESHOLD 1000 static int32_t updateCheckPointInfo (SStreamTask* pTask); -static SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); bool streamTaskShouldStop(const SStreamStatus* pStatus) { int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus); @@ -43,7 +42,7 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray* int32_t numOfBlocks = taosArrayGetSize(pRes); if (numOfBlocks > 0) { - SStreamDataBlock* pStreamBlocks = createStreamDataBlockFromResults(pItem, pTask, size, pRes); + SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes); if (pStreamBlocks == NULL) { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); return -1; @@ -243,7 +242,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { qDebug("s-task:%s scan exec dispatch blocks:%d", pTask->id.idStr, batchCnt); - streamDispatch(pTask); + streamDispatchStreamBlock(pTask); } if (finished) { @@ -319,38 +318,6 @@ int32_t updateCheckPointInfo (SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } -SStreamDataBlock* createStreamDataBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes) { - SStreamDataBlock* pStreamBlocks = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, resultSize); - if (pStreamBlocks == NULL) { - taosArrayClearEx(pRes, (FDelete)blockDataFreeRes); - return NULL; - } - - pStreamBlocks->type = STREAM_INPUT__DATA_BLOCK; - pStreamBlocks->blocks = pRes; - - if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { - SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pItem; - pStreamBlocks->childId = pTask->selfChildId; - pStreamBlocks->sourceVer = pSubmit->ver; - } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) { - SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pItem; - 
pStreamBlocks->childId = pTask->selfChildId; - pStreamBlocks->sourceVer = pMerged->ver; - } - - return pStreamBlocks; -} - -void destroyStreamDataBlock(SStreamDataBlock* pBlock) { - if (pBlock == NULL) { - return; - } - - taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); - taosFreeQitem(pBlock); -} - int32_t streamExecForAll(SStreamTask* pTask) { int32_t code = 0; while (1) { diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 7236c6c4b9..eb2535782e 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -55,7 +55,7 @@ int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version) { // checkstatus int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version) { - qDebug("s-taks:%s in fill history stage, ver:%"PRId64, pTask->id.idStr, version); + qDebug("s-task:%s in fill history stage, ver:%"PRId64, pTask->id.idStr, version); SStreamTaskCheckReq req = { .streamId = pTask->id.streamId, @@ -72,7 +72,7 @@ int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version) { req.downstreamTaskId = pTask->fixedEpDispatcher.taskId; pTask->checkReqId = req.reqId; - qDebug("s-task:%s at node %d check downstream task:%d at node %d", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, + qDebug("s-task:%s at node %d check downstream task:0x%x at node %d", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, req.downstreamNodeId); streamDispatchCheckMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet); } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { @@ -88,7 +88,7 @@ int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version) { taosArrayPush(pTask->checkReqIds, &req.reqId); req.downstreamNodeId = pVgInfo->vgId; req.downstreamTaskId = pVgInfo->taskId; - qDebug("s-task:%s at node %d check downstream task:%d at node %d (shuffle)", pTask->id.idStr, pTask->nodeId, + qDebug("s-task:%s at node %d check downstream task:0x%x at node %d (shuffle)", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, req.downstreamNodeId); streamDispatchCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet); } @@ -111,15 +111,16 @@ int32_t streamRecheckOneDownstream(SStreamTask* pTask, const SStreamTaskCheckRsp .childId = pRsp->childId, }; - qDebug("s-task:%s at node %d check downstream task %d at node %d (recheck)", pTask->id.idStr, pTask->nodeId, + qDebug("s-task:%s at node %d check downstream task:0x%x at node %d (recheck)", pTask->id.idStr, pTask->nodeId, req.downstreamTaskId, req.downstreamNodeId); if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) { streamDispatchCheckMsg(pTask, &req, pRsp->downstreamNodeId, &pTask->fixedEpDispatcher.epSet); } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; - int32_t vgSz = taosArrayGetSize(vgInfo); - for (int32_t i = 0; i < vgSz; i++) { + + int32_t numOfVgs = taosArrayGetSize(vgInfo); + for (int32_t i = 0; i < numOfVgs; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); if (pVgInfo->taskId == req.downstreamTaskId) { streamDispatchCheckMsg(pTask, &req, pRsp->downstreamNodeId, &pVgInfo->epSet); @@ -135,7 +136,9 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask) { } int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp, int64_t version) { - qDebug("task %d at node %d recv check rsp from task %d at node %d: status %d", pRsp->upstreamTaskId, + ASSERT(pTask->id.taskId == pRsp->upstreamTaskId); + + 
qDebug("s-task:%s at node %d recv check rsp from task:0x%x at node %d: status %d", pTask->id.idStr, pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status); if (pRsp->status == 1) { @@ -175,9 +178,10 @@ int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* ASSERT(0); } } else { // not ready, wait for 100ms and retry - qDebug("s-task:%s downstream taskId:%d (vgId:%d) not ready, wait for 100ms and retry", pTask->id.idStr, + qDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, wait for 100ms and retry", pTask->id.idStr, pRsp->downstreamTaskId, pRsp->downstreamNodeId); taosMsleep(100); + streamRecheckOneDownstream(pTask, pRsp); } From 899e4d3350610d456bf3ae6c60b15272f6e4f989 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 18:13:58 +0800 Subject: [PATCH 53/59] refactor: do some internal refactor. --- source/dnode/vnode/src/tq/tq.c | 8 +++++--- source/libs/stream/src/streamMeta.c | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c9749906d0..74361ec319 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -957,6 +957,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms // 1.deserialize msg and build task SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); if (pTask == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, (int32_t) sizeof(SStreamTask)); return -1; } @@ -974,9 +976,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms // 2.save task, use the newest commit version as the initial start version of stream task. taosWLockLatch(&pTq->pStreamMeta->lock); code = streamMetaAddDeployedTask(pTq->pStreamMeta, sversion, pTask); + int32_t numOfTasks = streamMetaGetNumOfTasks(pTq->pStreamMeta); if (code < 0) { - tqError("vgId:%d failed to add s-task:%s, total:%d", vgId, pTask->id.idStr, - streamMetaGetNumOfTasks(pTq->pStreamMeta)); + tqError("vgId:%d failed to add s-task:%s, total:%d", vgId, pTask->id.idStr, numOfTasks); taosWUnLockLatch(&pTq->pStreamMeta->lock); return -1; } @@ -989,7 +991,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms } tqDebug("vgId:%d s-task:%s is deployed and add meta from mnd, status:%d, total:%d", vgId, pTask->id.idStr, - pTask->status.taskStatus, streamMetaGetNumOfTasks(pTq->pStreamMeta)); + pTask->status.taskStatus, numOfTasks); return 0; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index e091b6864b..33375fe921 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -375,7 +375,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { } if (pTask->fillHistory) { - pTask->status.taskStatus = TASK_STATUS__WAIT_DOWNSTREAM; + ASSERT(pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM); streamTaskCheckDownstream(pTask, ver); } } From cb75e5a863dd10b75aff6d645db8f58b86a26e36 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 22:07:04 +0800 Subject: [PATCH 54/59] fix(stream): fix memory leak. 
--- source/libs/stream/src/streamDispatch.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 1473345730..722013115f 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -421,12 +421,10 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat qDebug("s-task:%s (child taskId:%d) fix-dispatch blocks:%d to down stream s-task:%d in vgId:%d", pTask->id.idStr, pTask->selfChildId, numOfBlocks, downstreamTaskId, vgId); - if (doSendDispatchMsg(pTask, &req, vgId, pEpSet) < 0) { - taosArrayDestroyP(req.data, taosMemoryFree); - taosArrayDestroy(req.dataLen); - return code; - } - + code = doSendDispatchMsg(pTask, &req, vgId, pEpSet); + taosArrayDestroyP(req.data, taosMemoryFree); + taosArrayDestroy(req.dataLen); + return code; } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { int32_t rspCnt = atomic_load_32(&pTask->shuffleDispatcher.waitingRspCnt); ASSERT(rspCnt == 0); From dcb91082f9aa1c047b36909412ae980ca3e05b4e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 20 May 2023 23:37:40 +0800 Subject: [PATCH 55/59] fix(stream): fix error. --- source/dnode/snode/src/snode.c | 6 ++---- source/libs/stream/src/stream.c | 4 ++-- source/libs/stream/src/streamDispatch.c | 20 ++++++++------------ source/libs/stream/src/streamExec.c | 7 ++----- 4 files changed, 14 insertions(+), 23 deletions(-) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index ec4500aee6..e9225f3d6e 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -223,11 +223,9 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) { tDecoderClear(&decoder); int32_t taskId = req.dstTaskId; SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + if (pTask) { - SRpcMsg rsp = { - .info = pMsg->info, - .code = 0, - }; + SRpcMsg rsp = { .info = pMsg->info, .code = 0}; streamProcessRetrieveReq(pTask, &req, &rsp); streamMetaReleaseTask(pSnode->pMeta, pTask); tDeleteStreamRetrieveReq(&req); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index e33e7845bb..8cc1ef1dd3 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -161,7 +161,7 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, // enqueue if (pData != NULL) { - qDebug("task %d(child %d) recv retrieve req from task %d, reqId %" PRId64, pTask->id.taskId, pTask->selfChildId, + qDebug("s-task:%s (child %d) recv retrieve req from task:0x%x, reqId %" PRId64, pTask->id.idStr, pTask->selfChildId, pReq->srcTaskId, pReq->reqId); pData->type = STREAM_INPUT__DATA_RETRIEVE; @@ -271,7 +271,7 @@ int32_t streamProcessRunReq(SStreamTask* pTask) { } int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) { - qDebug("s-task:%s receive retrieve req from node %d taskId:%d", pTask->id.idStr, pReq->srcNodeId, pReq->srcTaskId); + qDebug("s-task:%s receive retrieve req from node %d taskId:0x%x", pTask->id.idStr, pReq->srcNodeId, pReq->srcTaskId); streamTaskEnqueueRetrieve(pTask, pReq, pRsp); ASSERT(pTask->taskLevel != TASK_LEVEL__SINK); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 722013115f..401a8b9e74 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -138,7 +138,6 @@ int32_t 
streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) SStreamChildEpInfo* pEpInfo = taosArrayGetP(pTask->childEpInfo, i); req.dstNodeId = pEpInfo->nodeId; req.dstTaskId = pEpInfo->taskId; - int32_t code; int32_t len; tEncodeSize(tEncodeStreamRetrieveReq, &req, len, code); if (code < 0) { @@ -158,23 +157,18 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) tEncodeStreamRetrieveReq(&encoder, &req); tEncoderClear(&encoder); - SRpcMsg rpcMsg = { - .code = 0, - .msgType = TDMT_STREAM_RETRIEVE, - .pCont = buf, - .contLen = sizeof(SMsgHead) + len, - }; - + SRpcMsg rpcMsg = { .code = 0, .msgType = TDMT_STREAM_RETRIEVE, .pCont = buf, .contLen = sizeof(SMsgHead) + len }; if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) { ASSERT(0); goto CLEAR; } - buf = NULL; - qDebug("s-task:%s (child %d) send retrieve req to task %d at node %d, reqId %" PRId64, pTask->id.idStr, + buf = NULL; + qDebug("s-task:%s (child %d) send retrieve req to task %d at node %d, reqId:0x%" PRIx64, pTask->id.idStr, pTask->selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId); } code = 0; + CLEAR: taosMemoryFree(pRetrieve); rpcFreeCont(buf); @@ -400,12 +394,14 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat if (req.data == NULL || req.dataLen == NULL) { taosArrayDestroyP(req.data, taosMemoryFree); taosArrayDestroy(req.dataLen); - return code; + return TSDB_CODE_OUT_OF_MEMORY; } for (int32_t i = 0; i < numOfBlocks; i++) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); - if (streamAddBlockIntoDispatchMsg(pDataBlock, &req) < 0) { + code = streamAddBlockIntoDispatchMsg(pDataBlock, &req); + + if (code != TSDB_CODE_SUCCESS) { taosArrayDestroyP(req.data, taosMemoryFree); taosArrayDestroy(req.dataLen); return code; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index c7420b0bef..55474541ed 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -108,7 +108,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i block.info.type = STREAM_PULL_OVER; block.info.childId = pTask->selfChildId; taosArrayPush(pRes, &block); - + numOfBlocks += 1; qDebug("s-task:%s(child %d) processed retrieve, reqId:0x%" PRIx64, pTask->id.idStr, pTask->selfChildId, pRetrieveBlock->reqId); } @@ -152,14 +152,11 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i if (numOfBlocks > 0) { ASSERT(numOfBlocks == taosArrayGetSize(pRes)); code = doDumpResult(pTask, pItem, pRes, size, totalSize, totalBlocks); - if (code != TSDB_CODE_SUCCESS) { - return code; - } } else { taosArrayDestroy(pRes); } - return 0; + return code; } int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { From 94d71f601d1fbcacb2c22c5db7edac37c6426df6 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 21 May 2023 14:20:01 +0800 Subject: [PATCH 56/59] docs: udf requires python 3.7+ (#21397) --- docs/en/12-taos-sql/26-udf.md | 2 +- docs/zh/12-taos-sql/26-udf.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md index c4d6d4fca4..b533b98b3d 100644 --- a/docs/en/12-taos-sql/26-udf.md +++ b/docs/en/12-taos-sql/26-udf.md @@ -17,7 +17,7 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ ``` - OR REPLACE: if the UDF exists, the UDF properties are modified - function_name: The scalar function name to be used in the SQL statement - - LANGUAGE 'C|Python': 
the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language. + - LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language. - library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes. - output_type: The data type of the results of the UDF. diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md index c1d2761d7d..307831ce80 100644 --- a/docs/zh/12-taos-sql/26-udf.md +++ b/docs/zh/12-taos-sql/26-udf.md @@ -38,7 +38,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE ``` - OR REPLACE: 如果函数已经存在,会修改已有的函数属性。 - function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言。 + - LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言(v3.7+)。 - library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;; - output_type:此函数计算结果的数据类型名称; - buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。 From 0a38e63c34fddb37ad4825f66d7656e1adc5fe63 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Mon, 22 May 2023 09:33:17 +0800 Subject: [PATCH 57/59] doc: add td.connect.type --- docs/en/07-develop/07-tmq.mdx | 9 +++++---- docs/zh/07-develop/07-tmq.mdx | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index a4eb41bd7e..93291e8e05 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -285,10 +285,11 @@ You configure the following parameters when creating a consumer: | Parameter | Type | Description | Remarks | | :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | -| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection | -| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection | -| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection | -| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection | +| `td.connect.type` | string | connection type | Only valid for Java connector: "jni" means native connection, "ws" means websocket connection, the default is "jni" | +| `td.connect.ip` | string | IP address of the server side | | +| `td.connect.user` | string | User Name | | +| `td.connect.pass` | string | Password | | +| `td.connect.port` | string | Port of the server side | | | `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | | `client.id` | string | Client ID | Maximum length: 192. 
| | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 11aef1f7e8..39538f1f15 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -405,10 +405,11 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) | 参数名称 | 类型 | 参数说明 | 备注 | |:------:|:----:|:-------:|:---:| -| `td.connect.ip` | string | 用于创建连接|| -| `td.connect.user` | string | 用于创建连接|| -| `td.connect.pass` | string | 用于创建连接|| -| `td.connect.port` | string | 用于创建连接|| +| `td.connect.type` | string | 指定连接类型 |仅在Java连接器中有效,"jni" 表示原生连接,"ws"表示 websocket 连接,默认为 "jni" | +| `td.connect.ip` | string | 服务端 IP 地址|| +| `td.connect.user` | string | 用户名|| +| `td.connect.pass` | string | 密码|| +| `td.connect.port` | string | 服务端 端口号|| | `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 | | `client.id` | string | 客户端 ID | 最大长度:192 | | `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` | From 2aad82370942592a5314b880f86c23ffaf3121e3 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Mon, 22 May 2023 10:53:10 +0800 Subject: [PATCH 58/59] fix: free assignment crash on Windows --- examples/c/tmq.c | 4 ++-- include/client/taos.h | 1 + source/client/src/clientTmq.c | 8 ++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/examples/c/tmq.c b/examples/c/tmq.c index 94545dfaad..e1133c109e 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -73,7 +73,7 @@ static int32_t init_env() { taos_free_result(pRes); // create database - pRes = taos_query(pConn, "create database tmqdb precision 'ns'"); + pRes = taos_query(pConn, "create database tmqdb precision 'ns' WAL_RETENTION_PERIOD 3600"); if (taos_errno(pRes) != 0) { printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes)); goto END; @@ -289,7 +289,7 @@ void consume_repeatly(tmq_t* tmq) { } } - free(pAssign); + tmq_free_assignment(pAssign); // let's do it again basic_consume_loop(tmq); diff --git a/include/client/taos.h b/include/client/taos.h index 8811c4ab64..9f5a84fb22 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -288,6 +288,7 @@ DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment); +DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); /* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index c38e206a9b..aff5846092 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -2584,6 +2584,14 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a } } +void tmq_free_assignment(tmq_topic_assignment* pAssignment) { + if (pAssignment == NULL) { + return; + } + + taosMemoryFree(pAssignment); +} + int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_t offset) { if (tmq == NULL) { tscError("invalid tmq handle, null"); From c12996bee70b48b039e9f3288d113ce3be0f2283 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Mon, 22 May 2023 11:11:52 +0800 Subject: [PATCH 59/59] doc: refine 
td.connect.type --- docs/en/07-develop/07-tmq.mdx | 18 +----------------- docs/zh/07-develop/07-tmq.mdx | 27 ++++++--------------------- 2 files changed, 7 insertions(+), 38 deletions(-) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 93291e8e05..d951923de5 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -285,7 +285,6 @@ You configure the following parameters when creating a consumer: | Parameter | Type | Description | Remarks | | :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | -| `td.connect.type` | string | connection type | Only valid for Java connector: "jni" means native connection, "ws" means websocket connection, the default is "jni" | | `td.connect.ip` | string | IP address of the server side | | | `td.connect.user` | string | User Name | | | `td.connect.pass` | string | Password | | @@ -326,6 +325,7 @@ Java programs use the following parameters: | Parameter | Type | Description | Remarks | | ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- | +| `td.connect.type` | string | connection type: "jni" means native connection, "ws" means websocket connection, the default is "jni" | | `bootstrap.servers` | string |Connection address, such as `localhost:6030` | | `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type | | `value.deserializer.encoding` | string | Specify the encoding for string deserialization | | @@ -400,22 +400,6 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -Python programs use the following parameters: - -| Parameter | Type | Description | Remarks | -|:---------:|:----:|:-----------:|:-------:| -| `td.connect.ip` | string | Used in establishing a connection|| -| `td.connect.user` | string | Used in establishing a connection|| -| `td.connect.pass` | string | Used in establishing a connection|| -| `td.connect.port` | string | Used in establishing a connection|| -| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. 
Maximum length: 192 | -| `client.id` | string | Client ID | Maximum length: 192 | -| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` | -| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` | -| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | | -| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | -| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` | - diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 39538f1f15..bfea926f53 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -285,10 +285,10 @@ CREATE TOPIC topic_name AS DATABASE db_name; | 参数名称 | 类型 | 参数说明 | 备注 | | :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | -| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 | -| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 | -| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 | -| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 | +| `td.connect.ip` | string | 服务端的 IP 地址 | | +| `td.connect.user` | string | 用户名 | | +| `td.connect.pass` | string | 密码 | | +| `td.connect.port` | integer | 服务端的端口号 | | | `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 | | `client.id` | string | 客户端 ID | 最大长度:192。 | | `auto.offset.reset` | enum | 消费组订阅的初始位置 |
`earliest`: default;从头开始订阅; `latest`: 仅从最新数据开始订阅; `none`: 没有提交的 offset 无法订阅 | @@ -321,10 +321,11 @@ tmq_conf_destroy(conf);
-对于 Java 程序,使用如下配置项: +对于 Java 程序,还可以使用如下配置项: | 参数名称 | 类型 | 参数说明 | | ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- | +| `td.connect.type` | string | 连接类型,"jni" 指原生连接,"ws" 指 websocket 连接,默认值为 "jni" | | `bootstrap.servers` | string | 连接地址,如 `localhost:6030` | | `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 | | `value.deserializer.encoding` | string | 指定字符串解析的字符集 | | @@ -401,22 +402,6 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -其中,`configs` 为 dict 类型,传递创建 Consumer 的参数。可以配置的参数有: - -| 参数名称 | 类型 | 参数说明 | 备注 | -|:------:|:----:|:-------:|:---:| -| `td.connect.type` | string | 指定连接类型 |仅在Java连接器中有效,"jni" 表示原生连接,"ws"表示 websocket 连接,默认为 "jni" | -| `td.connect.ip` | string | 服务端 IP 地址|| -| `td.connect.user` | string | 用户名|| -| `td.connect.pass` | string | 密码|| -| `td.connect.port` | string | 服务端 端口号|| -| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 | -| `client.id` | string | 客户端 ID | 最大长度:192 | -| `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` | -| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` | -| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms | -| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` | -
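As a companion to the parameter tables above, here is a rough C sketch of configuring and creating a consumer with those options. It is an illustration only: the parameter names come from the tables, the `tmq_conf_new` / `tmq_conf_set` / `tmq_consumer_new` / `tmq_conf_destroy` calls follow the C snippets used elsewhere in these docs, and `td.connect.type` is omitted because the tables mark it as Java-only.

```c
#include <stdio.h>
#include "taos.h"  // TDengine client header providing the tmq_* API

// Sketch: build a TMQ consumer using the documented configuration keys.
static tmq_t* buildConsumerSketch(void) {
  tmq_conf_t* conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "cgrp1");               // required, max length 192
  tmq_conf_set(conf, "client.id", "c1");                 // optional client ID
  tmq_conf_set(conf, "td.connect.ip", "127.0.0.1");      // server IP address
  tmq_conf_set(conf, "td.connect.user", "root");         // user name
  tmq_conf_set(conf, "td.connect.pass", "taosdata");     // password
  tmq_conf_set(conf, "auto.offset.reset", "earliest");   // earliest | latest | none
  tmq_conf_set(conf, "enable.auto.commit", "true");      // automatic commits
  tmq_conf_set(conf, "auto.commit.interval.ms", "5000"); // commit interval in ms
  tmq_conf_set(conf, "msg.with.table.name", "true");     // parse table name from message

  char errstr[512] = {0};
  tmq_t* tmq = tmq_consumer_new(conf, errstr, sizeof(errstr));
  if (tmq == NULL) {
    fprintf(stderr, "failed to create consumer: %s\n", errstr);
  }

  tmq_conf_destroy(conf);  // configuration object is no longer needed after creation
  return tmq;
}
```

A real program would then build a topic list with `tmq_list_new`/`tmq_list_append`, call `tmq_subscribe`, and poll with `tmq_consumer_poll`, along the lines of the `examples/c/tmq.c` file touched earlier in this series.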