From 3eadb9b921c0c9d1ca9e9e55bd8cba8b2bb581d4 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Tue, 10 Jan 2023 23:08:25 +0800
Subject: [PATCH 01/14] enh(query): disable the memset when allocating the
 memory for dataBlock.

---
 source/common/src/tdatablock.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 171679baae..a5869c9305 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1161,15 +1161,16 @@ void blockDataEmpty(SSDataBlock* pDataBlock) {
   pInfo->window.skey = 0;
 }
-// todo temporarily disable it
+/*
+ * NOTE: the type of the input column may be TSDB_DATA_TYPE_NULL, which is used to denote
+ * that every value in this column is NULL. It is an internal representation of an all-NULL column, and is not
+ * visible to any users. The length of TSDB_DATA_TYPE_NULL is 0, and it is a special case.
+ */
 static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pBlockInfo, uint32_t numOfRows,
                                 bool clearPayload) {
   if (numOfRows <= 0 || numOfRows <= pBlockInfo->capacity) {
     return TSDB_CODE_SUCCESS;
   }
 
-  // todo temp disable it
-  // ASSERT(pColumn->info.bytes != 0);
-
   int32_t existedRows = pBlockInfo->rows;
 
   if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
@@ -1194,7 +1195,8 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
       return TSDB_CODE_FAILED;
     }
 
-    // make sure the allocated memory is MALLOC_ALIGN_BYTES aligned
+    // here we employ the aligned malloc function, to make sure that the address of the allocated memory is aligned
+    // to MALLOC_ALIGN_BYTES
     tmp = taosMemoryMallocAlign(MALLOC_ALIGN_BYTES, numOfRows * pColumn->info.bytes);
     if (tmp == NULL) {
       return TSDB_CODE_OUT_OF_MEMORY;
@@ -1208,7 +1210,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
 
     pColumn->pData = tmp;
 
-    // todo remove it soon
+    // check if the allocated memory is aligned to the required bytes.
 #if defined LINUX
     if ((((uint64_t)pColumn->pData) & (MALLOC_ALIGN_BYTES - 1)) != 0x0) {
       return TSDB_CODE_FAILED;
@@ -1216,7 +1218,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
 #endif
 
     if (clearPayload) {
-      memset(tmp + pColumn->info.bytes * existedRows, 0, pColumn->info.bytes * (numOfRows - existedRows));
+//      memset(tmp + pColumn->info.bytes * existedRows, 0, pColumn->info.bytes * (numOfRows - existedRows));
     }
   }
 

From abae9e07905021bdfbe93bb1714f20110da94a48 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Tue, 10 Jan 2023 23:40:16 +0800
Subject: [PATCH 02/14] fix(query): set 0 when result is not qualified.
--- source/libs/scalar/src/filter.c | 1 + tests/script/tsim/parser/alter1.sim | 1 + 2 files changed, 2 insertions(+) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index de660ae958..74d555af77 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3181,6 +3181,7 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, SColumnInfoData *pRe void *colData = colDataGetData(pData, i); if (colData == NULL || colDataIsNull_s(pData, i)) { all = false; + p[i] = 0; continue; } diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim index 369419dcd9..cf9da46fba 100644 --- a/tests/script/tsim/parser/alter1.sim +++ b/tests/script/tsim/parser/alter1.sim @@ -88,6 +88,7 @@ sql insert into car1 values (now, 1, 1,1 ) (now +1s, 2,2,2) car2 values (now, 1, sql select c1+speed from stb where c1 > 0 if $rows != 3 then + print $rows , expect 3 return -1 endi From 75f4e64326d5ab3cd55b3e38cb37135644e7985b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 23:45:49 +0800 Subject: [PATCH 03/14] enh(query): adjust the memset rule when creating new datablock. --- include/common/tdatablock.h | 1 - source/common/src/tdatablock.c | 21 +-------------------- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index e1d3b01611..7209d45a6d 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -231,7 +231,6 @@ int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullF int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload); int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows); -int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows); void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows); void blockDataCleanup(SSDataBlock* pDataBlock); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index a5869c9305..1f5748ebbd 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1218,7 +1218,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* #endif if (clearPayload) { -// memset(tmp + pColumn->info.bytes * existedRows, 0, pColumn->info.bytes * (numOfRows - existedRows)); + memset(tmp + pColumn->info.bytes * existedRows, 0, pColumn->info.bytes * (numOfRows - existedRows)); } } @@ -1251,25 +1251,6 @@ int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { return TSDB_CODE_SUCCESS; } - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); - code = doEnsureCapacity(p, &pDataBlock->info, numOfRows, true); - if (code) { - return code; - } - } - - pDataBlock->info.capacity = numOfRows; - return TSDB_CODE_SUCCESS; -} - -int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows) { - int32_t code = 0; - if (numOfRows == 0 || numOfRows <= pDataBlock->info.capacity) { - return TSDB_CODE_SUCCESS; - } - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); From e0dbe221992f9e78e062dbd186012902d1e8c27e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 11 Jan 2023 10:27:23 +0800 Subject: [PATCH 04/14] fix(query): fix uninitialized memory buffer accessed. 
--- include/common/tdatablock.h | 9 ++++++++- source/common/src/tdatablock.c | 4 ++-- source/libs/executor/src/projectoperator.c | 1 - source/libs/executor/src/sysscanoperator.c | 7 +++++++ source/libs/function/src/builtinsimpl.c | 17 ++++++++--------- 5 files changed, 25 insertions(+), 13 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 7209d45a6d..9c5b712db6 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -41,6 +41,12 @@ typedef struct SBlockOrderInfo { BMCharPos(bm_, r_) |= (1u << (7u - BitPos(r_))); \ } while (0) +#define colDataSetNull_f_s(c_, r_) \ + do { \ + colDataSetNull_f((c_)->nullbitmap, r_); \ + memset(((char*)(c_)->pData) + (c_)->info.bytes * (r_), 0, (c_)->info.bytes); \ + } while (0) + #define colDataClearNull_f(bm_, r_) \ do { \ BMCharPos(bm_, r_) &= ((char)(~(1u << (7u - BitPos(r_))))); \ @@ -136,7 +142,7 @@ static FORCE_INLINE void colDataAppendNULL(SColumnInfoData* pColumnInfoData, uin if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { colDataSetNull_var(pColumnInfoData, currentRow); // it is a null value of VAR type. } else { - colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow); + colDataSetNull_f_s(pColumnInfoData, currentRow); } pColumnInfoData->hasNull = true; @@ -151,6 +157,7 @@ static FORCE_INLINE void colDataAppendNNULL(SColumnInfoData* pColumnInfoData, ui for (int32_t i = start; i < start + nRows; ++i) { colDataSetNull_f(pColumnInfoData->nullbitmap, i); } + memset(pColumnInfoData->pData + start * pColumnInfoData->info.bytes, 0, pColumnInfoData->info.bytes * nRows); } pColumnInfoData->hasNull = true; diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 1f5748ebbd..43f272d599 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -69,7 +69,7 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { pColumnInfoData->varmeta.offset[currentRow] = -1; // it is a null value of VAR type. 
} else { - colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow); + colDataSetNull_f_s(pColumnInfoData, currentRow); } pColumnInfoData->hasNull = true; @@ -825,7 +825,7 @@ static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataB } else { for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { if (colDataIsNull_f(pSrc->nullbitmap, index[j])) { - colDataSetNull_f(pDst->nullbitmap, j); + colDataSetNull_f_s(pDst, j); continue; } memcpy(pDst->pData + j * pDst->info.bytes, pSrc->pData + index[j] * pDst->info.bytes, pDst->info.bytes); diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 926720fc65..4d38f2c8e9 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -279,7 +279,6 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { // for stream interval if (pBlock->info.type == STREAM_RETRIEVE || pBlock->info.type == STREAM_DELETE_RESULT || pBlock->info.type == STREAM_DELETE_DATA) { - // printDataBlock1(pBlock, "project1"); return pBlock; } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index ac32b54f56..05570eda2f 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1918,6 +1918,13 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { colDataAppend(pColInfo, 0, p, false); taosMemoryFree(p); + // make the valgrind happy that all memory buffer has been initialized already. + if (slotId != 0) { + SColumnInfoData* p1 = taosArrayGet(pBlock->pDataBlock, 0); + int64_t v = 0; + colDataAppendInt64(p1, 0, &v); + } + pBlock->info.rows = 1; pOperator->status = OP_EXEC_DONE; return pBlock; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 8fde27e046..2beb53a312 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2645,7 +2645,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int32_t v = *(int32_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2658,7 +2658,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int8_t v = *(int8_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2669,7 +2669,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int16_t v = *(int16_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2681,7 +2681,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int64_t v = *(int64_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } 
@@ -2692,7 +2692,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, float v = *(float*)pv; double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendDouble(pOutput, pos, &delta); } @@ -2703,7 +2703,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, double v = *(double*)pv; double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendDouble(pOutput, pos, &delta); } @@ -2738,7 +2738,7 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pInputCol->nullbitmap, i)) { if (pDiffInfo->includeNull) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); numOfElems += 1; } @@ -2776,8 +2776,7 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pInputCol->nullbitmap, i)) { if (pDiffInfo->includeNull) { - colDataSetNull_f(pOutput->nullbitmap, pos); - + colDataSetNull_f_s(pOutput, pos); numOfElems += 1; } continue; From 9a95c3d7ab89cf28cc1d62df7c2b6db5fba4869e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 11 Jan 2023 10:44:07 +0800 Subject: [PATCH 05/14] enh: refact vmFile.c --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 173 +++++++++++----------- 1 file changed, 89 insertions(+), 84 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 8337fb5d10..17f20fdd7d 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "vmInt.h" #include "tjson.h" +#include "vmInt.h" #define MAX_CONTENT_LEN 2 * 1024 * 1024 @@ -46,102 +46,107 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { return pVnodes; } -int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int32_t len = 0; - int32_t maxLen = MAX_CONTENT_LEN; - char *content = taosMemoryCalloc(1, maxLen + 1); - cJSON *root = NULL; - FILE *fp = NULL; - char file[PATH_MAX] = {0}; +static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { + int32_t code = -1; SWrapperCfg *pCfgs = NULL; - TdFilePtr pFile = NULL; - snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - - pFile = taosOpenFile(file, TD_FILE_READ); - if (pFile == NULL) { - dInfo("file %s not exist", file); - code = 0; - goto _OVER; - } - - if (content == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - len = (int32_t)taosReadFile(pFile, content, maxLen); - if (len <= 0) { - dError("failed to read %s since content is null", file); - goto _OVER; - } - - content[len] = 0; - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); - goto _OVER; - } - - cJSON *vnodes = cJSON_GetObjectItem(root, "vnodes"); - if (!vnodes || vnodes->type != cJSON_Array) { - dError("failed to read %s since vnodes not found", file); - goto _OVER; - } + SJson *vnodes = tjsonGetObjectItem(pJson, "vnodes"); + if (vnodes == 
NULL) return -1; int32_t vnodesNum = cJSON_GetArraySize(vnodes); if (vnodesNum > 0) { pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg)); - if (pCfgs == NULL) { - dError("failed to read %s since out of memory", file); - code = TSDB_CODE_OUT_OF_MEMORY; - goto _OVER; - } - - for (int32_t i = 0; i < vnodesNum; ++i) { - cJSON *vnode = cJSON_GetArrayItem(vnodes, i); - SWrapperCfg *pCfg = &pCfgs[i]; - - cJSON *vgId = cJSON_GetObjectItem(vnode, "vgId"); - if (!vgId || vgId->type != cJSON_Number) { - dError("failed to read %s since vgId not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->vgId = vgId->valueint; - snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); - - cJSON *dropped = cJSON_GetObjectItem(vnode, "dropped"); - if (!dropped || dropped->type != cJSON_Number) { - dError("failed to read %s since dropped not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->dropped = dropped->valueint; - - cJSON *vgVersion = cJSON_GetObjectItem(vnode, "vgVersion"); - if (!vgVersion || vgVersion->type != cJSON_Number) { - dError("failed to read %s since vgVersion not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->vgVersion = vgVersion->valueint; - } - - *ppCfgs = pCfgs; + if (pCfgs == NULL) return -1; + } + + for (int32_t i = 0; i < vnodesNum; ++i) { + SJson *vnode = tjsonGetArrayItem(vnodes, i); + if (vnode == NULL) goto _OVER; + + SWrapperCfg *pCfg = &pCfgs[i]; + tjsonGetInt32ValueFromDouble(vnode, "id", pCfg->vgId, code); + if (code < 0) goto _OVER; + tjsonGetInt32ValueFromDouble(vnode, "dropped", pCfg->dropped, code); + if (code < 0) goto _OVER; + tjsonGetInt32ValueFromDouble(vnode, "vgVersion", pCfg->vgVersion, code); + if (code < 0) goto _OVER; + + snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); } - *numOfVnodes = vnodesNum; code = 0; - dInfo("succcessed to read file %s, numOfVnodes:%d", file, vnodesNum); + *ppCfgs = pCfgs; + *numOfVnodes = vnodesNum; _OVER: - if (content != NULL) taosMemoryFree(content); - if (root != NULL) cJSON_Delete(root); + if (*ppCfgs == NULL) taosMemoryFree(pCfgs); + return code; +} + +int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { + int32_t code = -1; + TdFilePtr pFile = NULL; + char *pData = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; + SWrapperCfg *pCfgs = NULL; + snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); + + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("vnode file:%s not exist", file); + return 0; + } + + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open vnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat mnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + pData = taosMemoryMalloc(size + 1); + if (pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + if (taosReadFile(pFile, pData, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read vnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + pData[size] = '\0'; + + pJson = tjsonParse(pData); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + if (vmDecodeVnodeList(pJson, pMgmt, ppCfgs, numOfVnodes) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + 
code = 0; + dInfo("succceed to read vnode file %s", file); + +_OVER: + if (pData != NULL) taosMemoryFree(pData); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - terrno = code; + if (code != 0) { + dError("failed to read vnode file:%s since %s", file, terrstr()); + } return code; } From 4de9f965f462c3adb2a8228c5c1b14adcba6f34c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 11 Jan 2023 10:58:30 +0800 Subject: [PATCH 06/14] test: refine query cases --- tests/parallel_test/cases.task | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1205da31b3..d4fe45d42b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1039,6 +1039,11 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/blockSMA.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py From 5db74213dab678f853da5f3bb217fd9368589e89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 11 Jan 2023 10:58:44 +0800 Subject: [PATCH 07/14] test: refine query cases --- tests/system-test/2-query/nestedQuery.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/tests/system-test/2-query/nestedQuery.py b/tests/system-test/2-query/nestedQuery.py index 034ab8dcdc..3d0db9a562 100755 --- a/tests/system-test/2-query/nestedQuery.py +++ b/tests/system-test/2-query/nestedQuery.py @@ -851,6 +851,7 @@ class TDTestCase: tdLog.info("========mark==%s==="% mark); try: tdSql.query(sql,queryTimes=1) + self.explain_sql(sql) except: tdLog.info("sql is not support :=====%s; " %sql) tdSql.error(sql) @@ -4995,9 +4996,7 @@ class TDTestCase: sql += "%s ;" % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-2') tdSql.query("select 15-2.2 from stable_1;") for i in range(self.fornum): @@ -5013,9 +5012,7 @@ class TDTestCase: sql += "%s ;" % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-2.2') self.restartDnodes() tdSql.query("select 15-3 from stable_1;") @@ -5033,9 +5030,7 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-3') tdSql.query("select 15-4 from stable_1;") for i in range(self.fornum): @@ -5052,9 +5047,7 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - 
self.explain_sql(sql) + self.data_check(sql,mark='15-4') tdSql.query("select 15-4.2 from stable_1;") for i in range(self.fornum): @@ -5087,8 +5080,7 @@ class TDTestCase: tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-5') #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) #self.dropandcreateDB_random("%s" %db, 1) From 07adf3097ad00c59b34f07401a53fa15a89cd986 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 11 Jan 2023 10:58:57 +0800 Subject: [PATCH 08/14] test: refine query cases --- tests/system-test/2-query/out_of_order.py | 191 ++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 tests/system-test/2-query/out_of_order.py diff --git a/tests/system-test/2-query/out_of_order.py b/tests/system-test/2-query/out_of_order.py new file mode 100644 index 0000000000..5b52661bae --- /dev/null +++ b/tests/system-test/2-query/out_of_order.py @@ -0,0 +1,191 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import random + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql, replicaVar): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run_benchmark(self,dbname,tables,per_table_num,order,replica): + #O :Out of order + #A :Repliaca + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + os.system("%staosBenchmark -d %s -t %d -n %d -O %d -a %d -b float,double,nchar\(200\),binary\(50\) -T 50 -y " % (binPath,dbname,tables,per_table_num,order,replica)) + + def sql_base(self,dbname): + self.check_sub(dbname) + sql1 = "select count(*) from %s.meters" %dbname + self.sql_base_check(sql1,sql1) + + self.check_sub(dbname) + sql2 = "select count(ts) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(_c0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c1) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c2) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = 
"select count(c3) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t1) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + + self.check_sub(dbname) + sql2 = "select count(ts) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(_c0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c1) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c2) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c3) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t1) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + + def sql_base_check(self,sql1,sql2): + tdSql.query(sql1) + sql1_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql1,sql1_result)) + + tdSql.query(sql2) + sql2_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql2,sql2_result)) + + if sql1_result==sql2_result: + tdLog.info(f"checkEqual success, sql1_result={sql1_result},sql2_result={sql2_result}") + else : + tdLog.exit(f"checkEqual error, sql1_result=={sql1_result},sql2_result={sql2_result}") + + def run_sql(self,dbname): + self.sql_base(dbname) + + tdSql.execute(" flush database %s;" %dbname) + + self.sql_base(dbname) + + def check_sub(self,dbname): + + sql = "select count(*) from (select distinct(tbname) from %s.meters)" %dbname + tdSql.query(sql) + num = tdSql.getData(0,0) + + for i in range(0,num): + sql1 = "select count(*) from %s.d%d" %(dbname,i) + tdSql.query(sql1) + sql1_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql1,sql1_result)) + + def check_out_of_order(self,dbname,tables,per_table_num,order,replica): + self.run_benchmark(dbname,tables,per_table_num,order,replica) + print("sleep 10 seconds") + #time.sleep(10) + print("sleep 10 seconds finish") + + self.run_sql(dbname) + + def run(self): + startTime = time.time() + + #self.check_out_of_order('db1',10,random.randint(10000,50000),random.randint(1,10),1) + self.check_out_of_order('db1',random.randint(50,200),random.randint(10000,20000),random.randint(1,5),1) + + # self.check_out_of_order('db2',random.randint(50,200),random.randint(10000,50000),random.randint(5,50),1) + + # self.check_out_of_order('db3',random.randint(50,200),random.randint(10000,50000),random.randint(50,100),1) + + # self.check_out_of_order('db4',random.randint(50,200),random.randint(10000,50000),100,1) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From ba7f9f1df15f40587cf17cde65c9d6cf848dd52e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 11 Jan 2023 11:06:16 +0800 Subject: [PATCH 09/14] enh: refact dmEps.c --- 
source/dnode/mgmt/node_util/src/dmEps.c | 172 ++++++++++-------------- 1 file changed, 70 insertions(+), 102 deletions(-) diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 3e2d8b53aa..6eb4d44294 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -41,14 +41,49 @@ static void dmGetDnodeEp(SDnodeData *pData, int32_t dnodeId, char *pEp, char *pF taosThreadRwlockUnlock(&pData->lock); } +static int32_t dmDecodeEps(SJson *pJson, SDnodeData *pData) { + int32_t code = 0; + + tjsonGetInt32ValueFromDouble(pJson, "dnodeId", pData->dnodeId, code); + if (code < 0) return -1; + tjsonGetNumberValue(pJson, "dnodeVer", pData->dnodeVer, code); + if (code < 0) return -1; + tjsonGetNumberValue(pJson, "clusterId", pData->clusterId, code); + if (code < 0) return -1; + tjsonGetInt32ValueFromDouble(pJson, "dropped", pData->dropped, code); + if (code < 0) return -1; + + SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes"); + if (dnodes == NULL) return 0; + int32_t numOfDnodes = tjsonGetArraySize(dnodes); + + for (int32_t i = 0; i < numOfDnodes; ++i) { + SJson *dnode = tjsonGetArrayItem(dnodes, i); + if (dnode == NULL) return -1; + + SDnodeEp dnodeEp = {0}; + tjsonGetInt32ValueFromDouble(dnode, "id", dnodeEp.id, code); + if (code < 0) return -1; + code = tjsonGetStringValue(dnode, "fqdn", dnodeEp.ep.fqdn); + if (code < 0) return -1; + tjsonGetUInt16ValueFromDouble(dnode, "port", dnodeEp.ep.port, code); + if (code < 0) return -1; + tjsonGetInt8ValueFromDouble(dnode, "isMnode", dnodeEp.isMnode, code); + if (code < 0) return -1; + + if (taosArrayPush(pData->dnodeEps, &dnodeEp) == NULL) return -1; + } + + return 0; +} + int32_t dmReadEps(SDnodeData *pData) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int32_t len = 0; - int32_t maxLen = 256 * 1024; - char *content = taosMemoryCalloc(1, maxLen + 1); - cJSON *root = NULL; - char file[PATH_MAX] = {0}; + int32_t code = -1; TdFilePtr pFile = NULL; + char *content = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); pData->dnodeEps = taosArrayInit(1, sizeof(SDnodeEp)); if (pData->dnodeEps == NULL) { @@ -56,129 +91,62 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - snprintf(file, sizeof(file), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - pFile = taosOpenFile(file, TD_FILE_READ); - if (pFile == NULL) { + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; } - len = (int32_t)taosReadFile(pFile, content, maxLen); - if (len <= 0) { - dError("failed to read %s since content is null", file); + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open dnode file:%s since %s", file, terrstr()); goto _OVER; } - content[len] = 0; - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat dnode file:%s since %s", file, terrstr()); goto _OVER; } - cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId"); - if (!dnodeId || dnodeId->type != cJSON_Number) { - dError("failed to read %s since dnodeId not found", file); - goto _OVER; - } - pData->dnodeId = dnodeId->valueint; - - cJSON *dnodeVer = cJSON_GetObjectItem(root, "dnodeVer"); - if (!dnodeVer || dnodeVer->type 
!= cJSON_String) { - dError("failed to read %s since dnodeVer not found", file); - goto _OVER; - } - pData->dnodeVer = atoll(dnodeVer->valuestring); - - cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); - if (!clusterId || clusterId->type != cJSON_String) { - dError("failed to read %s since clusterId not found", file); - goto _OVER; - } - pData->clusterId = atoll(clusterId->valuestring); - - cJSON *dropped = cJSON_GetObjectItem(root, "dropped"); - if (!dropped || dropped->type != cJSON_Number) { - dError("failed to read %s since dropped not found", file); - goto _OVER; - } - pData->dropped = dropped->valueint; - - cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes"); - if (!dnodes || dnodes->type != cJSON_Array) { - dError("failed to read %s since dnodes not found", file); + content = taosMemoryMalloc(size + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _OVER; } - int32_t numOfDnodes = cJSON_GetArraySize(dnodes); - if (numOfDnodes <= 0) { - dError("failed to read %s since numOfDnodes:%d invalid", file, numOfDnodes); + if (taosReadFile(pFile, content, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read dnode file:%s since %s", file, terrstr()); goto _OVER; } - for (int32_t i = 0; i < numOfDnodes; ++i) { - cJSON *node = cJSON_GetArrayItem(dnodes, i); - if (node == NULL) break; + content[size] = '\0'; - SDnodeEp dnodeEp = {0}; + pJson = tjsonParse(content); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } - cJSON *did = cJSON_GetObjectItem(node, "id"); - if (!did || did->type != cJSON_Number) { - dError("failed to read %s since dnodeId not found", file); - goto _OVER; - } - - dnodeEp.id = did->valueint; - - cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn"); - if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) { - dError("failed to read %s since dnodeFqdn not found", file); - goto _OVER; - } - tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN); - - cJSON *dnodePort = cJSON_GetObjectItem(node, "port"); - if (!dnodePort || dnodePort->type != cJSON_Number) { - dError("failed to read %s since dnodePort not found", file); - goto _OVER; - } - - dnodeEp.ep.port = dnodePort->valueint; - - cJSON *isMnode = cJSON_GetObjectItem(node, "isMnode"); - if (!isMnode || isMnode->type != cJSON_Number) { - dError("failed to read %s since isMnode not found", file); - goto _OVER; - } - dnodeEp.isMnode = isMnode->valueint; - - taosArrayPush(pData->dnodeEps, &dnodeEp); + if (dmDecodeEps(pJson, pData) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; } code = 0; - dDebug("succcessed to read file %s", file); + dInfo("succceed to read mnode file %s", file); _OVER: if (content != NULL) taosMemoryFree(content); - if (root != NULL) cJSON_Delete(root); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - if (taosArrayGetSize(pData->dnodeEps) == 0) { - SDnodeEp dnodeEp = {0}; - dnodeEp.isMnode = 1; - taosGetFqdnPortFromEp(tsFirst, &dnodeEp.ep); - taosArrayPush(pData->dnodeEps, &dnodeEp); + if (code != 0) { + dError("failed to read dnode file:%s since %s", file, terrstr()); } - - dDebug("reset dnode list on startup"); - dmResetEps(pData, pData->dnodeEps); - - if (dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) { - dError("localEp %s different with %s and need reconfigured", tsLocalEp, file); - return -1; - } - - terrno = code; return code; } From 8c9daa32fc7d96ab7e36393de725f016a16262d5 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: 
Wed, 11 Jan 2023 11:18:39 +0800 Subject: [PATCH 10/14] enh: refact dmFile.c --- source/dnode/mgmt/node_util/src/dmFile.c | 77 +++++++++++++++++------- 1 file changed, 55 insertions(+), 22 deletions(-) diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index 4dcc962a20..fb05f08c0c 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -19,48 +19,81 @@ #define MAXLEN 1024 -int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int64_t len = 0; - char content[MAXLEN + 1] = {0}; - cJSON *root = NULL; - char file[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; +static int32_t dmDecodeFile(SJson *pJson, bool *deployed) { + int32_t code = 0; + int32_t value = 0; + tjsonGetInt32ValueFromDouble(pJson, "deployed", value, code); + if (code < 0) return -1; + + *deployed = (value != 0); + return code; +} + +int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { + int32_t code = -1; + TdFilePtr pFile = NULL; + char *content = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - pFile = taosOpenFile(file, TD_FILE_READ); - if (pFile == NULL) { + + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("file:%s not exist", file); code = 0; goto _OVER; } - len = taosReadFile(pFile, content, MAXLEN); - if (len <= 0) { - dError("failed to read %s since content is null", file); + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open file:%s since %s", file, terrstr()); goto _OVER; } - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat file:%s since %s", file, terrstr()); goto _OVER; } - cJSON *deployed = cJSON_GetObjectItem(root, "deployed"); - if (!deployed || deployed->type != cJSON_Number) { - dError("failed to read %s since deployed not found", file); + content = taosMemoryMalloc(size + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + if (taosReadFile(pFile, content, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read file:%s since %s", file, terrstr()); + goto _OVER; + } + + content[size] = '\0'; + + pJson = tjsonParse(content); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + if (dmDecodeFile(pJson, pDeployed) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; goto _OVER; } - *pDeployed = deployed->valueint != 0; - dDebug("succcessed to read file %s, deployed:%d", file, *pDeployed); code = 0; + dInfo("succceed to read mnode file %s", file); _OVER: - if (root != NULL) cJSON_Delete(root); + if (content != NULL) taosMemoryFree(content); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - terrno = code; + if (code != 0) { + dError("failed to read dnode file:%s since %s", file, terrstr()); + } return code; } From 71ef10409ce34ca8782c71090b86951630ca98f0 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 11 Jan 2023 11:37:59 +0800 Subject: [PATCH 11/14] enh: adjust dmEps.c --- source/dnode/mgmt/node_util/src/dmEps.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmEps.c 
b/source/dnode/mgmt/node_util/src/dmEps.c index 6eb4d44294..7bae703753 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -147,6 +147,22 @@ _OVER: if (code != 0) { dError("failed to read dnode file:%s since %s", file, terrstr()); } + + if (taosArrayGetSize(pData->dnodeEps) == 0) { + SDnodeEp dnodeEp = {0}; + dnodeEp.isMnode = 1; + taosGetFqdnPortFromEp(tsFirst, &dnodeEp.ep); + taosArrayPush(pData->dnodeEps, &dnodeEp); + } + + dDebug("reset dnode list on startup"); + dmResetEps(pData, pData->dnodeEps); + + if (dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) { + dError("localEp %s different with %s and need reconfigured", tsLocalEp, file); + return -1; + } + return code; } @@ -215,7 +231,7 @@ _OVER: if (code != 0) { if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); - dInfo("succeed to write dnode file:%s since %s, dnodeVer:%" PRId64, realfile, terrstr(), pData->dnodeVer); + dError("failed to write dnode file:%s since %s, dnodeVer:%" PRId64, realfile, terrstr(), pData->dnodeVer); } return code; } From c5959c5b1927a8c6002c4ae10e77944de5501084 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 11 Jan 2023 12:24:44 +0800 Subject: [PATCH 12/14] fix: invalid json item --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 17f20fdd7d..bf176ebb40 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -49,6 +49,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { int32_t code = -1; SWrapperCfg *pCfgs = NULL; + *ppCfgs = NULL; SJson *vnodes = tjsonGetObjectItem(pJson, "vnodes"); if (vnodes == NULL) return -1; @@ -64,7 +65,7 @@ static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg ** if (vnode == NULL) goto _OVER; SWrapperCfg *pCfg = &pCfgs[i]; - tjsonGetInt32ValueFromDouble(vnode, "id", pCfg->vgId, code); + tjsonGetInt32ValueFromDouble(vnode, "vgId", pCfg->vgId, code); if (code < 0) goto _OVER; tjsonGetInt32ValueFromDouble(vnode, "dropped", pCfg->dropped, code); if (code < 0) goto _OVER; From 3a848ccb360ceae85ee5abe3511f6ddc56ffc4bd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 11 Jan 2023 13:19:17 +0800 Subject: [PATCH 13/14] fix(query): fix the invalid return value check for percentile function. 
--- source/libs/function/src/builtinsimpl.c | 6 ++++-- source/libs/function/src/tpercentile.c | 9 +++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 2beb53a312..4a0b2682cb 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1662,6 +1662,8 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SVariant* pVal = &pCtx->param[1].param; + terrno = 0; + double v = 0; GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); @@ -1675,8 +1677,8 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { tMemBucketDestroy(pMemBucket); - if (ppInfo->result < 0) { - return TSDB_CODE_NO_AVAIL_DISK; + if (terrno != TSDB_CODE_SUCCESS) { + return terrno; } return functionFinalize(pCtx, pBlock); diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 04472c42ec..d4a362659c 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -92,6 +92,7 @@ static void resetPosInfo(SSlotInfo *pInfo) { double findOnlyResult(tMemBucket *pMemBucket) { ASSERT(pMemBucket->total == 1); + terrno = 0; for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) { tMemBucketSlot *pSlot = &pMemBucket->pSlots[i]; @@ -108,8 +109,10 @@ double findOnlyResult(tMemBucket *pMemBucket) { int32_t *pageId = taosArrayGet(list, 0); SFilePage *pPage = getBufPage(pMemBucket->pBuffer, *pageId); if (pPage == NULL) { - return -1; + terrno = TSDB_CODE_NO_AVAIL_DISK; + return 0; } + ASSERT(pPage->num == 1); double v = 0; @@ -546,9 +549,7 @@ double getPercentile(tMemBucket *pMemBucket, double percent) { // if only one elements exists, return it if (pMemBucket->total == 1) { - if (findOnlyResult(pMemBucket) < 0) { - return -1; - } + return findOnlyResult(pMemBucket); } percent = fabs(percent); From 1872627343b8d499707e6ec8688698363dca1ad0 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 11 Jan 2023 14:53:13 +0800 Subject: [PATCH 14/14] fix(tdb/ofp): upgrade large key ofp case --- source/libs/tdb/src/db/tdbBtree.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 79d37b7674..029039f911 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1063,11 +1063,11 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const } else { int nLeftKey = kLen; // pack partial key and nextPgno - memcpy(pCell + nHeader, pKey, nLocal - 4); - nLeft -= nLocal - 4; - nLeftKey -= nLocal - 4; + memcpy(pCell + nHeader, pKey, nLocal - nHeader - sizeof(pgno)); + nLeft -= nLocal - nHeader - sizeof(pgno); + nLeftKey -= nLocal - nHeader - sizeof(pgno); - memcpy(pCell + nHeader + nLocal - 4, &pgno, sizeof(pgno)); + memcpy(pCell + nLocal - sizeof(pgno), &pgno, sizeof(pgno)); int lastKeyPageSpace = 0; // pack left key & val to ovpages @@ -1087,9 +1087,12 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const if (lastKeyPage) { if (lastKeyPageSpace >= vLen) { - memcpy(pBuf + kLen - nLeftKey, pVal, vLen); + if (vLen > 0) { + memcpy(pBuf + kLen - nLeftKey, pVal, vLen); + + nLeft -= vLen; + } - nLeft -= vLen; pgno = 0; } else { memcpy(pBuf + kLen - nLeftKey, pVal, lastKeyPageSpace); @@ -1111,7 +1114,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, 
const } } - memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); if (ret < 0) {