feat: reduce code duplication in los_memory.c
[Background] los_memory.c traverses the memory node list in four separate places, so the duplication rate is high. [Solution] 1. Extract a single node-traversal function that takes a handler callback as a parameter, so the different processing scenarios reuse the same walk. [Impact] No impact on the build of existing products. re #I44WNU Signed-off-by: lanleinan <lanleinan@163.com> Change-Id: Iafd25be39e76ee29e4fb27e5ef65f1888cc23f02
parent 53117f9f47
commit 475db62db1
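Below is a minimal standalone sketch of the traversal-plus-handler pattern this commit introduces with OsAllMemNodeDoHandle: one function walks every node and hands each one to a caller-supplied callback, so the former per-scenario loops collapse into small handlers. The Node type and pool layout here are simplified stand-ins for illustration only, not the kernel's real OsMemNodeHead/OsMemPoolHead structures.

#include <stdio.h>

/* Simplified stand-in for a memory node header (hypothetical, illustration only). */
typedef struct Node {
    unsigned int size;
    int used;
} Node;

/* Walk every node once and hand it to the caller's handler,
 * mirroring the shape of OsAllMemNodeDoHandle(pool, handle, arg). */
static void ForEachNode(Node *nodes, unsigned int count,
                        void (*handle)(Node *node, void *arg), void *arg)
{
    for (unsigned int i = 0; i < count; i++) {
        handle(&nodes[i], arg);
    }
}

/* One small handler per scenario, in the spirit of GetTaskMemUsedHandle
 * and MemUsedGetHandle in the diff below. */
static void SumUsedHandle(Node *node, void *arg)
{
    unsigned int *total = (unsigned int *)arg;
    if (node->used) {
        *total += node->size;
    }
}

int main(void)
{
    Node pool[] = { { 32, 1 }, { 64, 0 }, { 128, 1 } };
    unsigned int used = 0;
    ForEachNode(pool, sizeof(pool) / sizeof(pool[0]), SumUsedHandle, &used);
    printf("used bytes: %u\n", used); /* prints 160 */
    return 0;
}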
@@ -280,13 +280,16 @@ STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)

#if (LOSCFG_MEM_MUL_REGIONS == 1)
/**
* When LOSCFG_MEM_MUL_REGIONS is enabled to support multiple non-continuous memory regions, the gap between two memory regions
* is marked as a used OsMemNodeHead node. The gap node couldn't be freed, and would also be skipped in some DFX functions. The
* 'ptr.prev' pointer of this node is set to OS_MEM_GAP_NODE_MAGIC to identify that this is a gap node.
* When LOSCFG_MEM_MUL_REGIONS is enabled to support multiple non-continuous memory regions,
* the gap between two memory regions is marked as a used OsMemNodeHead node. The gap node
* couldn't be freed, and would also be skipped in some DFX functions. The 'ptr.prev' pointer
* of this node is set to OS_MEM_GAP_NODE_MAGIC to identify that this is a gap node.
*/
#define OS_MEM_GAP_NODE_MAGIC 0xDCBAABCD
#define OS_MEM_MARK_GAP_NODE(node) (((struct OsMemNodeHead *)(node))->ptr.prev = (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
#define OS_MEM_IS_GAP_NODE(node) (((struct OsMemNodeHead *)(node))->ptr.prev == (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
#define OS_MEM_MARK_GAP_NODE(node) \
(((struct OsMemNodeHead *)(node))->ptr.prev = (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
#define OS_MEM_IS_GAP_NODE(node) \
(((struct OsMemNodeHead *)(node))->ptr.prev == (struct OsMemNodeHead *)OS_MEM_GAP_NODE_MAGIC)
#else
#define OS_MEM_MARK_GAP_NODE(node)
#define OS_MEM_IS_GAP_NODE(node) FALSE
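As a hypothetical, self-contained illustration of the gap-node trick documented in the comment above: the 'prev' pointer of the node that fills the hole between two regions is set to a magic value, so traversals can recognize and skip it. The Node type and macro names below are simplified stand-ins, not the kernel's real definitions.

#include <stdint.h>
#include <stdio.h>

#define GAP_NODE_MAGIC 0xDCBAABCD

/* Simplified stand-in for a node header (hypothetical). */
typedef struct Node {
    struct Node *prev;
    unsigned int size;
} Node;

#define MARK_GAP_NODE(n) ((n)->prev = (Node *)(uintptr_t)GAP_NODE_MAGIC)
#define IS_GAP_NODE(n)   ((n)->prev == (Node *)(uintptr_t)GAP_NODE_MAGIC)

int main(void)
{
    Node regular = { NULL, 64 };
    Node gap = { NULL, 16 };
    MARK_GAP_NODE(&gap);          /* mark the hole between two regions */

    Node *nodes[] = { &regular, &gap };
    for (int i = 0; i < 2; i++) {
        if (IS_GAP_NODE(nodes[i])) {
            continue;             /* skip gap nodes, as the DFX paths do */
        }
        printf("node size %u\n", nodes[i]->size);
    }
    return 0;
}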
@@ -302,9 +305,7 @@ STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
node->header.taskID = LOS_CurTaskIDGet();
}
#endif

#if (LOSCFG_TASK_MEM_USED == 1)
VOID OsTaskMemUsed(VOID *pool, UINT32 *outArray, UINT32 arraySize)
STATIC VOID OsAllMemNodeDoHandle(VOID *pool, VOID (*handle)(struct OsMemNodeHead *curNode, VOID *arg), VOID *arg)
{
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
@@ -322,36 +323,48 @@ VOID OsTaskMemUsed(VOID *pool, UINT32 *outArray, UINT32 arraySize)

MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (tmpNode == endNode) {
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
#endif
#ifndef LOSCFG_MEM_MUL_REGIONS
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
#else
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(tmpNode)) {
#endif
if (tmpNode->taskID < arraySize) {
outArray[tmpNode->taskID] += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
}
break;
}
handle(tmpNode, arg);
}
MEM_UNLOCK(poolInfo, intSave);
}

#if (LOSCFG_TASK_MEM_USED == 1)
STATIC VOID GetTaskMemUsedHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
UINT32 *args = (UINT32 *)arg;
UINT32 *outArray = (UINT32 *)(UINTPTR)*args;
UINT32 arraySize = *(args + 1);
#ifndef LOSCFG_MEM_MUL_REGIONS
if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
#else
if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag) && !OS_MEM_IS_GAP_NODE(curNode)) {
#endif
if (curNode->taskID < arraySize) {
outArray[curNode->taskID] += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
}
}
return;
}

VOID OsTaskMemUsed(VOID *pool, UINT32 *outArray, UINT32 arraySize)
{
UINT32 args[2] = {(UINT32)(UINTPTR)outArray, arraySize};
OsAllMemNodeDoHandle(pool, GetTaskMemUsedHandle, (VOID *)args);
return;
}
#endif

#if (LOSCFG_MEM_WATERLINE == 1)
@@ -592,20 +605,15 @@ STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
}
}

STATIC VOID OsMemUsedNodePrintHandle(struct OsMemNodeHead *node, VOID *arg)
{
UNUSED(arg);
OsMemUsedNodePrint(node);
return;
}

VOID LOS_MemUsedNodeShow(VOID *pool)
{
if (pool == NULL) {
PRINTK("input param is NULL\n");
return;
}
if (LOS_MemIntegrityCheck(pool)) {
PRINTK("LOS_MemIntegrityCheck error\n");
return;
}
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 intSave;
UINT32 count;

PRINTK("\n\rnode size ");
@@ -614,33 +622,9 @@ VOID LOS_MemUsedNodeShow(VOID *pool)
}
PRINTK("\n");

MEM_LOCK(poolInfo, intSave);
OsMemLeakCheckInit();
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (tmpNode == endNode) {
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
OsMemUsedNodePrint(tmpNode);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
OsMemUsedNodePrint(tmpNode);
}
#endif
MEM_UNLOCK(poolInfo, intSave);
OsAllMemNodeDoHandle(pool, OsMemUsedNodePrintHandle, NULL);
return;
}

#if (LOSCFG_KERNEL_PRINTF != 0)
@@ -688,7 +672,8 @@ STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPo

STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
{
UINT32 mask = poolHead->freeListBitmap[index >> 5]; /* 5: Divide by 32 to calculate the index of the bitmap array. */
/* 5: Divide by 32 to calculate the index of the bitmap array. */
UINT32 mask = poolHead->freeListBitmap[index >> 5];
mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
if (mask != 0) {
index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
@@ -723,7 +708,8 @@ STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, U
}

for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) {
mask = poolHead->freeListBitmap[index >> 5]; /* 5: Divide by 32 to calculate the index of the bitmap array. */
/* 5: Divide by 32 to calculate the index of the bitmap array. */
mask = poolHead->freeListBitmap[index >> 5];
if (mask != 0) {
index = OsMemFFS(mask) + index;
goto DONE;
@@ -744,12 +730,14 @@ DONE:

STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
{
head->freeListBitmap[index >> 5] |= 1U << (index & 0x1f); /* 5: Divide by 32 to calculate the index of the bitmap array. */
/* 5: Divide by 32 to calculate the index of the bitmap array. */
head->freeListBitmap[index >> 5] |= 1U << (index & 0x1f);
}

STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
{
head->freeListBitmap[index >> 5] &= ~(1U << (index & 0x1f)); /* 5: Divide by 32 to calculate the index of the bitmap array. */
/* 5: Divide by 32 to calculate the index of the bitmap array. */
head->freeListBitmap[index >> 5] &= ~(1U << (index & 0x1f));
}

STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
@@ -865,7 +853,8 @@ STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)

poolHead->info.pool = pool;
poolHead->info.totalSize = size;
poolHead->info.attr &= ~(OS_MEM_POOL_UNLOCK_ENABLE | OS_MEM_POOL_EXPAND_ENABLE); /* default attr: lock, not expand. */
/* default attr: lock, not expand. */
poolHead->info.attr &= ~(OS_MEM_POOL_UNLOCK_ENABLE | OS_MEM_POOL_EXPAND_ENABLE);

newNode = OS_MEM_FIRST_NODE(pool);
newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
@@ -1456,8 +1445,26 @@ VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
}

#if (LOSCFG_MEM_FREE_BY_TASKID == 1)
STATIC VOID MemNodeFreeByTaskIDHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
UINT32 *args = (UINT32 *)arg;
UINT32 taskID = *args;
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)(UINTPTR)(*(args + 1));
struct OsMemUsedNodeHead *node = NULL;
if (!OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
return;
}

node = (struct OsMemUsedNodeHead *)curNode;
if (node->header.taskID == taskID) {
OsMemFree(poolHead, &node->header);
}
return;
}

UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
{
UINT32 args[2] = {taskID, (UINT32)(UINTPTR)pool};
if (pool == NULL) {
return OS_ERROR;
}
@@ -1466,26 +1473,7 @@ UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
return OS_ERROR;
}

struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemUsedNodeHead *node = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 intSave;

MEM_LOCK(poolHead, intSave);
endNode = OS_MEM_END_NODE(pool, poolHead->info.totalSize);
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (!OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
continue;
}

node = (struct OsMemUsedNodeHead *)tmpNode;
if (node->header.taskID == taskID) {
OsMemFree(poolHead, &node->header);
}
}
MEM_UNLOCK(poolHead, intSave);
OsAllMemNodeDoHandle(pool, MemNodeFreeByTaskIDHandle, (VOID *)args);

return LOS_OK;
}
@@ -1519,11 +1507,19 @@ UINT32 LOS_MemPoolSizeGet(const VOID *pool)
return count;
}

STATIC VOID MemUsedGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
UINT32 *memUsed = (UINT32 *)arg;
if (OS_MEM_IS_GAP_NODE(curNode)) {
*memUsed += OS_MEM_NODE_HEAD_SIZE;
} else if (OS_MEM_NODE_GET_USED_FLAG(curNode->sizeAndFlag)) {
*memUsed += OS_MEM_NODE_GET_SIZE(curNode->sizeAndFlag);
}
return;
}

UINT32 LOS_MemTotalUsedGet(VOID *pool)
{
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *endNode = NULL;
UINT32 memUsed = 0;
UINT32 intSave;
@@ -1531,39 +1527,7 @@ UINT32 LOS_MemTotalUsedGet(VOID *pool)
return LOS_NOK;
}

MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
if (tmpNode == endNode) {
memUsed += OS_MEM_NODE_HEAD_SIZE;
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
}
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;) {
if (OS_MEM_IS_GAP_NODE(tmpNode)) {
memUsed += OS_MEM_NODE_HEAD_SIZE;
} else if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
}
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
}
#endif
MEM_UNLOCK(poolInfo, intSave);
OsAllMemNodeDoHandle(pool, MemUsedGetHandle, (VOID *)&memUsed);

return memUsed;
}
@@ -1871,7 +1835,7 @@ ERROR_OUT:
return LOS_NOK;
}

STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node,
STATIC INLINE VOID OsMemInfoGet(struct OsMemNodeHead *node,
LOS_MEM_POOL_STATUS *poolStatus)
{
UINT32 totalUsedSize = 0;
@@ -1906,9 +1870,17 @@ STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNode
poolStatus->freeNodeNum += freeNodeNum;
}

STATIC VOID OsMemNodeInfoGetHandle(struct OsMemNodeHead *curNode, VOID *arg)
{
LOS_MEM_POOL_STATUS *poolStatus = (LOS_MEM_POOL_STATUS *)arg;
OsMemInfoGet(curNode, poolStatus);
return;
}

UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
{
struct OsMemPoolHead *poolInfo = pool;
UINT32 intSave;

if (poolStatus == NULL) {
PRINT_ERR("can't use NULL addr to save info\n");
@@ -1922,35 +1894,9 @@ UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)

(VOID)memset_s(poolStatus, sizeof(LOS_MEM_POOL_STATUS), 0, sizeof(LOS_MEM_POOL_STATUS));

struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 intSave;
OsAllMemNodeDoHandle(pool, OsMemNodeInfoGetHandle, (VOID *)poolStatus);

MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (tmpNode == endNode) {
poolStatus->totalUsedSize += OS_MEM_NODE_HEAD_SIZE;
poolStatus->usedNodeNum++;
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
OsMemInfoGet(poolInfo, tmpNode, poolStatus);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
OsMemInfoGet(poolInfo, tmpNode, poolStatus);
}
#endif
#if (LOSCFG_MEM_WATERLINE == 1)
poolStatus->usageWaterLine = poolInfo->info.waterLine;
#endif
@@ -2028,8 +1974,10 @@ UINT32 LOS_MemFreeNodeShow(VOID *pool)
} else {
UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
UINT32 offset = val >> OS_MEM_SLI;
PRINTK("size: [0x%x, 0x%x], num: %u\n", (offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1), countNum[index]);
PRINTK("size: [0x%x, 0x%x], num: %u\n",
(offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
countNum[index]);
}
}
PRINTK("\n ********************************************************************\n\n");
@@ -2047,7 +1995,8 @@ VOID LOS_MemUnlockEnable(VOID *pool)
}

#if (LOSCFG_MEM_MUL_REGIONS == 1)
STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion * const memRegions, UINT32 memRegionCount)
STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion * const memRegions,
UINT32 memRegionCount)
{
const LosMemRegion *memRegion = NULL;
VOID *lastStartAddress = NULL;
@@ -2072,17 +2021,19 @@ STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion *
curStartAddress = memRegion->startAddress;
curLength = memRegion->length;
if ((curStartAddress == NULL) || (curLength == 0)) {
PRINT_ERR("Memory address or length configured wrongly:address:0x%x, the length:0x%x\n", (UINTPTR)curStartAddress, curLength);
PRINT_ERR("Memory address or length configured wrongly:address:0x%x, the length:0x%x\n",
(UINTPTR)curStartAddress, curLength);
return LOS_NOK;
}
if (((UINTPTR)curStartAddress & (OS_MEM_ALIGN_SIZE - 1)) || (curLength & (OS_MEM_ALIGN_SIZE - 1))) {
PRINT_ERR("Memory address or length configured not aligned:address:0x%x, the length:0x%x, alignsize:%d\n", \
(UINTPTR)curStartAddress, curLength, OS_MEM_ALIGN_SIZE);
PRINT_ERR("Memory address or length configured not aligned:address:0x%x, the length:0x%x, alignsize:%d\n",
(UINTPTR)curStartAddress, curLength, OS_MEM_ALIGN_SIZE);
return LOS_NOK;
}
if ((lastStartAddress != NULL) && (((UINT8 *)lastStartAddress + lastLength) >= (UINT8 *)curStartAddress)) {
PRINT_ERR("Memory regions overlapped, the last start address:0x%x, the length:0x%x, the current start address:0x%x\n", \
(UINTPTR)lastStartAddress, lastLength, (UINTPTR)curStartAddress);
PRINT_ERR("Memory regions overlapped, the last start address:0x%x, "
"the length:0x%x, the current start address:0x%x\n",
(UINTPTR)lastStartAddress, lastLength, (UINTPTR)curStartAddress);
return LOS_NOK;
}
memRegion++;
@@ -2093,7 +2044,8 @@ STATIC INLINE UINT32 OsMemMulRegionsParamCheck(VOID *pool, const LosMemRegion *
return LOS_OK;
}

STATIC INLINE VOID OsMemMulRegionsLink(struct OsMemPoolHead *poolHead, VOID *lastStartAddress, UINT32 lastLength, struct OsMemNodeHead *lastEndNode, const LosMemRegion *memRegion)
STATIC INLINE VOID OsMemMulRegionsLink(struct OsMemPoolHead *poolHead, VOID *lastStartAddress, UINT32 lastLength,
struct OsMemNodeHead *lastEndNode, const LosMemRegion *memRegion)
{
UINT32 curLength;
UINT32 gapSize;
@@ -2172,7 +2124,8 @@ UINT32 LOS_MemRegionsAdd(VOID *pool, const LosMemRegion *const memRegions, UINT3

firstFreeNode = OS_MEM_FIRST_NODE(lastStartAddress);
lastEndNode = OS_MEM_END_NODE(lastStartAddress, lastLength);
while (regionCount < memRegionCount) { // traverse the rest memory regions, and initialize them as free nodes and link together
/* traverse the rest memory regions, and initialize them as free nodes and link together */
while (regionCount < memRegionCount) {
curStartAddress = memRegion->startAddress;
curLength = memRegion->length;