Fix bug when memory drains (part 1)

This commit is contained in:
TXuian 2024-10-31 15:40:46 +08:00
parent 3e1479bdf0
commit 78cba2564e
16 changed files with 117 additions and 57 deletions

View File

@ -74,7 +74,7 @@ Modification:
#include "cortex_a9.h"
#define NR_CPU 3
#define NR_CPU 4
__attribute__((always_inline, optimize("O0"))) static inline uint32_t user_mode()
{

View File

@ -41,10 +41,14 @@ static void tracer_init_node(TracerNode* node, char* name, tracemeta_ac_type typ
node->parent = NULL;
if (name != NULL) {
char* p_name = (char*)slab_alloc(&sys_tracer.node_name_allocator);
if (!p_name) {
p_name = "BAD_NAME(NOMEM)";
} else {
strcpy(p_name, name);
p_name[TRACER_NODE_NAME_LEN - 1] = '\0';
node->name = p_name;
}
}
doubleListNodeInit(&node->children_guard);
node->p_resource = p_resource;
doubleListNodeInit(&node->list_node);
@ -58,8 +62,8 @@ void sys_tracer_init()
sys_tracer.sys_tracer_tag.meta = &sys_tracer.root_node;
// init memory allocator
slab_init(&sys_tracer.node_allocator, sizeof(TracerNode));
slab_init(&sys_tracer.node_name_allocator, sizeof(char[TRACER_NODE_NAME_LEN]));
slab_init(&sys_tracer.node_allocator, sizeof(TracerNode), "TracerNodeAllocator");
slab_init(&sys_tracer.node_name_allocator, sizeof(char[TRACER_NODE_NAME_LEN]), "TracerNodeNameAllocator");
}
static char* parse_path(char* path, char* const name)
@ -81,7 +85,7 @@ static char* parse_path(char* path, char* const name)
// handle current name
int len = path - cur_start;
if (len >= TRACER_NODE_NAME_LEN) {
strncpy(name, cur_start, TRACER_NODE_NAME_LEN);
strncpy(name, cur_start, TRACER_NODE_NAME_LEN - 1);
name[TRACER_NODE_NAME_LEN - 1] = '\0';
} else {
strncpy(name, cur_start, len);
@ -177,7 +181,7 @@ bool DeleteResource(TraceTag* target, TraceTag* owner)
assert(target != NULL && owner != NULL);
assert(owner->meta != NULL && owner->meta->type == TRACER_OWNER);
if (target->meta == NULL) {
ERROR("Tracer: Delete a empty resource\n");
ERROR("Tracer: Delete a empty resource, owner: %s\n", owner->meta->name);
return false;
}

View File

@ -42,15 +42,15 @@ struct slab_state {
};
struct slab_allocator {
size_t element_size;
size_t nr_elements;
size_t slabsize;
uint64_t bitmap_empty;
struct slab_state *partial, *empty, *full;
char* name;
};
void slab_init(struct slab_allocator*, size_t);
void slab_init(struct slab_allocator*, size_t, char* name);
void slab_destroy(const struct slab_allocator*);
void* slab_alloc(struct slab_allocator*);

View File

@ -16,7 +16,7 @@ typedef struct Queue {
void queue_init(Queue* queue);
QueueNode* queue_front(Queue* queue);
bool queue_is_empty(Queue* queue);
void dequeue(Queue* queue);
void enqueue(Queue* queue, uintptr_t key, void* data);
bool dequeue(Queue* queue);
bool enqueue(Queue* queue, uintptr_t key, void* data);
void module_queue_factory_init(TraceTag* _softkernel_tag);

View File

@ -75,8 +75,10 @@ void* kalloc_by_ownership(TraceTag owner, uintptr_t size)
}
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
assert(0 == rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL));
// DEBUG("%p %p %p %p\n", usage, usage->mem_block_root, usage->tag, new_mem);
if (0 != rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL)) {
kfree(new_mem);
return NULL;
}
return new_mem;
}
@ -118,7 +120,10 @@ void* raw_alloc_by_ownership(TraceTag owner, uintptr_t size)
}
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
assert(0 == rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL));
if (0 != rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL)) {
raw_free(new_mem);
return NULL;
}
return new_mem;
}

View File

@ -48,7 +48,7 @@ Modification:
#define ARENA_SIZE_PER_INCREASE PAGE_SIZE
#define MAX_NR_ELEMENT_PER_SLABPAGE 64
void slab_init(struct slab_allocator* const allocator, const size_t element_size)
void slab_init(struct slab_allocator* const allocator, const size_t element_size, char* name)
{
if (allocator == NULL) {
panic("init a NULL slab_allocator\n");
@ -64,8 +64,11 @@ void slab_init(struct slab_allocator* const allocator, const size_t element_size
allocator->nr_elements = allocator->nr_elements > MAX_NR_ELEMENT_PER_SLABPAGE ? MAX_NR_ELEMENT_PER_SLABPAGE : allocator->nr_elements;
allocator->bitmap_empty = ~BITMAP_BITS_EMPTY_FULL >> (MAX_NR_ELEMENT_PER_SLABPAGE - allocator->nr_elements);
allocator->partial = allocator->empty = allocator->full = NULL;
if (name) {
allocator->name = name;
}
}
void* slab_alloc(struct slab_allocator* const allocator)
@ -108,7 +111,7 @@ void* slab_alloc(struct slab_allocator* const allocator)
/* achieve slab from outer arena */
allocator->partial = (struct slab_state*)LOWLEVEL_ALLOC(allocator->slabsize);
if (UNLIKELY(allocator->partial == NULL)) {
ERROR("no enough memory\n");
ERROR("slab %s: no enough memory\n", allocator->name);
return allocator->partial = NULL;
}
allocator->partial->prev = allocator->partial->next = NULL;

View File

@ -44,7 +44,7 @@ static struct slab_allocator* SessionAllocator()
static bool init = false;
static struct slab_allocator session_slab;
if (!init) {
slab_init(&session_slab, sizeof(struct session_backend));
slab_init(&session_slab, sizeof(struct session_backend), "SessionAllocator");
}
return &session_slab;
}
@ -207,11 +207,26 @@ void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, con
static int next_session_id = 1;
struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity)
{
/* alloc session backend */
struct session_backend* session_backend = (struct session_backend*)slab_alloc(SessionAllocator());
if (UNLIKELY(session_backend == NULL)) {
return NULL;
}
session_backend->session_id = next_session_id++;
if (0 != rbt_insert(&client->cli_sess_map, session_backend->session_id, &session_backend->client_side)) {
DEBUG("Rbt of %s no memory\n", client->name);
slab_free(SessionAllocator(), session_backend);
return NULL;
}
if (0 != rbt_insert(&server->svr_sess_map, session_backend->session_id, &session_backend->server_side)) {
DEBUG("Rbt of %s no memory\n", server->name);
rbt_delete(&client->cli_sess_map, session_backend->session_id);
slab_free(SessionAllocator(), session_backend);
return NULL;
}
sem_id_t new_sem_id = ksemaphore_alloc(&xizi_task_manager.semaphore_pool, 0);
if (new_sem_id == INVALID_SEM_ID) {
@ -253,7 +268,6 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
}
/* build session_backend */
session_backend->session_id = next_session_id++;
session_backend->buf_kernel_addr = kern_vaddr;
session_backend->nr_pages = nr_pages;
session_backend->client = client;
@ -264,7 +278,6 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
session_backend->client_side.closed = false;
doubleListNodeInit(&session_backend->client_side.node);
doubleListAddOnBack(&session_backend->client_side.node, &client->cli_sess_listhead);
rbt_insert(&client->cli_sess_map, session_backend->session_id, &session_backend->client_side);
// init server side session struct
session_backend->server_side.buf_addr = server_vaddr;
session_backend->server_side.capacity = true_capacity;
@ -273,7 +286,6 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
session_backend->server_side.closed = false;
doubleListNodeInit(&session_backend->server_side.node);
doubleListAddOnBack(&session_backend->server_side.node, &server->svr_sess_listhead);
rbt_insert(&server->svr_sess_map, session_backend->session_id, &session_backend->server_side);
server->memspace->mem_size += true_capacity;
client->memspace->mem_size += true_capacity;
@ -298,6 +310,7 @@ int delete_share_pages(struct session_backend* session_backend)
/* unmap share pages */
// close session from the server's perspective
if (session_backend->server_side.closed && session_backend->server != NULL) {
rbt_delete(&session_backend->server->svr_sess_map, session_backend->session_id);
xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages);
doubleListDel(&session_backend->server_side.node);
session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
@ -306,6 +319,7 @@ int delete_share_pages(struct session_backend* session_backend)
// close session from the client's perspective
if (session_backend->client_side.closed && session_backend->client != NULL) {
rbt_delete(&session_backend->client->cli_sess_map, session_backend->session_id);
xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages);
doubleListDel(&session_backend->client_side.node);
session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;

View File

@ -63,8 +63,17 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
assert(session_backend->client == cur_task);
assert(client_session->closed == false);
client_session->closed = true;
rbt_delete(&cur_task->cli_sess_map, client_session_node->key);
xizi_share_page_manager.delete_share_pages(session_backend);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
}
}
RbtNode* server_session_node = rbt_search(&cur_task->svr_sess_map, session->id);
@ -80,7 +89,6 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
assert(session_backend->server == cur_task);
assert(server_session->closed == false);
server_session->closed = true;
rbt_delete(&cur_task->cli_sess_map, server_session_node->key);
xizi_share_page_manager.delete_share_pages(session_backend);
}

View File

@ -47,14 +47,13 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
int cur_userland_idx = 0;
while (!queue_is_empty(&cur_task->sessions_in_handle)) {
struct server_session* server_session = (struct server_session*)queue_front(&cur_task->sessions_in_handle)->data;
assert(server_session != NULL);
// wrong session info
if (userland_session_arr[cur_userland_idx].id != SERVER_SESSION_BACKEND(server_session)->session_id || //
(uintptr_t)userland_session_arr[cur_userland_idx].buf != server_session->buf_addr) {
ERROR("mismatched old session from %s, user buf: %x, server buf: %x\n", cur_task->name, userland_session_arr[cur_userland_idx].buf, server_session->buf_addr);
return -1;
}
} else {
// update session_backend
ksemaphore_signal(&xizi_task_manager.semaphore_pool, SERVER_SESSION_BACKEND(server_session)->client_sem_to_wait);
@ -62,11 +61,11 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
server_session->tail = userland_session_arr[cur_userland_idx].tail;
userland_session_arr[cur_userland_idx].buf = NULL;
userland_session_arr[cur_userland_idx].id = -1;
}
dequeue(&cur_task->sessions_in_handle);
assert(dequeue(&cur_task->sessions_in_handle));
cur_userland_idx++;
}
int nr_handled_calls = cur_userland_idx;
/* poll with new sessions */
cur_userland_idx = 0;
@ -76,6 +75,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
}
struct server_session* server_session = (struct server_session*)queue_front(&cur_task->sessions_to_be_handle)->data;
assert(server_session != NULL);
if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
// client had closed it, then server will close it too
@ -89,7 +89,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
continue;
}
userland_session_arr[cur_userland_idx++] = (struct Session) {
userland_session_arr[cur_userland_idx] = (struct Session) {
.buf = (void*)server_session->buf_addr,
.capacity = server_session->capacity,
.head = server_session->head,
@ -97,8 +97,13 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
.id = SERVER_SESSION_BACKEND(server_session)->session_id,
};
enqueue(&cur_task->sessions_in_handle, 0, (void*)server_session);
dequeue(&cur_task->sessions_to_be_handle);
if (!enqueue(&cur_task->sessions_in_handle, 0, (void*)server_session)) {
userland_session_arr[cur_userland_idx].buf = NULL;
userland_session_arr[cur_userland_idx].id = 0;
break;
}
assert(dequeue(&cur_task->sessions_to_be_handle));
cur_userland_idx++;
}
// end of userland copy
@ -108,7 +113,8 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
xizi_task_manager.task_yield_noschedule(cur_task, false);
xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
// @todo support blocking(now bug at 4 cores running)
// xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
}
return 0;
}

View File

@ -60,7 +60,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
last = name + 1;
}
}
strncpy(task->name, last, sizeof(task->name));
strncpy(task->name, last, sizeof(task->name) - 1);
// init pcb schedule attributes
xizi_task_manager.task_set_default_schedule_attr(task);

View File

@ -53,7 +53,10 @@ int sys_wait_session(struct Session* userland_session)
/* handle calling */
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(client_session);
struct Thread* server_to_call = session_backend->server;
enqueue(&server_to_call->sessions_to_be_handle, 0, (void*)&session_backend->server_side);
if (!enqueue(&server_to_call->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
sys_exit(cur_task);
return -1;
}
assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));
ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait);

View File

@ -61,6 +61,7 @@ struct MemSpace* alloc_memspace(char* name)
slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
return NULL;
}
assert(pmemspace->tag.meta != NULL);
rbtree_init(&pmemspace->kernspace_mem_usage.mem_block_map);
if (!CreateResourceTag(&pmemspace->kernspace_mem_usage.tag, &pmemspace->tag, "MemUsage", TRACER_SYSOBJECT, (void*)&pmemspace->kernspace_mem_usage) || //

View File

@ -25,7 +25,7 @@ void semaphore_pool_init(struct XiziSemaphorePool* sem_pool)
{
assert(sem_pool != NULL);
sem_pool->next_sem_id = INVALID_SEM_ID + 1;
slab_init(&sem_pool->allocator, sizeof(struct ksemaphore));
slab_init(&sem_pool->allocator, sizeof(struct ksemaphore), "SemAllocator");
doubleListNodeInit(&sem_pool->sem_list_guard);
rbtree_init(&sem_pool->sem_pool_map);
sem_pool->nr_sem = 0;
@ -60,7 +60,11 @@ sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val)
doubleListNodeInit(&sem->sem_list_node);
doubleListNodeInit(&sem->wait_list_guard);
rbt_insert(&sem_pool->sem_pool_map, sem->id, sem);
if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) {
slab_free(&sem_pool->allocator, sem);
return INVALID_SEM_ID;
}
doubleListAddOnHead(&sem->sem_list_node, &sem_pool->sem_list_guard);
sem_pool->nr_sem++;

View File

@ -73,9 +73,9 @@ static void _task_manager_init()
doubleListNodeInit(&xizi_task_manager.task_running_list_head);
doubleListNodeInit(&xizi_task_manager.task_sleep_list_head);
// init task (slab) allocator
slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace));
slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread));
slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy));
slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace), "MemlpaceCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread), "TreadCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy), "DMBuddyAllocator");
semaphore_pool_init(&xizi_task_manager.semaphore_pool);
// tid pool
@ -117,7 +117,6 @@ int _task_return_sys_resources(struct Thread* ptask)
assert(session_backend->server == ptask);
// cut the connection from task to session
server_session->closed = true;
rbt_delete(&ptask->svr_sess_map, session_backend->session_id);
xizi_share_page_manager.delete_share_pages(session_backend);
}
@ -130,8 +129,17 @@ int _task_return_sys_resources(struct Thread* ptask)
assert(session_backend->client == ptask);
// cut the connection from task to session
client_session->closed = true;
rbt_delete(&ptask->cli_sess_map, session_backend->session_id);
xizi_share_page_manager.delete_share_pages(session_backend);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
}
}
if (ptask->server_identifier.meta != NULL) {

View File

@ -14,7 +14,7 @@ static struct QueueFactory queue_factory;
void module_queue_factory_init(TraceTag* _softkernel_tag)
{
CreateResourceTag(&queue_factory.tag, _softkernel_tag, "GlobalQueueFactory", TRACER_SYSOBJECT, &queue_factory);
slab_init(&queue_factory.queue_ele_allocator, sizeof(struct QueueNode));
slab_init(&queue_factory.queue_ele_allocator, sizeof(struct QueueNode), "QueueNodeAllocator");
}
void queue_init(Queue* queue)
@ -39,12 +39,12 @@ bool queue_is_empty(Queue* queue)
return false;
}
void dequeue(Queue* queue)
bool dequeue(Queue* queue)
{
struct QueueNode* temp = queue->front;
if (queue->front == NULL) {
return;
return false;
}
if (queue->front == queue->rear)
@ -54,22 +54,26 @@ void dequeue(Queue* queue)
queue->nr_ele--;
slab_free(&queue_factory.queue_ele_allocator, (void*)temp);
return true;
}
void enqueue(Queue* queue, uintptr_t key, void* data)
/* Append a (key, data) node to the tail of the queue.
 * Returns false when the node cannot be allocated (slab out of memory),
 * so callers can detect and handle allocation failure; returns true on
 * success. nr_ele is incremented exactly once, on the single success path.
 */
bool enqueue(Queue* queue, uintptr_t key, void* data)
{
    QueueNode* temp = (struct QueueNode*)slab_alloc(&queue_factory.queue_ele_allocator);
    if (temp == NULL) {
        // propagate allocation failure instead of silently dropping the element
        return false;
    }
    temp->key = key;
    temp->data = data;
    temp->next = NULL;
    if (queue->front == NULL && queue->rear == NULL) {
        // empty queue: new node is both head and tail
        queue->front = queue->rear = temp;
    } else {
        // non-empty: link after current tail and advance it
        queue->rear->next = temp;
        queue->rear = temp;
    }
    queue->nr_ele++;
    return true;
}

View File

@ -14,7 +14,7 @@ static struct RbtFactory rbt_factory;
void module_rbt_factory_init(TraceTag* _softkernel_tag)
{
CreateResourceTag(&rbt_factory.tag, _softkernel_tag, "GlobalRbtFactory", TRACER_SYSOBJECT, &rbt_factory);
slab_init(&rbt_factory.rbtnode_ele_allocator, sizeof(struct RbtNode));
slab_init(&rbt_factory.rbtnode_ele_allocator, sizeof(struct RbtNode), "RbtNodeAllocator");
}
void delete_case1(RbtTree* tree, RbtNode* node);