Update free_thread: rename the TaskLifecycleOperations.free_pcb callback to free_thread and rework thread resource teardown

This commit is contained in:
TXuian 2024-11-01 11:37:11 +08:00
parent 02f6a412de
commit 2f54409819
8 changed files with 145 additions and 120 deletions

View File

@ -36,6 +36,10 @@ Modification:
#include "list.h"
#include "task.h"
enum {
INVALID_SESS_ID = 0,
};
/// @brief userland session info copy
struct Session {
uintptr_t id;
@ -68,7 +72,7 @@ struct client_session {
struct session_backend {
struct server_session server_side;
struct client_session client_side;
int session_id; // id of this session
uintptr_t session_id; // id of this session
int nr_pages; // pages used by this pipe
struct Thread* client; // client of this pipe
struct Thread* server; // server of this pipe
@ -93,22 +97,5 @@ extern struct XiziSharePageManager xizi_share_page_manager;
int module_share_page_init(struct SharePageRightGroup* right_group);
static inline void client_close_session(struct Thread* thd, struct client_session* cli_sess)
{
    assert(cli_sess != NULL);

    /* only the owning client thread may close its session, and only once */
    struct session_backend* backend = CLIENT_SESSION_BACKEND(cli_sess);
    assert(backend->client == thd);
    assert(!cli_sess->closed);

    /* mark closed first, then release the shared pages backing the session */
    cli_sess->closed = true;
    xizi_share_page_manager.delete_share_pages(backend);
}
static inline void server_close_session(struct Thread* thd, struct server_session* svr_sess)
{
    assert(svr_sess != NULL);

    /* only the owning server thread may close its session, and only once */
    struct session_backend* backend = SERVER_SESSION_BACKEND(svr_sess);
    assert(backend->server == thd);
    assert(!svr_sess->closed);

    /* mark closed first, then release the shared pages backing the session */
    svr_sess->closed = true;
    xizi_share_page_manager.delete_share_pages(backend);
}
void client_close_session(struct Thread* thd, struct client_session* cli_sess);
void server_close_session(struct Thread* thd, struct server_session* svr_sess);

View File

@ -48,7 +48,8 @@ Modification:
#define SLEEP_MONITOR_CORE 0
enum ProcState {
INIT = 0,
UNINIT = 0,
INIT,
READY,
RUNNING,
DEAD,
@ -133,7 +134,7 @@ struct TaskLifecycleOperations {
/* new a task control block, checkout #sys_spawn for usage */
struct Thread* (*new_thread)(struct MemSpace* pmemspace);
/* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
void (*free_pcb)(struct Thread*);
void (*free_thread)(struct Thread*);
};
struct XiziTaskManager {

View File

@ -204,7 +204,7 @@ void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, con
}
}
static int next_session_id = 1;
static int next_session_id = INVALID_SESS_ID + 1;
struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity)
{
@ -361,4 +361,28 @@ int module_share_page_init(struct SharePageRightGroup* _right_group)
/* assign rights to share page module */
right_group = *_right_group;
return 0;
}
void client_close_session(struct Thread* thd, struct client_session* cli_sess)
{
    assert(cli_sess != NULL);

    /* the caller must be the session's client, and the session still open */
    struct session_backend* backend = CLIENT_SESSION_BACKEND(cli_sess);
    assert(backend->client == thd);
    assert(!cli_sess->closed);

    /* remember the id now: delete_share_pages() detaches the session from
       this thread's bookkeeping, so read it before tearing down */
    uintptr_t closed_id = backend->session_id;
    cli_sess->closed = true;
    xizi_share_page_manager.delete_share_pages(backend);

    /* after teardown the id must be gone from this thread's client-session map */
    assert(rbt_search(&thd->cli_sess_map, closed_id) == NULL);
}
void server_close_session(struct Thread* thd, struct server_session* svr_sess)
{
    assert(svr_sess != NULL);

    /* the caller must be the session's server, and the session still open */
    struct session_backend* backend = SERVER_SESSION_BACKEND(svr_sess);
    assert(backend->server == thd);
    assert(!svr_sess->closed);

    /* remember the id now: delete_share_pages() detaches the session from
       this thread's bookkeeping, so read it before tearing down */
    uintptr_t closed_id = backend->session_id;
    svr_sess->closed = true;
    xizi_share_page_manager.delete_share_pages(backend);

    /* after teardown the id must be gone from this thread's server-session map */
    assert(rbt_search(&thd->svr_sess_map, closed_id) == NULL);
}

View File

@ -43,7 +43,7 @@ int sys_exit(struct Thread* ptask)
// free that task straightly if it's a blocked task
if (ptask->state == BLOCKED) {
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(ptask);
tlo->free_thread(ptask);
}
// yield current task in case it wants to exit itself
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);

View File

@ -40,9 +40,9 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
struct ThreadStackPointer loaded_sp = load_user_stack(pmemspace, argv);
if (loaded_sp.stack_idx == -1) {
ERROR("Uable to load params to memspace.\n");
/* memspace is freed alone with free_pcb() */
/* memspace is freed alone with free_thread() */
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
tlo->free_thread(task);
return -1;
}

View File

@ -236,7 +236,7 @@ struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** arg
}
/* allocate memory space for user stack */
uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE);
uintptr_t* stack_bottom = (uintptr_t*)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE);
if (UNLIKELY(stack_bottom == NULL)) {
ERROR("No memory to alloc user stack.\n");
handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);

View File

@ -49,7 +49,7 @@ struct Thread* max_priority_runnable_task(void)
} else if (task->dead && task->state != RUNNING) {
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
tlo->free_thread(task);
return NULL;
}
}
@ -67,7 +67,7 @@ struct Thread* round_robin_runnable_task(uint32_t priority)
return task;
} else if (task->dead && task->state != RUNNING) {
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
tlo->free_thread(task);
return NULL;
}
}

View File

@ -99,54 +99,6 @@ static void _task_manager_init()
ready_task_priority = 0;
}
/// @brief Return every system resource a thread still owns before it is freed:
/// all of its server- and client-side IPC sessions, its registered server
/// identifier (if any), and any IRQs it bound.
/// @param ptask thread being torn down; must not be NULL
/// @return always 0
/// NOTE(review): assumes ptask is no longer runnable when this is called — confirm at call sites.
int _task_return_sys_resources(struct Thread* ptask)
{
assert(ptask != NULL);
/* handle sessions for condition 1, ref. delete_share_pages() */
// close all server_sessions
// each close unlinks the head node from svr_sess_listhead, so the loop terminates
while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->svr_sess_map.root;
struct server_session* svr_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
server_close_session(ptask, svr_session);
}
// close all client_sessions
while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->cli_sess_map.root;
struct client_session* cli_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
client_close_session(ptask, cli_session);
// info server that session is closed
// NOTE(review): session_backend is read AFTER client_close_session(); this is only
// safe if delete_share_pages() keeps the backend alive while the server side still
// references it (the &server_side address is handed to the server below) — verify.
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(cli_session);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
// wake the server so it can process the pending session-close notification
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
}
}
/* delete server identifier */
if (ptask->server_identifier.meta != NULL) {
// @todo figure out server-identifier ownership
struct TraceTag server_identifier_owner;
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
assert(server_identifier_owner.meta != NULL);
assert(DeleteResource(&ptask->server_identifier, &server_identifier_owner));
}
// delete registered irq if there is one
if (ptask->bind_irq) {
sys_unbind_irq_all(ptask);
}
return 0;
}
extern void trap_return(void);
__attribute__((optimize("O0"))) void task_prepare_enter()
{
@ -162,60 +114,120 @@ static void _free_thread(struct Thread* task)
ERROR("deallocating a NULL task\n");
return;
}
assert(task->state >= INIT);
assert(task->memspace != NULL);
_task_return_sys_resources(task);
// ignore [name, tid, dead, ]
// case thread context [kern_stack_addr, ]
/* free thread's user stack */
if (task->thread_context.user_stack_idx != -1) {
// stack is mapped in vspace, so it should be freed from pgdir
assert(task->thread_context.user_stack_idx >= 0 && task->thread_context.user_stack_idx < 64);
assert(task->memspace != NULL);
/* the stack must have be set in memspace if bitmap has been set */
assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task->thread_context.uspace_stack_addr, USER_STACK_SIZE));
bitmap64_free(&task->memspace->thread_stack_idx_bitmap, task->thread_context.user_stack_idx);
/* thread's user stack space is also allocated for kernel free space */
assert(kfree((char*)task->thread_context.ustack_kvaddr));
if (task->memspace != NULL) {
task->memspace->mem_size -= USER_STACK_SIZE;
}
}
// remove thread from used task list
task_node_leave_list(task);
/* free memspace if needed to */
if (task->memspace != NULL) {
/* free thread's kernel stack */
if (task->thread_context.kern_stack_addr) {
// kfree_by_ownership(task->memspace->kernspace_mem_usage.tag, (char*)task->thread_context.kern_stack_addr);
/* 1. close all ipcall sessions */
ERROR_FREE
{
/* handle sessions for condition 1, ref. delete_share_pages() */
// close all server_sessions
while (!IS_DOUBLE_LIST_EMPTY(&task->svr_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->svr_sess_map.root;
struct server_session* svr_session = CONTAINER_OF(task->svr_sess_listhead.next, struct server_session, node);
server_close_session(task, svr_session);
}
// awake deamon in this memspace
if (task->memspace->thread_to_notify != NULL) {
if (task->memspace->thread_to_notify != task) {
if (task->memspace->thread_to_notify->state == BLOCKED) {
xizi_task_manager.task_unblock(task->memspace->thread_to_notify);
} else {
task->memspace->thread_to_notify->advance_unblock = true;
// close all client_sessions
while (!IS_DOUBLE_LIST_EMPTY(&task->cli_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->cli_sess_map.root;
struct client_session* cli_session = CONTAINER_OF(task->cli_sess_listhead.next, struct client_session, node);
client_close_session(task, cli_session);
// info server that session is closed
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(cli_session);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
} else if (task->memspace->thread_to_notify == task) {
task->memspace->thread_to_notify = NULL;
}
}
doubleListDel(&task->memspace_list_node);
assert(IS_DOUBLE_LIST_EMPTY(&task->svr_sess_listhead));
assert(IS_DOUBLE_LIST_EMPTY(&task->cli_sess_listhead));
// assert(rbt_is_empty(&task->svr_sess_map));
// assert(rbt_is_empty(&task->cli_sess_map));
/* free memspace if thread is the last one using it */
if (IS_DOUBLE_LIST_EMPTY(&task->memspace->thread_list_guard)) {
// free memspace
free_memspace(task->memspace);
/// @todo handle server transition
/* delete server identifier */
if (task->server_identifier.meta != NULL) {
// @todo figure out server-identifier ownership
struct TraceTag server_identifier_owner;
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
assert(server_identifier_owner.meta != NULL);
assert(DeleteResource(&task->server_identifier, &server_identifier_owner));
}
}
/* 2. quit interrupt handling */
ERROR_FREE
{
// delete registered irq if there is one
if (task->bind_irq) {
sys_unbind_irq_all(task);
}
}
/* 3. quit schedule */
ERROR_FREE
{
// remove thread from used task list
task_node_leave_list(task);
/// ignore [ticks, sleep context, state]
}
/* 3. free context */
ERROR_FREE
{
/* free thread's kernel stack */
assert(task->thread_context.kern_stack_addr != (uintptr_t)NULL);
assert(kfree_by_ownership(task->memspace->kernspace_mem_usage.tag, (void*)task->thread_context.kern_stack_addr));
/* free thread's user stack */
if (task->thread_context.user_stack_idx != -1) {
// stack is mapped in vspace, so it should be freed from pgdir
assert(task->memspace != NULL);
assert(task->thread_context.user_stack_idx >= 0 && task->thread_context.user_stack_idx < 64);
/* the stack must have be set in memspace if bitmap has been set */
assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task->thread_context.uspace_stack_addr, USER_STACK_SIZE));
bitmap64_free(&task->memspace->thread_stack_idx_bitmap, task->thread_context.user_stack_idx);
/* thread's user stack space is also allocated for kernel free space */
assert(kfree_by_ownership(task->memspace->kernspace_mem_usage.tag, (char*)task->thread_context.ustack_kvaddr));
}
}
/* free memspace if needed to */
doubleListDel(&task->memspace_list_node);
/* free memspace if thread is the last one using it */
if (IS_DOUBLE_LIST_EMPTY(&task->memspace->thread_list_guard)) {
// free memspace
free_memspace(task->memspace);
} else if (task->memspace->thread_to_notify != NULL) {
// awake deamon in this memspace
if (task->memspace->thread_to_notify != task) {
if (task->memspace->thread_to_notify->state == BLOCKED) {
xizi_task_manager.task_unblock(task->memspace->thread_to_notify);
} else {
task->memspace->thread_to_notify->advance_unblock = true;
}
} else if (task->memspace->thread_to_notify == task) {
task->memspace->thread_to_notify = NULL;
}
}
// free task back to allocator
slab_free(&xizi_task_manager.task_allocator, (void*)task);
ERROR_FREE
{
slab_free(&xizi_task_manager.task_allocator, (void*)task);
}
}
/* alloc a new task with init */
@ -280,12 +292,13 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
// [name]
// [schedule related]
task->state = INIT;
return task;
}
struct TaskLifecycleOperations task_lifecycle_ops = {
.new_thread = _new_thread,
.free_pcb = _free_thread,
.free_thread = _free_thread,
};
static void _task_set_default_schedule_attr(struct Thread* task)
@ -311,7 +324,7 @@ extern void context_switch(struct context**, struct context*);
static void _scheduler(struct SchedulerRightGroup right_group)
{
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
// struct XiziTrapDriver* p_intr_driver = AchieveResource(&right_group.intr_driver_tag);
// struct XiziTrapDriver* p_intr_driver = AchieveResource(&right_group.intr_driver_tag);
struct Thread* next_task;
struct CPU* cpu = cur_cpu();