add ERROR_FREE sign, break task manager

This commit is contained in:
TXuian 2024-10-31 23:33:58 +08:00
parent ce727442de
commit 3d43cb9644
13 changed files with 197 additions and 113 deletions

View File

@ -41,3 +41,5 @@ extern void panic(char*);
#define LIKELY(exp) __builtin_expect(exp, 1)
#define UNLIKELY(exp) __builtin_expect(exp, 0)
#define ERROR_FREE

View File

@ -46,22 +46,23 @@ static inline void bitmap64_init(struct bitmap64* bitmap)
/// @brief Allocate the lowest free (zero) bit in the 64-bit map.
/// @param bitmap bitmap to allocate from; its `map` member holds the used bits
/// @return the allocated bit index in [0, 63], or -1 if the map is full
static inline int bitmap64_alloc(struct bitmap64* bitmap)
{
    int free_bit = -1;
    // __builtin_ffsl on the complement finds the first 0 bit of the map.
    // ffsl numbers bits from 1 and returns 0 when no bit is set, i.e. when
    // the map is completely full.
    free_bit = __builtin_ffsl(~(uint64_t)(bitmap->map));
    if (free_bit == 0) {
        // bitmap is full
        return -1;
    }
    // convert the 1-based ffs result to a 0-based bit index
    free_bit -= 1;
    assert(free_bit < 64 && free_bit >= 0);
    // mark the bit as used; 1ULL keeps the shift 64-bit wide (a plain
    // `1 << n` is a 32-bit shift and is undefined for n >= 31)
    bitmap->map |= (1ULL << free_bit);
    return free_bit;
}
/// @brief Release a previously allocated bit.
/// @param bitmap bitmap the bit was allocated from
/// @param idx bit index in [0, 63]; must currently be set
static inline void bitmap64_free(struct bitmap64* bitmap, int idx)
{
    // usages of bitmap64 must be correct: freeing an unset bit is a caller bug
    assert((bitmap->map & (1ULL << idx)) != 0);
    // clear the bit; 1ULL keeps the shift and mask 64-bit wide so indices
    // >= 31 are handled without undefined behavior
    bitmap->map &= ~(1ULL << idx);
}

View File

@ -23,6 +23,7 @@ typedef struct RbtNode {
typedef struct RbtTree {
RbtNode* root;
int nr_ele;
} RbtTree;
void rbtree_init(RbtTree* tree);
@ -31,3 +32,8 @@ RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
int rbt_delete(RbtTree* tree, uintptr_t key);
void module_rbt_factory_init(TraceTag* _softkernel_tag);
/* A tree is empty exactly when its element counter has dropped to zero. */
static inline bool rbt_is_empty(RbtTree* tree)
{
    return 0 == tree->nr_ele;
}

View File

@ -92,3 +92,23 @@ struct XiziSharePageManager {
extern struct XiziSharePageManager xizi_share_page_manager;
int module_share_page_init(struct SharePageRightGroup* right_group);
/* Close a session from the client side: mark it closed, then hand the
 * backend to the share-page manager so the shared pages can be reclaimed.
 * thd must be the thread that owns the client end of the session. */
static inline void client_close_session(struct Thread* thd, struct client_session* cli_sess)
{
    assert(cli_sess != NULL);

    struct session_backend* backend = CLIENT_SESSION_BACKEND(cli_sess);
    assert(backend->client == thd);

    /* a session may only be closed once */
    assert(!cli_sess->closed);
    cli_sess->closed = true;

    xizi_share_page_manager.delete_share_pages(backend);
}
/* Close a session from the server side: mark it closed, then hand the
 * backend to the share-page manager so the shared pages can be reclaimed.
 * thd must be the thread that owns the server end of the session. */
static inline void server_close_session(struct Thread* thd, struct server_session* svr_sess)
{
    assert(svr_sess != NULL);

    struct session_backend* backend = SERVER_SESSION_BACKEND(svr_sess);
    assert(backend->server == thd);

    /* a session may only be closed once */
    assert(!svr_sess->closed);
    svr_sess->closed = true;

    xizi_share_page_manager.delete_share_pages(backend);
}

View File

@ -104,7 +104,7 @@ struct Thread {
Queue sessions_to_be_handle;
Queue sessions_in_handle;
struct TraceTag server_identifier;
bool advance_unblock;
bool advance_unblock; // @todo abandon
/* task schedule attributes */
struct double_list_node node;
@ -120,6 +120,22 @@ struct SchedulerRightGroup {
struct TraceTag mmu_driver_tag;
};
/* @todo task pool to maintain task lifetime and support fast task search */
struct GlobalTaskPool {
RbtTree thd_ref_map;
struct double_list_node thd_listing_head;
};
struct TaskScheduler {
};
struct TaskLifecycleOperations {
/* new a task control block, checkout #sys_spawn for usage */
struct Thread* (*new_thread)(struct MemSpace* pmemspace);
/* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
void (*free_pcb)(struct Thread*);
};
struct XiziTaskManager {
TraceTag tag;
/* thread schedule lists */
@ -128,6 +144,10 @@ struct XiziTaskManager {
struct double_list_node task_blocked_list_head;
struct double_list_node task_sleep_list_head;
struct XiziSemaphorePool semaphore_pool;
/* living task pool */
TraceTag task_pool_tag;
/* task lifecycle Ops */
TraceTag task_lifecycle_ops_tag;
/* mem allocator */
struct slab_allocator memspace_allocator;
@ -137,10 +157,6 @@ struct XiziTaskManager {
/* init task manager */
void (*init)();
/* new a task control block, checkout #sys_spawn for usage */
struct Thread* (*new_task_cb)(struct MemSpace* pmemspace);
/* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
void (*free_pcb)(struct Thread*);
/* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
void (*task_set_default_schedule_attr)(struct Thread*);

View File

@ -310,31 +310,42 @@ int delete_share_pages(struct session_backend* session_backend)
/* unmap share pages */
// close the session from the server's perspective
if (session_backend->server_side.closed && session_backend->server != NULL) {
rbt_delete(&session_backend->server->svr_sess_map, session_backend->session_id);
xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages);
doubleListDel(&session_backend->server_side.node);
session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->server = NULL;
ERROR_FREE
{
assert(0 == rbt_delete(&session_backend->server->svr_sess_map, session_backend->session_id));
doubleListDel(&session_backend->server_side.node);
session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->server = NULL;
}
}
// close the session from the client's perspective
if (session_backend->client_side.closed && session_backend->client != NULL) {
rbt_delete(&session_backend->client->cli_sess_map, session_backend->session_id);
xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages);
doubleListDel(&session_backend->client_side.node);
session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->client = NULL;
ERROR_FREE
{
assert(0 == rbt_delete(&session_backend->client->cli_sess_map, session_backend->session_id));
doubleListDel(&session_backend->client_side.node);
session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->client = NULL;
assert(ksemaphore_free(&xizi_task_manager.semaphore_pool, session_backend->client_sem_to_wait));
}
}
/* free the session backend */
if (session_backend->server_side.closed && session_backend->client_side.closed) {
assert(session_backend->client == NULL && session_backend->server == NULL);
kfree((void*)session_backend->buf_kernel_addr);
slab_free(SessionAllocator(), (void*)session_backend);
ERROR_FREE
{
assert(session_backend->client == NULL && session_backend->server == NULL);
assert(kfree((void*)session_backend->buf_kernel_addr));
slab_free(SessionAllocator(), (void*)session_backend);
}
}
ksemaphore_free(&xizi_task_manager.semaphore_pool, session_backend->client_sem_to_wait);
return 0;
}

View File

@ -42,7 +42,8 @@ int sys_exit(struct Thread* ptask)
ptask->dead = true;
// free that task straightly if it's a blocked task
if (ptask->state == BLOCKED) {
xizi_task_manager.free_pcb(ptask);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(ptask);
}
// yield current task in case it wants to exit itself
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);

View File

@ -124,7 +124,8 @@ int sys_register_irq(int irq_num, int irq_opcode)
xizi_pager.new_pgdir(&pmemspace->pgdir);
memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
kernel_irq_proxy = xizi_task_manager.new_task_cb(pmemspace);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
kernel_irq_proxy = tlo->new_thread(pmemspace);
kernel_irq_proxy->state = NEVER_RUN;
}

View File

@ -52,7 +52,8 @@ int sys_spawn(char* img_start, char* name, char** argv)
}
// alloc a new pcb
struct Thread* new_task_cb = xizi_task_manager.new_task_cb(pmemspace);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
struct Thread* new_task_cb = tlo->new_thread(pmemspace);
if (UNLIKELY(!new_task_cb)) {
ERROR("Unable to new task control block %x.\n");
// error task allocation may free memspace before hand

View File

@ -41,7 +41,8 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
if (loaded_sp.stack_idx == -1) {
ERROR("Uable to load params to memspace.\n");
/* memspace is freed alone with free_pcb() */
xizi_task_manager.free_pcb(task);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
return -1;
}
@ -80,7 +81,8 @@ int sys_thread(uintptr_t entry, char* name, char** argv)
// use current task's memspace
struct MemSpace* pmemspace = cur_task->memspace;
struct Thread* task = xizi_task_manager.new_task_cb(pmemspace);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
struct Thread* task = tlo->new_thread(pmemspace);
if (UNLIKELY(!task)) {
ERROR("Unable to new task control block.\n");
return -1;

View File

@ -47,7 +47,9 @@ struct Thread* max_priority_runnable_task(void)
// found a runnable task, stop this look up
return task;
} else if (task->dead && task->state != RUNNING) {
xizi_task_manager.free_pcb(task);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
return NULL;
}
}
@ -64,7 +66,8 @@ struct Thread* round_robin_runnable_task(uint32_t priority)
// found a runnable task, stop this look up
return task;
} else if (task->dead && task->state != RUNNING) {
xizi_task_manager.free_pcb(task);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_pcb(task);
return NULL;
}
}

View File

@ -43,6 +43,9 @@ Modification:
struct CPU global_cpus[NR_CPU];
uint32_t ready_task_priority;
struct GlobalTaskPool global_task_pool;
extern struct TaskLifecycleOperations task_lifecycle_ops;
static inline void task_node_leave_list(struct Thread* task)
{
doubleListDel(&task->node);
@ -65,19 +68,31 @@ static inline void task_node_add_to_ready_list_back(struct Thread* task)
static void _task_manager_init()
{
assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, //
"TaskLifeCycleOpTool", TRACER_SYSOBJECT, (void*)&task_lifecycle_ops));
// init task list to NULL
for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
doubleListNodeInit(&xizi_task_manager.task_list_head[i]);
}
/* task scheduling list */
doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
doubleListNodeInit(&xizi_task_manager.task_running_list_head);
doubleListNodeInit(&xizi_task_manager.task_sleep_list_head);
// init task (slab) allocator
slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace), "MemlpaceCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread), "TreadCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy), "DMBuddyAllocator");
/* global semaphore factory */
semaphore_pool_init(&xizi_task_manager.semaphore_pool);
/* task pool */
doubleListNodeInit(&global_task_pool.thd_listing_head);
rbtree_init(&global_task_pool.thd_ref_map);
// tid pool
xizi_task_manager.next_pid = 0;
@ -85,52 +100,26 @@ static void _task_manager_init()
ready_task_priority = 0;
}
/// @brief Allocate a raw thread control block: zeroed, tid assigned, no
/// other initialization performed.
/// @return the new block, or NULL when the slab allocator is exhausted
static struct Thread* _alloc_task_cb()
{
    struct Thread* new_thd = (struct Thread*)slab_alloc(&xizi_task_manager.task_allocator);
    if (UNLIKELY(new_thd == NULL)) {
        ERROR("Not enough memory\n");
        return NULL;
    }
    /* zero everything, then stamp the identity fields */
    memset(new_thd, 0, sizeof(*new_thd));
    new_thd->tid = xizi_task_manager.next_pid++;
    new_thd->thread_context.user_stack_idx = -1;
    return new_thd;
}
int _task_return_sys_resources(struct Thread* ptask)
{
assert(ptask != NULL);
/* handle sessions for condition 1, ref. delete_share_pages() */
struct session_backend* session_backend = NULL;
// close all server_sessions
struct server_session* server_session = NULL;
while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
assert(server_session != NULL);
session_backend = SERVER_SESSION_BACKEND(server_session);
assert(session_backend->server == ptask);
// cut the connection from task to session
server_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend);
// RbtNode* sess_ref_node = ptask->svr_sess_map.root;
struct server_session* svr_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
server_close_session(ptask, svr_session);
}
// close all client_sessions
struct client_session* client_session = NULL;
while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
assert(client_session != NULL);
session_backend = CLIENT_SESSION_BACKEND(client_session);
assert(session_backend->client == ptask);
// cut the connection from task to session
client_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend);
// RbtNode* sess_ref_node = ptask->cli_sess_map.root;
struct client_session* cli_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
client_close_session(ptask, cli_session);
// info server that session is closed
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(cli_session);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
@ -142,11 +131,13 @@ int _task_return_sys_resources(struct Thread* ptask)
}
}
/* delete server identifier */
if (ptask->server_identifier.meta != NULL) {
// @todo figure out server-identifier ownership
struct TraceTag server_identifier_owner;
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
assert(server_identifier_owner.meta != NULL);
DeleteResource(&ptask->server_identifier, &server_identifier_owner);
assert(DeleteResource(&ptask->server_identifier, &server_identifier_owner));
}
// delete registered irq if there is one
@ -157,9 +148,16 @@ int _task_return_sys_resources(struct Thread* ptask)
return 0;
}
extern void trap_return(void);
/* First-run entry stub for a task: leaves the kernel critical section and
 * then drops into trap_return, which presumably pops the prepared trapframe
 * to enter the task proper (NOTE(review): confirm against arch trap code).
 * O0 keeps the compiler from reordering/inlining this hand-laid entry path. */
__attribute__((optimize("O0"))) void task_prepare_enter()
{
xizi_leave_kernel();
trap_return();
}
/// @brief this function changes task list without locking, so it must be called inside a lock critical area
/// @param task
static void _dealloc_task_cb(struct Thread* task)
static void _free_thread(struct Thread* task)
{
if (UNLIKELY(task == NULL)) {
ERROR("deallocating a NULL task\n");
@ -222,69 +220,80 @@ static void _dealloc_task_cb(struct Thread* task)
}
/* alloc a new task with init */
extern void trap_return(void);
__attribute__((optimize("O0"))) void task_prepare_enter()
static struct Thread* _new_thread(struct MemSpace* pmemspace)
{
xizi_leave_kernel();
trap_return();
}
assert(pmemspace != NULL);
static struct Thread* _new_task_cb(struct MemSpace* pmemspace)
{
// alloc task space
struct Thread* task = _alloc_task_cb();
if (!task) {
// alloc task space
struct Thread* task = (struct Thread*)slab_alloc(&xizi_task_manager.task_allocator);
if (task == NULL) {
ERROR("Not enough memory\n");
return NULL;
}
/* init basic task member */
doubleListNodeInit(&task->cli_sess_listhead);
rbtree_init(&task->cli_sess_map);
doubleListNodeInit(&task->svr_sess_listhead);
rbtree_init(&task->svr_sess_map);
queue_init(&task->sessions_in_handle);
queue_init(&task->sessions_to_be_handle);
/* when creating a new task, memspace will be freed outside during memory shortage */
assert(pmemspace != NULL);
task->memspace = pmemspace;
/* init main thread of task */
task->thread_context.task = task;
// alloc stack page for task
if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) {
/* here inside, will no free memspace */
_dealloc_task_cb(task);
slab_free(&xizi_task_manager.task_allocator, (void*)task);
return NULL;
}
/* from now on, _new_task_cb() will not generate error */
/* init vm */
task->thread_context.user_stack_idx = -1;
doubleListNodeInit(&task->memspace_list_node);
doubleListAddOnBack(&task->memspace_list_node, &pmemspace->thread_list_guard);
ERROR_FREE
{
/* init basic task ref member */
task->tid = xizi_task_manager.next_pid++;
task->bind_irq = false;
/* set context of main thread stack */
/// stack bottom
memset((void*)task->thread_context.kern_stack_addr, 0x00, USER_STACK_SIZE);
char* sp = (char*)task->thread_context.kern_stack_addr + USER_STACK_SIZE - 4;
/* vm & memory member */
task->thread_context.user_stack_idx = -1;
task->memspace = pmemspace;
doubleListNodeInit(&task->memspace_list_node);
doubleListAddOnBack(&task->memspace_list_node, &pmemspace->thread_list_guard);
/// 1. trap frame into stack, for process to nomally return by trap_return
sp -= sizeof(*task->thread_context.trapframe);
task->thread_context.trapframe = (struct trapframe*)sp;
/* thread context */
task->thread_context.task = task;
memset((void*)task->thread_context.kern_stack_addr, 0x00, USER_STACK_SIZE);
/// stack bottom
char* sp = (char*)task->thread_context.kern_stack_addr + USER_STACK_SIZE - 4;
/// 2. context into stack
sp -= sizeof(*task->thread_context.context);
task->thread_context.context = (struct context*)sp;
arch_init_context(task->thread_context.context);
/// 1. trap frame into stack, for process to nomally return by trap_return
/// trapframe (user context)
sp -= sizeof(*task->thread_context.trapframe);
task->thread_context.trapframe = (struct trapframe*)sp;
/// 2. context into stack
// (kernel context)
sp -= sizeof(*task->thread_context.context);
task->thread_context.context = (struct context*)sp;
arch_init_context(task->thread_context.context);
/* ipc member */
doubleListNodeInit(&task->cli_sess_listhead);
doubleListNodeInit(&task->svr_sess_listhead);
rbtree_init(&task->cli_sess_map);
rbtree_init(&task->svr_sess_map);
queue_init(&task->sessions_in_handle);
queue_init(&task->sessions_to_be_handle);
/// server identifier
task->server_identifier.meta = NULL;
}
// [name]
// [schedule related]
return task;
}
/* Thread lifecycle operations. Registered under task_lifecycle_ops_tag in
 * _task_manager_init (CreateResourceTag), so callers retrieve this table
 * with GetSysObject rather than referencing the symbol directly. */
struct TaskLifecycleOperations task_lifecycle_ops = {
.new_thread = _new_thread,
.free_pcb = _free_thread,
};
static void _task_set_default_schedule_attr(struct Thread* task)
{
task->remain_tick = TASK_CLOCK_TICK;
task->maxium_tick = TASK_CLOCK_TICK * 10;
task->dead = false;
task->state = READY;
task->priority = TASK_DEFAULT_PRIORITY;
task_node_add_to_ready_list_head(task);
@ -393,8 +402,6 @@ static void _set_cur_task_priority(int priority)
struct XiziTaskManager xizi_task_manager = {
.init = _task_manager_init,
.new_task_cb = _new_task_cb,
.free_pcb = _dealloc_task_cb,
.task_set_default_schedule_attr = _task_set_default_schedule_attr,
.next_runnable_task = max_priority_runnable_task,

View File

@ -255,6 +255,7 @@ static void rotate_right(RbtNode* node, RbtTree* tree)
/* Reset a tree to the empty state: zero elements and no root node. */
void rbtree_init(RbtTree* tree)
{
    tree->nr_ele = 0;
    tree->root = NULL;
}
RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree)
@ -324,14 +325,20 @@ RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree)
/// @brief Insert (key, data) into the tree.
/// @return 0 on success, -2 if the key already exists, -1 on allocation failure
int rbt_insert(RbtTree* tree, uintptr_t key, void* data)
{
    // reject duplicates up front so a node is never allocated and then wasted
    if (rbt_search(tree, key) != NULL) {
        return -2;
    }

    RbtNode* node = rbtree_createnode(key, data);
    if (node == NULL) {
        return -1;
    }

    // the duplicate check above guarantees the insertion cannot collide
    RbtNode* samenode = __rbtree_insert(node, tree);
    assert(samenode == NULL);

    tree->nr_ele++;
    return 0;
}
@ -419,6 +426,7 @@ void delete_case6(RbtTree* t, RbtNode* n)
rotate_right(n->parent, t);
}
}
void __rbtree_remove(RbtNode* node, RbtTree* tree)
{
RbtNode* left = node->left;
@ -450,5 +458,10 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
return -1;
else
__rbtree_remove(node, tree);
tree->nr_ele--;
if (rbt_is_empty(tree)) {
assert(tree->root == NULL);
}
return 0;
}