From e5df6012afbcc15c561bb8c3cacd6d118da3d50c Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Sun, 28 Apr 2024 15:49:41 +0800 Subject: [PATCH] Support blocking task. --- .../XiZi_AIoT/softkernel/include/task.h | 7 ++++ .../softkernel/syscall/sys_poll_session.c | 9 ++++++ .../XiZi_AIoT/softkernel/syscall/sys_state.c | 8 +++++ .../XiZi_AIoT/softkernel/syscall/sys_yield.c | 12 ++++++- Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 32 ++++++++++++++++--- .../XiZi_AIoT/softkernel/trap/abort_handler.c | 2 ++ 6 files changed, 64 insertions(+), 6 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h index a69701ee6..da65da17f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h @@ -85,6 +85,7 @@ struct TaskMicroDescriptor { /* task communication resources */ struct double_list_node cli_sess_listhead; struct double_list_node svr_sess_listhead; + bool current_ipc_handled; struct KBuddy* massive_ipc_allocator; struct TraceTag server_identifier; @@ -103,6 +104,7 @@ struct SchedulerRightGroup { struct XiziTaskManager { struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */ + struct double_list_node task_blocked_list_head; struct slab_allocator task_allocator; struct slab_allocator task_buddy_allocator; uint32_t next_pid; @@ -120,8 +122,13 @@ struct XiziTaskManager { struct TaskMicroDescriptor* (*next_runnable_task)(void); /* function that's runing by kernel thread context, schedule use tasks */ void (*task_scheduler)(struct SchedulerRightGroup); + + /* handle task state */ /* call to yield current use task */ void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking); + /* block and unblock task */ + void (*task_block)(struct TaskMicroDescriptor* task); + void (*task_unblock)(struct TaskMicroDescriptor* task); /* set task priority */ void 
(*set_cur_task_priority)(int priority); }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 66275cacb..4a9eb2999 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -55,6 +55,15 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) return -1; } // update session_backend + // if current session is handled + if (server_session->head != userland_session_arr[i].head) { + struct TaskMicroDescriptor* client = SERVER_SESSION_BACKEND(server_session)->client; + if (client->state == BLOCKED) { + xizi_task_manager.task_unblock(client); + } else { + client->current_ipc_handled = true; + } + } server_session->head = userland_session_arr[i].head; server_session->tail = userland_session_arr[i].tail; doubleListDel(cur_node); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c index ff9e783e9..01309cb22 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c @@ -85,6 +85,14 @@ void show_tasks(void) LOG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->mem_size >> 10); } } + + DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node) + { + LOG_PRINTF(" BLOCK "); + _padding(task->name); + LOG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->mem_size >> 10); + } + LOG_PRINTF("******************************************************\n"); return; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c index 19e112468..7db762765 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c @@ -35,6 +35,16 @@ Modification: int sys_yield(task_yield_reason reason) { - 
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); + struct TaskMicroDescriptor* cur_task = cur_cpu()->task; + xizi_task_manager.task_yield_noschedule(cur_task, false); + + // handle ipc block + if ((reason & SYS_TASK_YIELD_BLOCK_IPC) != 0) { + if (cur_task->current_ipc_handled) { + cur_task->current_ipc_handled = false; + } else { + xizi_task_manager.task_block(cur_task); + } + } return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index a81bbc920..b7133e3fe 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -48,6 +48,7 @@ static void _task_manager_init() for (int i = 0; i < TASK_MAX_PRIORITY; i++) { doubleListNodeInit(&xizi_task_manager.task_list_head[i]); } + doubleListNodeInit(&xizi_task_manager.task_blocked_list_head); // init task (slab) allocator slab_init(&xizi_task_manager.task_allocator, sizeof(struct TaskMicroDescriptor)); slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy)); @@ -271,11 +272,7 @@ static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocki // rearrage current task position doubleListDel(&task->node); if (task->state == RUNNING) { - if (!blocking) { - task->state = READY; - } else { - task->state = BLOCKED; - } + task->state = READY; } task->remain_tick = TASK_CLOCK_TICK; if (task == cur_cpu()->task) { @@ -284,6 +281,28 @@ static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocki doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); } +static void _task_block(struct TaskMicroDescriptor* task) +{ + assert(task != NULL); + assert(task->state != RUNNING); + doubleListDel(&task->node); + if (xizi_task_manager.task_list_head[task->priority].next == &xizi_task_manager.task_list_head[task->priority]) { + ready_task_priority &= ~(1 << task->priority); + } + task->state = BLOCKED; + 
doubleListAddOnHead(&task->node, &xizi_task_manager.task_blocked_list_head); +} + +static void _task_unblock(struct TaskMicroDescriptor* task) +{ + assert(task != NULL); + assert(task->state == BLOCKED); + doubleListDel(&task->node); + task->state = READY; + doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]); + ready_task_priority |= (1 << task->priority); +} + static void _set_cur_task_priority(int priority) { if (priority < 0 || priority >= TASK_MAX_PRIORITY) { @@ -312,6 +331,9 @@ struct XiziTaskManager xizi_task_manager = { .next_runnable_task = max_priority_runnable_task, .task_scheduler = _scheduler, + + .task_block = _task_block, + .task_unblock = _task_unblock, .task_yield_noschedule = _task_yield_noschedule, .set_cur_task_priority = _set_cur_task_priority }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c index a378ef5f9..1a8d9b37a 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c @@ -65,6 +65,7 @@ void dabort_handler(struct trapframe* r) sys_exit(cur_task); assert(cur_cpu()->task == NULL); context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler); + panic("dabort end should never be reached.\n"); } void iabort_handler(struct trapframe* r) @@ -83,4 +84,5 @@ sys_exit(cur_task); assert(cur_cpu()->task == NULL); context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler); + panic("iabort end should never be reached.\n"); }