From af1ceec30876663224b287b6a5fa95878f92c09f Mon Sep 17 00:00:00 2001
From: TXuian <1163589503@qq.com>
Date: Tue, 24 Dec 2024 02:28:20 +0800
Subject: [PATCH] Add schedule node

---
 .../arch/arm/armv8-a/cortex-a55/core.h        |  2 +-
 .../XiZi_AIoT/softkernel/include/rbtree.h     |  5 +-
 .../XiZi_AIoT/softkernel/include/scheduler.h  |  4 +-
 .../XiZi_AIoT/softkernel/include/task.h       |  8 +-
 .../XiZi_AIoT/softkernel/syscall/sys_kill.c   | 61 +++++--------
 .../XiZi_AIoT/softkernel/syscall/sys_mmap.c   |  4 +-
 .../softkernel/syscall/sys_poll_session.c     |  2 +-
 .../softkernel/syscall/sys_register_irq.c     |  4 +-
 .../XiZi_AIoT/softkernel/syscall/sys_state.c  | 91 +++++------------
 .../XiZi_AIoT/softkernel/task/schedule.c      | 52 +++++------
 .../XiZi_AIoT/softkernel/task/semaphore.c     |  1 -
 Ubiquitous/XiZi_AIoT/softkernel/task/task.c   |  4 +-
 .../XiZi_AIoT/softkernel/tools/rbtree.c       | 14 +--
 .../softkernel/trap/clock_irq_handler.c       |  5 +-
 14 files changed, 110 insertions(+), 147 deletions(-)

diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h
index 936804fbb..9312dfa79 100644
--- a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h
+++ b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h
@@ -73,7 +73,7 @@ Modification:
 
 #include "cortex_a55.h"
 
-#define NR_CPU 1 // maximum number of CPUs
+#define NR_CPU 4 // maximum number of CPUs
 
 static inline uintptr_t arch_curr_tick()
 {
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h
index f62bd3eae..3ff77e1b9 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h
+++ b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h
@@ -33,13 +33,14 @@ typedef struct RbtTree {
     int nr_ele;
 } RbtTree;
 
-typedef void(rbt_traverse_fn)(RbtNode* node);
+// returns whether the traversal should continue
+typedef bool(rbt_traverse_fn)(RbtNode* node, void* data);
 
 void rbtree_init(RbtTree* tree);
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data);
 RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
 int rbt_delete(RbtTree* tree, uintptr_t key);
-void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn);
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data);
 
 void module_rbt_factory_init(TraceTag* _softkernel_tag);
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h
index 45885d3ec..94f657f61 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h
+++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h
@@ -9,13 +9,13 @@ typedef uintptr_t snode_id_t;
 
 enum ThreadState {
-    INIT = 0,
+    NEVER_RUN = 0,
+    INIT,
     READY,
     RUNNING,
     DEAD,
     BLOCKED,
     SLEEPING,
-    NEVER_RUN,
 
     NR_STATE,
 };
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h
index f7cc37c9f..267c66388 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h
+++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h
@@ -49,6 +49,8 @@ Modification:
 #define TASK_NAME_MAX_LEN 16
 #define SLEEP_MONITOR_CORE 0
 
+typedef int tid_t;
+
 /* Thread Control Block */
 struct ThreadContext {
     struct Thread* task; // process of current thread
@@ -95,12 +97,6 @@ struct Thread {
     bool advance_unblock; // @todo abandon
 
     /* task schedule attributes */
-    // struct double_list_node node;
-    // struct TaskSleepContext sleep_context;
-    // enum ThreadState state;
-    // int priority; // priority
-    // int remain_tick;
-    // int maxium_tick;
     struct ScheduleNode snode;
 };
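Note: the reworked rbt_traverse_fn above threads a caller-owned pointer through the walk and lets the callback stop it early. A minimal sketch of the intended calling pattern, assuming only the declarations in rbtree.h (count_node and pool_size are hypothetical names):

    #include "rbtree.h"

    // visit every node, accumulating into caller state passed via `data`
    static bool count_node(RbtNode* node, void* data)
    {
        int* counter = (int*)data;
        (*counter)++;
        return true; // true: keep walking; false: stop the traversal
    }

    static int pool_size(RbtTree* pool)
    {
        int counter = 0;
        rbt_traverse(pool, count_node, (void*)&counter);
        return counter;
    }
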
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c
index 9510ca226..53c181738 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c
@@ -27,53 +27,38 @@ Author: AIIT XUOS Lab
 Modification:
 1. first version
 *************************************************/
+#include "task.h"
 #include "trap_common.h"
-#include "task.h"
+
+extern int sys_exit(struct Thread* task);
 
+static bool kill_succ;
+
+// stop the walk once the target tid has been killed
+static bool kill_task(RbtNode* node, void* id)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    tid_t target_id = *(tid_t*)id;
+
+    if (thd->tid == target_id) {
+        sys_exit(thd);
+        kill_succ = true;
+        return false;
+    }
+
+    return true;
+}
 
-extern int sys_exit(struct Thread* task);
 int sys_kill(int id)
 {
-    struct Thread* task = NULL;
-    // check if task is a running one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
+    kill_succ = false;
+    for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
+        rbt_traverse(&g_scheduler.snode_state_pool[pool_id], kill_task, (void*)&id);
     }
 
-    // check if task is a blocking one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
+    if (kill_succ) {
+        return 0;
     }
-
-    struct ksemaphore* sem = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node)
-    {
-        task = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node)
-        {
-            sys_exit(task);
-            return 0;
-        }
-    }
-
-    // check if task is a ready one
-    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
-        {
-            if (task->tid == id) {
-                sys_exit(task);
-                return 0;
-            }
-        }
-    }
-
     return -1;
 }
\ No newline at end of file
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c
index 2739a0968..6cd460257 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c
@@ -118,8 +118,8 @@ int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info
     }
 
     uintptr_t paddr_to_map = *paddr;
-    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 1) {
-        ERROR("mapping invalid memory: 0x%p\n", paddr_to_map);
+    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 2) {
+        ERROR("mapping invalid memory: 0x%p by %d\n", paddr_to_map, cur_task->tid);
         return -1;
     }
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c
index 691b1fffa..1cb39c55a 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c
@@ -114,7 +114,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
     if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
         task_yield(cur_task);
         // @todo support blocking(now bug at 4 cores running)
-        // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
+        task_block(cur_task);
     }
     return 0;
 }
\ No newline at end of file
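Note: sys_kill above reports success through the file-scope kill_succ flag, which is only safe if syscalls are serialized across the (now four) cores. A reentrant variant can carry the result in the data pointer the new traversal API already provides; a sketch under that assumption (kill_ctx and sys_kill_reentrant are hypothetical names):

    struct kill_ctx {
        tid_t target_id; // tid being killed
        bool done;       // set once the target thread has exited
    };

    static bool kill_task_ctx(RbtNode* node, void* data)
    {
        struct kill_ctx* ctx = (struct kill_ctx*)data;
        struct Thread* thd = ((struct ScheduleNode*)node->data)->pthd;
        if (thd->tid == ctx->target_id) {
            sys_exit(thd);
            ctx->done = true;
            return false; // target handled: abort the walk
        }
        return true;
    }

    int sys_kill_reentrant(int id)
    {
        struct kill_ctx ctx = { .target_id = id, .done = false };
        // stop scanning pools as soon as the target is found
        for (int pool_id = 0; pool_id < NR_STATE && !ctx.done; pool_id++) {
            rbt_traverse(&g_scheduler.snode_state_pool[pool_id], kill_task_ctx, (void*)&ctx);
        }
        return ctx.done ? 0 : -1;
    }
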
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c
index 1eaca105d..5b02e91fe 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c
@@ -126,7 +126,9 @@ int sys_register_irq(int irq_num, int irq_opcode)
         struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
         kernel_irq_proxy = tlo->new_thread(pmemspace);
-        kernel_irq_proxy->snode.state = NEVER_RUN;
+        task_trans_sched_state(&kernel_irq_proxy->snode, //
+            &g_scheduler.snode_state_pool[INIT], //
+            &g_scheduler.snode_state_pool[NEVER_RUN], NEVER_RUN);
     }
 
     // bind irq to session
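Note: every call to task_trans_sched_state, like the one above, names both pools plus the target state, and the three must agree by hand at each call site. Since the pools are indexed by ThreadState throughout this patch, a thin wrapper could derive the pool pair from the states; a sketch (sched_move is a hypothetical name):

    static inline bool sched_move(struct ScheduleNode* snode, enum ThreadState from, enum ThreadState to)
    {
        // the pool array is indexed by state, so the pools follow from the states
        return task_trans_sched_state(snode,
            &g_scheduler.snode_state_pool[from],
            &g_scheduler.snode_state_pool[to], to);
    }

    // usage, equivalent to the transition in sys_register_irq above:
    // sched_move(&kernel_irq_proxy->snode, INIT, NEVER_RUN);
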
"NULL" : global_cpus[i].task->name)); } SHOWINFO_BORDER_LINE(); LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)"); - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node) - { - LOG_PRINTF("%-8s", "RUNNING"); - SHOWTASK_TASK_BASE_INFO(task); - } - for (int i = 0; i < TASK_MAX_PRIORITY; i++) { - if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) { - continue; - } - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node) - { - switch (task->state) { - case INIT: - LOG_PRINTF("%-8s", "INIT"); - break; - case READY: - LOG_PRINTF("%-8s", "READY"); - break; - case RUNNING: - LOG_PRINTF("%-8s", "RUNNING"); - break; - case DEAD: - LOG_PRINTF("%-8s", "DEAD"); - break; - default: - break; - } - - SHOWTASK_TASK_BASE_INFO(task); - } - } - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node) - { - LOG_PRINTF("%-8s", "BLOCK"); - SHOWTASK_TASK_BASE_INFO(task); - } - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_sleep_list_head, node) - { - LOG_PRINTF("%-8s", "SLEEP"); - SHOWTASK_TASK_BASE_INFO(task); - } - - struct ksemaphore* sem = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node) - { - task = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node) - { - LOG_PRINTF("%-8s", "BLOCK"); - SHOWTASK_TASK_BASE_INFO(task); - } + for (int pool_id = INIT; pool_id < NR_STATE; pool_id++) { + rbt_traverse(&g_scheduler.snode_state_pool[pool_id], print_info, NULL); } SHOWINFO_BORDER_LINE(); @@ -150,7 +129,7 @@ void show_cpu(void) assert(current_task != NULL); LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n"); - LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick); + LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->snode.sched_context.remain_tick, current_task->snode.sched_context.remain_tick); LOG_PRINTF("***********************************************************\n"); return; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index 4b926891d..0b6031004 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -30,33 +30,31 @@ Modification: #include "log.h" #include "schedule_algo.h" +static struct Thread* next_runable_task; + +bool find_runable_task(RbtNode* node, void* data) +{ + struct ScheduleNode* snode = (struct ScheduleNode*)node->data; + struct Thread* thd = snode->pthd; + + if (!thd->dead) { + next_runable_task = thd; + return false; + } else { + struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); + tlo->free_pcb(thd); + return false; + } + + return true; +} + struct Thread* max_priority_runnable_task(void) { - static struct Thread* task = NULL; - // static int priority = 0; - - // priority = __builtin_ffs(ready_task_priority) - 1; - // if (priority > 31 || priority < 0) { - // return NULL; - // } - - // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - // { - // assert(task != NULL); - // if (task->state == READY && !task->dead) { - // // found a runnable task, stop this look up - // return task; - // } else if (task->dead && task->state != RUNNING) { - - // struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, 
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c
index 4b926891d..0b6031004 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c
@@ -30,33 +30,32 @@ Modification:
 #include "log.h"
 #include "schedule_algo.h"
 
+static struct Thread* next_runnable_task;
+
+// pick the first live thread; reap a dead one and stop, since freeing
+// the pcb mutates the READY pool under the running traversal
+bool find_runnable_task(RbtNode* node, void* data)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+
+    if (!thd->dead) {
+        next_runnable_task = thd;
+    } else {
+        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
+        tlo->free_pcb(thd);
+    }
+
+    // stop either way: a candidate was found or the tree just changed
+    return false;
+}
+
 struct Thread* max_priority_runnable_task(void)
 {
-    static struct Thread* task = NULL;
-    // static int priority = 0;
-
-    // priority = __builtin_ffs(ready_task_priority) - 1;
-    // if (priority > 31 || priority < 0) {
-    //     return NULL;
-    // }
-
-    // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
-    // {
-    //     assert(task != NULL);
-    //     if (task->state == READY && !task->dead) {
-    //         // found a runnable task, stop this look up
-    //         return task;
-    //     } else if (task->dead && task->state != RUNNING) {
-
-    //         struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-    //         tlo->free_pcb(task);
-    //         return NULL;
-    //     }
-    // }
-    if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) {
-        return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd;
-    }
-    return NULL;
+    /// @todo better strategy
+    next_runnable_task = NULL;
+    rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runnable_task, NULL);
+    return next_runnable_task;
 }
 
 #include "multicores.h"
@@ -79,6 +77,8 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
 
 bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state)
 {
+    assert(snode != NULL);
+    // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name);
     assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL);
     if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) {
         DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid);
@@ -98,7 +98,6 @@ void task_dead(struct Thread* thd)
 {
     assert(thd != NULL);
     struct ScheduleNode* snode = &thd->snode;
-    enum ThreadState thd_cur_state = snode->state;
 
     assert(snode->state == READY);
 
@@ -106,6 +105,7 @@ void task_dead(struct Thread* thd)
         &g_scheduler.snode_state_pool[READY], //
         &g_scheduler.snode_state_pool[DEAD], DEAD);
-    assert(trans_res = true);
+    assert(trans_res == true);
+    assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[DEAD], snode->snode_id));
 
     return;
 }
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c
index 65d0626f9..90b79f46f 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c
@@ -154,7 +154,6 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
         return false;
     }
 
-    struct Thread* thd = NULL;
     // by design: no waking any waiting threads
     rbt_delete(&sem_pool->sem_pool_map, sem_id);
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c
index 4b29d99ca..726355542 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c
@@ -83,7 +83,7 @@ static void _task_manager_init()
     }
 
     // tid pool
-    xizi_task_manager.next_pid = 0;
+    xizi_task_manager.next_pid = 1;
 
     // init priority bit map
     ready_task_priority = 0;
@@ -221,6 +221,7 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
     }
 
     // [schedule related]
+    task->tid = xizi_task_manager.next_pid++;
     if (!init_schedule_node(&task->snode, task)) {
         ERROR("Not enough memory\n");
         slab_free(&xizi_task_manager.task_allocator, (void*)task);
@@ -238,7 +239,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
 
     ERROR_FREE {
         /* init basic task ref member */
-        task->tid = xizi_task_manager.next_pid++;
         task->bind_irq = false;
 
         /* vm & memory member */
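Note: find_runnable_task in schedule.c above reaps at most one dead pcb per scheduling pass, because free_pcb mutates the READY pool and the walk must stop immediately (the traversal fix below makes that stop take effect across the whole tree). If bulk reaping were ever wanted, the walk would have to restart after every deletion; a sketch under that constraint (grab_dead and reap_dead_threads are hypothetical names):

    static bool grab_dead(RbtNode* node, void* data)
    {
        struct Thread* thd = ((struct ScheduleNode*)node->data)->pthd;
        if (thd->dead) {
            *(struct Thread**)data = thd; // hand the victim back to the caller
            return false; // stop: freeing it will mutate the tree
        }
        return true;
    }

    static void reap_dead_threads(RbtTree* pool)
    {
        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
        struct Thread* victim = NULL;
        do { // restart the walk after each free, since nodes vanish from the pool
            victim = NULL;
            rbt_traverse(pool, grab_dead, (void*)&victim);
            if (victim != NULL) {
                tlo->free_pcb(victim);
            }
        } while (victim != NULL);
    }
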
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c
index 14430e1b8..b3e356465 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c
@@ -466,17 +466,21 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
     return RBTTREE_DELETE_SUCC;
 }
 
-void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn)
+// propagate a callback's stop request across sibling subtrees, so returning false aborts the whole traversal
+bool rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn, void* data)
 {
     if (node == NULL) {
-        return;
+        return true;
     }
-    fn(node);
-    rbt_traverse_inner(node->left, fn);
-    rbt_traverse_inner(node->right, fn);
+
+    if (!fn(node, data)) {
+        return false;
+    }
+    return rbt_traverse_inner(node->left, fn, data)
+        && rbt_traverse_inner(node->right, fn, data);
 }
 
-void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn)
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data)
 {
-    rbt_traverse_inner(tree->root, fn);
+    rbt_traverse_inner(tree->root, fn, data);
 }
\ No newline at end of file
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c
index c22faad7f..1e691fced 100644
--- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c
+++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c
@@ -62,9 +62,10 @@ void hw_current_second(uintptr_t* second)
     *second = p_clock_driver->get_second();
 }
 
-void count_down_sleeping_task(RbtNode* node)
+bool count_down_sleeping_task(RbtNode* node, void* data)
 {
     /// @todo implement
+    return true; // no-op for now: keep visiting the rest of the pool
 }
 
 uint64_t global_tick = 0;
@@ -86,7 +87,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
     }
 
     // todo: cpu 0 will handle sleeping thread
-    rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task);
+    rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task, NULL);
     // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
     // {
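Note: count_down_sleeping_task above is still a stub. One possible shape, assuming a per-snode sleep counter lives in sched_context (left_sleep_tick is a hypothetical field) and that waking is a SLEEPING-to-READY pool transition, which mutates the tree and therefore ends the walk:

    bool count_down_sleeping_task(RbtNode* node, void* data)
    {
        struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
        if (snode->sched_context.left_sleep_tick > 0) {
            snode->sched_context.left_sleep_tick--;
        }
        if (snode->sched_context.left_sleep_tick == 0) {
            // waking moves the snode between pools; stop since the tree changed
            task_trans_sched_state(snode,
                &g_scheduler.snode_state_pool[SLEEPING],
                &g_scheduler.snode_state_pool[READY], READY);
            return false;
        }
        return true; // still sleeping: keep counting down the rest
    }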