From 21304531a54bd79bb67dbccec2a800963ff02ca2 Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Tue, 24 Dec 2024 01:01:54 +0800 Subject: [PATCH 1/3] Add schedule node midway --- .../cortex-a9/imx6q-sabrelite/trap_common.c | 4 +- .../XiZi_AIoT/softkernel/include/ksemaphore.h | 2 +- .../XiZi_AIoT/softkernel/include/rbtree.h | 10 ++ .../XiZi_AIoT/softkernel/include/scheduler.h | 48 +++++- .../XiZi_AIoT/softkernel/include/task.h | 35 +--- .../softkernel/syscall/sys_close_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_exit.c | 4 +- .../softkernel/syscall/sys_poll_session.c | 2 +- .../softkernel/syscall/sys_register_irq.c | 8 +- .../XiZi_AIoT/softkernel/syscall/sys_sleep.c | 9 +- .../XiZi_AIoT/softkernel/syscall/sys_thread.c | 2 +- .../softkernel/syscall/sys_wait_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_yield.c | 2 +- .../XiZi_AIoT/softkernel/syscall/syscall.c | 2 +- .../XiZi_AIoT/softkernel/task/schedule.c | 154 ++++++++++++------ .../XiZi_AIoT/softkernel/task/semaphore.c | 26 ++- Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 125 ++++---------- .../XiZi_AIoT/softkernel/tools/rbtree.c | 25 ++- .../softkernel/trap/clock_irq_handler.c | 34 ++-- .../softkernel/trap/default_irq_handler.c | 2 +- .../softkernel/trap/software_irq_handler.c | 4 +- 21 files changed, 269 insertions(+), 237 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c b/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c index f76b56719..51aab3bad 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c +++ b/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c @@ -57,7 +57,7 @@ void panic(char* s) /* stack for different mode*/ static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE]; extern uint32_t _vector_jumper; -extern uint32_t _vector_start; +extern uint32_t* _vector_start; extern uint32_t _vector_end; void init_cpu_mode_stacks(int cpu_id) @@ -75,7 +75,7 @@ static void _sys_irq_init(int cpu_id) /* load exception vectors */ init_cpu_mode_stacks(cpu_id); if (cpu_id == 0) { - volatile uint32_t* vector_base = &_vector_start; + volatile uint32_t* vector_base = (uint32_t*)&_vector_start; // Set Interrupt handler start address vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h b/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h index d30e6732a..2e2c65dbb 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h @@ -41,7 +41,7 @@ struct ksemaphore { sem_id_t id; sem_val_t val; /* list of waiting threads */ - struct double_list_node wait_list_guard; + RbtTree wait_thd_tree; /* list to manage semaphores */ /// @todo Use RB-Tree to manage all semaphores struct double_list_node sem_list_node; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h index 0ffbdbe16..f62bd3eae 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h @@ -5,6 +5,13 @@ #include "actracer.h" +#define RBTTREE_INSERT_SECC 0 +#define RBTTREE_INSERT_FAILED -1 +#define RBTTREE_INSERT_EXISTED -2 + +#define RBTTREE_DELETE_SUCC 0 +#define RBTTREE_DELETE_FAILED -1 + // CLRS // Insertion and Deletion in a Red Black Tree enum rbt_type { @@ -26,10 +33,13 @@ typedef struct 
RbtTree { int nr_ele; } RbtTree; +typedef void(rbt_traverse_fn)(RbtNode* node); + void rbtree_init(RbtTree* tree); int rbt_insert(RbtTree* tree, uintptr_t key, void* data); RbtNode* rbt_search(RbtTree* tree, uintptr_t key); int rbt_delete(RbtTree* tree, uintptr_t key); +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn); void module_rbt_factory_init(TraceTag* _softkernel_tag); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h index fbb3d03a9..45885d3ec 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h @@ -2,20 +2,52 @@ #pragma once #include "actracer.h" #include "ksemaphore.h" +#include "rbtree.h" #define TASK_MAX_PRIORITY 32 +#define UNINIT_SNODE_ID 0 +typedef uintptr_t snode_id_t; + +enum ThreadState { + INIT = 0, + READY, + RUNNING, + DEAD, + BLOCKED, + SLEEPING, + NEVER_RUN, + NR_STATE, +}; + +typedef struct ScheduleContext { + intptr_t remain_tick; +} ScheduleContext; + +typedef struct TaskSleepContext { + int64_t remain_ms; +} TaskSleepContext; struct ScheduleNode { - TraceTag task_ref; - struct double_list_node list_node; + struct Thread* pthd; + snode_id_t snode_id; + enum ThreadState state; + + ScheduleContext sched_context; + TaskSleepContext sleep_context; }; struct Scheduler { TraceTag tag; - - struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */ - struct double_list_node task_running_list_head; - struct double_list_node task_blocked_list_head; - struct double_list_node task_sleep_list_head; + RbtTree snode_state_pool[NR_STATE]; struct XiziSemaphorePool semaphore_pool; -}; \ No newline at end of file +}; + +extern struct Scheduler g_scheduler; + +bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd); + +bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state); +void task_block(struct Thread* thd); +void task_dead(struct Thread* thd); +void task_yield(struct Thread* thd); +void task_into_ready(struct Thread* thd); \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h index 977642f15..f7cc37c9f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h @@ -41,22 +41,14 @@ Modification: #include "share_page.h" #include "spinlock.h" +#include "scheduler.h" + #define TASK_CLOCK_TICK 50 #define TASK_MAX_PRIORITY 32 #define TASK_DEFAULT_PRIORITY 2 #define TASK_NAME_MAX_LEN 16 #define SLEEP_MONITOR_CORE 0 -enum ProcState { - INIT = 0, - READY, - RUNNING, - DEAD, - BLOCKED, - SLEEPING, - NEVER_RUN, -}; - /* Thread Control Block */ struct ThreadContext { struct Thread* task; // process of current thread @@ -75,10 +67,6 @@ struct ThreadContext { struct trapframe* trapframe; }; -struct TaskSleepContext { - int64_t remain_ms; -}; - /* Process Control Block */ struct Thread { /* task name */ @@ -107,12 +95,13 @@ struct Thread { bool advance_unblock; // @todo abandon /* task schedule attributes */ - struct double_list_node node; - struct TaskSleepContext sleep_context; - enum ProcState state; - int priority; // priority - int remain_tick; - int maxium_tick; + // struct double_list_node node; + // struct TaskSleepContext sleep_context; + // enum ThreadState state; + // int priority; // priority + // int remain_tick; + // int maxium_tick; + struct ScheduleNode snode; }; 
struct SchedulerRightGroup { @@ -157,9 +146,6 @@ struct XiziTaskManager { /* init task manager */ void (*init)(); - /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */ - void (*task_set_default_schedule_attr)(struct Thread*); - /* use by task_scheduler, find next READY task, should be in locked */ struct Thread* (*next_runnable_task)(void); /* function that's runing by kernel thread context, schedule use tasks */ @@ -168,9 +154,6 @@ struct XiziTaskManager { /* handle task state */ /* call to yield current use task */ void (*task_yield_noschedule)(struct Thread* task, bool is_blocking); - /* block and unblock task */ - void (*task_block)(struct double_list_node* head, struct Thread* task); - void (*task_unblock)(struct Thread* task); /* set task priority */ void (*set_cur_task_priority)(int priority); }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c index 4ba31a83e..4823230dd 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c @@ -70,8 +70,8 @@ int sys_close_session(struct Thread* cur_task, struct Session* session) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_info->snode.state == BLOCKED) { + task_into_ready(session_backend->server); } } } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c index 4b9596de7..23bb257d6 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c @@ -41,11 +41,11 @@ int sys_exit(struct Thread* ptask) assert(ptask != NULL); ptask->dead = true; // free that task straightly if it's a blocked task - if (ptask->state == BLOCKED) { + if (ptask->snode.state == BLOCKED) { struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); tlo->free_pcb(ptask); } // yield current task in case it wants to exit itself - xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); + task_yield(cur_cpu()->task); return 0; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 8f2f14e3f..691b1fffa 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -112,7 +112,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) } if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) { - xizi_task_manager.task_yield_noschedule(cur_task, false); + task_yield(cur_task); // @todo support blocking(now bug at 4 cores running) // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task); } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index c0260db1f..1eaca105d 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -76,8 +76,8 @@ static void send_irq_to_user(int irq_num) buf->header.magic = IPC_MSG_MAGIC; buf->header.valid = 1; - if (irq_forward_table[irq_num].handle_task->state == BLOCKED) { - 
xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task); + if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) { + task_into_ready(irq_forward_table[irq_num].handle_task); } /* add session head */ @@ -92,7 +92,7 @@ int user_irq_handler(int irq, void* tf, void* arg) next_task_emergency = irq_forward_table[irq].handle_task; if (cur_cpu()->task != NULL) { - xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); + task_yield(cur_cpu()->task); } } return 0; @@ -126,7 +126,7 @@ int sys_register_irq(int irq_num, int irq_opcode) struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); kernel_irq_proxy = tlo->new_thread(pmemspace); - kernel_irq_proxy->state = NEVER_RUN; + kernel_irq_proxy->snode.state = NEVER_RUN; } // bind irq to session diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c index bce382597..3ba6bbc80 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c @@ -36,10 +36,11 @@ Modification: int sys_sleep(intptr_t ms) { struct Thread* cur_task = cur_cpu()->task; - xizi_task_manager.task_yield_noschedule(cur_task, false); - xizi_task_manager.task_block(&xizi_task_manager.task_sleep_list_head, cur_task); - cur_task->state = SLEEPING; - cur_task->sleep_context.remain_ms = ms; + task_yield(cur_task); + cur_task->snode.sleep_context.remain_ms = ms; + task_trans_sched_state(&cur_task->snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[SLEEPING], SLEEPING); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c index 533e06f10..25a372c0c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c @@ -64,7 +64,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en strncpy(task->name, last, sizeof(task->name) - 1); // init pcb schedule attributes - xizi_task_manager.task_set_default_schedule_attr(task); + task_into_ready(task); // thread init done by here if (pmemspace->thread_to_notify == NULL) { diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c index 0eea989a8..4108e180c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c @@ -60,8 +60,8 @@ int sys_wait_session(struct Session* userland_session) assert(!queue_is_empty(&server_to_call->sessions_to_be_handle)); ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait); - if (server_to_call->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_call->snode.state == BLOCKED) { + task_into_ready(session_backend->server); } return 0; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c index 1cc2689a1..fe4f128ee 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c @@ -36,6 +36,6 @@ Modification: int sys_yield(task_yield_reason reason) { struct Thread* cur_task = cur_cpu()->task; - xizi_task_manager.task_yield_noschedule(cur_task, false); + task_yield(cur_task); return 0; } \ No newline at end of file 
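
The syscall changes above (sys_exit, sys_sleep, sys_register_irq, sys_wait_session, sys_yield, ...) all funnel through the per-state RB-tree pools declared in scheduler.h instead of the old priority/blocked/sleep lists. A minimal sketch of that flow, assuming only the declarations introduced by this series (struct ScheduleNode, g_scheduler, task_trans_sched_state(), TASK_CLOCK_TICK); wake_one_thread() is a hypothetical helper, not part of the patch:

#include "scheduler.h"
#include "task.h"

/* Hypothetical helper (not in this patch): move a thread from whatever
 * state pool it currently sits in back into the READY pool. */
static void wake_one_thread(struct Thread* thd)
{
    struct ScheduleNode* snode = &thd->snode;

    /* task_trans_sched_state() deletes the node (keyed by tid) from the
     * source pool, re-inserts it into the target pool and only then
     * updates snode->state; it returns false if the node was not in the
     * source pool or the insert failed. */
    if (!task_trans_sched_state(snode,
            &g_scheduler.snode_state_pool[snode->state],
            &g_scheduler.snode_state_pool[READY], READY)) {
        return; /* e.g. already READY, or never registered via init_schedule_node() */
    }

    /* a freshly readied thread gets a full time slice, as task_into_ready() does */
    snode->sched_context.remain_tick = TASK_CLOCK_TICK;
}

task_into_ready(), task_block(), task_yield() and task_dead() in task/schedule.c further down are thin wrappers around exactly this kind of transition, each with the source and target pools pinned.
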
diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c index a0e2291e2..76135f24c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c @@ -39,7 +39,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u switch (sys_num) { case SYSCALL_TEST: - ret = arch_curr_tick(); + ret = 0; break; case SYSCALL_SPAWN: ret = sys_spawn((char*)param1, (char*)param2, (char**)param3); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index 13b85c62b..4b926891d 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -33,65 +33,123 @@ Modification: struct Thread* max_priority_runnable_task(void) { static struct Thread* task = NULL; - static int priority = 0; + // static int priority = 0; - priority = __builtin_ffs(ready_task_priority) - 1; - if (priority > 31 || priority < 0) { - return NULL; - } + // priority = __builtin_ffs(ready_task_priority) - 1; + // if (priority > 31 || priority < 0) { + // return NULL; + // } - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - { - assert(task != NULL); - if (task->state == READY && !task->dead) { - // found a runnable task, stop this look up - return task; - } else if (task->dead && task->state != RUNNING) { + // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) + // { + // assert(task != NULL); + // if (task->state == READY && !task->dead) { + // // found a runnable task, stop this look up + // return task; + // } else if (task->dead && task->state != RUNNING) { - struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); - tlo->free_pcb(task); - return NULL; - } + // struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); + // tlo->free_pcb(task); + // return NULL; + // } + // } + if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) { + return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd; } return NULL; } -struct Thread* round_robin_runnable_task(uint32_t priority) +#include "multicores.h" +#include "rbtree.h" +#include "task.h" + +bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd) { - struct Thread* task = NULL; - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - { - if (task->state == READY && !task->dead) { - // found a runnable task, stop this look up - return task; - } else if (task->dead && task->state != RUNNING) { - struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); - tlo->free_pcb(task); - return NULL; - } + snode->pthd = bind_thd; + snode->snode_id = bind_thd->tid; + snode->sched_context.remain_tick = 0; + snode->sleep_context.remain_ms = 0; + snode->state = INIT; + if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], // + snode->snode_id, (void*)snode)) { + return false; } - - return NULL; + return true; } -/* recover task priority */ -void recover_priority(void) +bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state) { - struct Thread* task = NULL; - for (int i = 1; i < TASK_MAX_PRIORITY; i++) { - if (i == 
TASK_DEFAULT_PRIORITY) - continue; - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node) - { - if (!IS_DOUBLE_LIST_EMPTY(&task->node)) { - // DEBUG("%s priority recover\n", task->name); - task->priority = TASK_DEFAULT_PRIORITY; - doubleListDel(&task->node); - doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); - i--; - break; - } - } + assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL); + if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) { + DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid); + return false; } + + if (RBTTREE_INSERT_SECC != rbt_insert(to_pool, snode->snode_id, (void*)snode)) { + DEBUG("Thread %d trans state failed\n", snode->pthd->tid); + return false; + } + + snode->state = target_state; + return true; +} + +void task_dead(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(snode->state == READY); + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[DEAD], DEAD); + assert(trans_res = true); + return; +} + +void task_block(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(thd_cur_state != RUNNING); + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[BLOCKED], BLOCKED); + assert(trans_res = true); + return; +} + +void task_into_ready(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[READY], READY); + snode->sched_context.remain_tick = TASK_CLOCK_TICK; + assert(trans_res = true); + return; +} + +void task_yield(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(thd == cur_cpu()->task && thd_cur_state == RUNNING); + cur_cpu()->task = NULL; + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[READY], READY); + assert(trans_res = true); + return; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c index fc1ed0e44..65d0626f9 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c @@ -58,7 +58,7 @@ sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val) } sem->val = val; doubleListNodeInit(&sem->sem_list_node); - doubleListNodeInit(&sem->wait_list_guard); + rbtree_init(&sem->wait_thd_tree); if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) { slab_free(&sem_pool->allocator, sem); @@ -88,7 +88,7 @@ bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id) { assert(thd != NULL); - assert(thd->state == RUNNING); + assert(thd->snode.state == RUNNING); /* find sem */ struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id); // invalid sem id @@ -105,8 +105,9 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct 
Thread* thd, sem // waiting at the sem sem->val--; - xizi_task_manager.task_yield_noschedule(thd, false); - xizi_task_manager.task_block(&sem->wait_list_guard, thd); + task_yield(thd); + task_block(thd); + assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd)); return true; } @@ -120,12 +121,11 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) } if (sem->val < 0) { - if (!IS_DOUBLE_LIST_EMPTY(&sem->wait_list_guard)) { - struct Thread* thd = CONTAINER_OF(sem->wait_list_guard.next, struct Thread, node); - assert(thd != NULL && thd->state == BLOCKED); - xizi_task_manager.task_unblock(thd); - // DEBUG("waking %s\n", thd->name); - } + assert(!rbt_is_empty(&sem->wait_thd_tree)); + RbtNode* root = sem->wait_thd_tree.root; + struct Thread* thd = (struct Thread*)root->data; + rbt_delete(&sem->wait_thd_tree, root->key); + task_into_ready(thd); } sem->val++; @@ -155,11 +155,7 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) } struct Thread* thd = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node) - { - assert(thd != NULL); - xizi_task_manager.task_unblock(thd); - } + // by design: no waking any waiting threads rbt_delete(&sem_pool->sem_pool_map, sem_id); doubleListDel(&sem->sem_list_node); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index 916ebc475..4b29d99ca 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -44,28 +44,9 @@ struct CPU global_cpus[NR_CPU]; uint32_t ready_task_priority; struct GlobalTaskPool global_task_pool; +struct Scheduler g_scheduler; extern struct TaskLifecycleOperations task_lifecycle_ops; -static inline void task_node_leave_list(struct Thread* task) -{ - doubleListDel(&task->node); - if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) { - ready_task_priority &= ~((uint32_t)1 << task->priority); - } -} - -static inline void task_node_add_to_ready_list_head(struct Thread* task) -{ - doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]); - ready_task_priority |= ((uint32_t)1 << task->priority); -} - -static inline void task_node_add_to_ready_list_back(struct Thread* task) -{ - doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); - ready_task_priority |= ((uint32_t)1 << task->priority); -} - static void _task_manager_init() { assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, // @@ -93,6 +74,14 @@ static void _task_manager_init() doubleListNodeInit(&global_task_pool.thd_listing_head); rbtree_init(&global_task_pool.thd_ref_map); + // scheduler + assert(CreateResourceTag(&g_scheduler.tag, &xizi_task_manager.tag, // + "GlobalScheduler", TRACER_SYSOBJECT, (void*)&g_scheduler)); + semaphore_pool_init(&g_scheduler.semaphore_pool); + for (int pool_id = 0; pool_id < NR_STATE; pool_id++) { + rbtree_init(&g_scheduler.snode_state_pool[pool_id]); + } + // tid pool xizi_task_manager.next_pid = 0; @@ -125,8 +114,8 @@ int _task_return_sys_resources(struct Thread* ptask) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_info->snode.state == BLOCKED) { + task_into_ready(server_to_info); } } } @@ -184,7 +173,7 @@ static void _free_thread(struct Thread* task) } // remove thread from used task list - 
task_node_leave_list(task); + task_dead(task); /* free memspace if needed to */ if (task->memspace != NULL) { @@ -196,8 +185,8 @@ static void _free_thread(struct Thread* task) // awake deamon in this memspace if (task->memspace->thread_to_notify != NULL) { if (task->memspace->thread_to_notify != task) { - if (task->memspace->thread_to_notify->state == BLOCKED) { - xizi_task_manager.task_unblock(task->memspace->thread_to_notify); + if (task->memspace->thread_to_notify->snode.state == BLOCKED) { + task_into_ready(task->memspace->thread_to_notify); } else { task->memspace->thread_to_notify->advance_unblock = true; } @@ -231,9 +220,17 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) return NULL; } + // [schedule related] + if (!init_schedule_node(&task->snode, task)) { + ERROR("Not enough memory\n"); + slab_free(&xizi_task_manager.task_allocator, (void*)task); + return NULL; + } + // alloc stack page for task if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) { /* here inside, will no free memspace */ + assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[INIT], task->snode.snode_id)); slab_free(&xizi_task_manager.task_allocator, (void*)task); return NULL; } @@ -279,7 +276,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) } // [name] - // [schedule related] return task; } @@ -289,22 +285,12 @@ struct TaskLifecycleOperations task_lifecycle_ops = { .free_pcb = _free_thread, }; -static void _task_set_default_schedule_attr(struct Thread* task) -{ - task->remain_tick = TASK_CLOCK_TICK; - task->maxium_tick = TASK_CLOCK_TICK * 10; - task->dead = false; - task->state = READY; - task->priority = TASK_DEFAULT_PRIORITY; - task_node_add_to_ready_list_head(task); -} - static void task_state_set_running(struct Thread* task) { - assert(task != NULL && task->state == READY); - task->state = RUNNING; - task_node_leave_list(task); - doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head); + assert(task != NULL && task->snode.state == READY); + task_trans_sched_state(&task->snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[RUNNING], RUNNING); } struct Thread* next_task_emergency = NULL; @@ -319,7 +305,7 @@ static void _scheduler(struct SchedulerRightGroup right_group) next_task = NULL; /* find next runnable task */ assert(cur_cpu()->task == NULL); - if (next_task_emergency != NULL && next_task_emergency->state == READY) { + if (next_task_emergency != NULL && next_task_emergency->snode.state == READY) { next_task = next_task_emergency; } else { next_task = xizi_task_manager.next_runnable_task(); @@ -340,76 +326,21 @@ static void _scheduler(struct SchedulerRightGroup right_group) assert(next_task->memspace->pgdir.pd_addr != NULL); p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr)); context_switch(&cpu->scheduler, next_task->thread_context.context); - assert(next_task->state != RUNNING); + assert(next_task->snode.state != RUNNING); } } -static void _task_yield_noschedule(struct Thread* task, bool blocking) -{ - assert(task != NULL); - /// @warning only support current task yield now - assert(task == cur_cpu()->task && task->state == RUNNING); - - // rearrage current task position - task_node_leave_list(task); - if (task->state == RUNNING) { - task->state = READY; - } - task->remain_tick = TASK_CLOCK_TICK; - cur_cpu()->task = NULL; - task_node_add_to_ready_list_back(task); -} - -static void 
_task_block(struct double_list_node* head, struct Thread* task) -{ - assert(head != NULL); - assert(task != NULL); - assert(task->state != RUNNING); - task_node_leave_list(task); - task->state = BLOCKED; - doubleListAddOnHead(&task->node, head); -} - -static void _task_unblock(struct Thread* task) -{ - assert(task != NULL); - assert(task->state == BLOCKED || task->state == SLEEPING); - task_node_leave_list(task); - task->state = READY; - task_node_add_to_ready_list_back(task); -} - /// @brief @warning not tested function /// @param priority static void _set_cur_task_priority(int priority) { - if (priority < 0 || priority >= TASK_MAX_PRIORITY) { - ERROR("priority is invalid\n"); - return; - } - - struct Thread* current_task = cur_cpu()->task; - assert(current_task != NULL && current_task->state == RUNNING); - - task_node_leave_list(current_task); - - current_task->priority = priority; - - task_node_add_to_ready_list_back(current_task); - return; } struct XiziTaskManager xizi_task_manager = { .init = _task_manager_init, - .task_set_default_schedule_attr = _task_set_default_schedule_attr, - .next_runnable_task = max_priority_runnable_task, .task_scheduler = _scheduler, - - .task_block = _task_block, - .task_unblock = _task_unblock, - .task_yield_noschedule = _task_yield_noschedule, .set_cur_task_priority = _set_cur_task_priority }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c index b194882bb..14430e1b8 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c @@ -326,20 +326,20 @@ RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree) int rbt_insert(RbtTree* tree, uintptr_t key, void* data) { if (rbt_search(tree, key) != NULL) { - return -2; + return RBTTREE_INSERT_EXISTED; } RbtNode* node = rbtree_createnode(key, data); RbtNode* samenode = NULL; if (node == NULL) - return -1; + return RBTTREE_INSERT_FAILED; else samenode = __rbtree_insert(node, tree); assert(samenode == NULL); tree->nr_ele++; - return 0; + return RBTTREE_INSERT_SECC; } void replace_node(RbtTree* t, RbtNode* oldn, RbtNode* newn) @@ -455,7 +455,7 @@ int rbt_delete(RbtTree* tree, uintptr_t key) { RbtNode* node = do_lookup(key, tree, NULL); if (node == NULL) - return -1; + return RBTTREE_DELETE_FAILED; else __rbtree_remove(node, tree); @@ -463,5 +463,20 @@ int rbt_delete(RbtTree* tree, uintptr_t key) if (rbt_is_empty(tree)) { assert(tree->root == NULL); } - return 0; + return RBTTREE_DELETE_SUCC; +} + +void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn) +{ + if (node == NULL) { + return; + } + fn(node); + rbt_traverse_inner(node->left, fn); + rbt_traverse_inner(node->right, fn); +} + +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn) +{ + rbt_traverse_inner(tree->root, fn); } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c index 937e4b219..c22faad7f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c @@ -62,6 +62,11 @@ void hw_current_second(uintptr_t* second) *second = p_clock_driver->get_second(); } +void count_down_sleeping_task(RbtNode* node) +{ + /// @todo implement +} + uint64_t global_tick = 0; int xizi_clock_handler(int irq, void* tf, void* arg) { @@ -73,24 +78,25 @@ int xizi_clock_handler(int irq, void* tf, void* arg) // handle current thread struct Thread* current_task = cur_cpu()->task; if (current_task) 
{ - current_task->remain_tick--; - current_task->maxium_tick--; - if (current_task->remain_tick == 0) { - xizi_task_manager.task_yield_noschedule(current_task, false); + struct ScheduleNode* snode = ¤t_task->snode; + snode->sched_context.remain_tick--; + if (snode->sched_context.remain_tick == 0) { + task_into_ready(current_task); } } // todo: cpu 0 will handle sleeping thread - struct Thread* thread = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node) - { - assert(thread->state == SLEEPING); - thread->sleep_context.remain_ms--; - if (thread->sleep_context.remain_ms <= 0) { - xizi_task_manager.task_unblock(thread); - break; - } - } + rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task); + + // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node) + // { + // assert(thread->state == SLEEPING); + // thread->sleep_context.remain_ms--; + // if (thread->sleep_context.remain_ms <= 0) { + // xizi_task_manager.task_unblock(thread); + // break; + // } + // } } return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c index baa3ae2de..64053ed80 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c @@ -84,7 +84,7 @@ void intr_irq_dispatch(struct trapframe* tf) // finish irq. p_intr_driver->hw_after_irq(int_info); - if (cur_cpu()->task == NULL || current_task->state != RUNNING) { + if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) { cur_cpu()->task = NULL; context_switch(¤t_task->thread_context.context, cur_cpu()->scheduler); } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c index 25c4bd1de..62422e9e0 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c @@ -56,7 +56,7 @@ void software_irq_dispatch(struct trapframe* tf) /// @todo: Handle dead task int syscall_num = -1; - if (cur_task && cur_task->state != DEAD) { + if (cur_task && cur_task->snode.state != DEAD) { cur_task->thread_context.trapframe = tf; // call syscall @@ -64,7 +64,7 @@ void software_irq_dispatch(struct trapframe* tf) arch_set_return(tf, ret); } - if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) { + if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) { cur_cpu()->task = NULL; context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler); } From af1ceec30876663224b287b6a5fa95878f92c09f Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Tue, 24 Dec 2024 02:28:20 +0800 Subject: [PATCH 2/3] Add schedule node --- .../arch/arm/armv8-a/cortex-a55/core.h | 2 +- .../XiZi_AIoT/softkernel/include/rbtree.h | 5 +- .../XiZi_AIoT/softkernel/include/scheduler.h | 4 +- .../XiZi_AIoT/softkernel/include/task.h | 8 +- .../XiZi_AIoT/softkernel/syscall/sys_kill.c | 61 +++++-------- .../XiZi_AIoT/softkernel/syscall/sys_mmap.c | 4 +- .../softkernel/syscall/sys_poll_session.c | 2 +- .../softkernel/syscall/sys_register_irq.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_state.c | 91 +++++++------------ .../XiZi_AIoT/softkernel/task/schedule.c | 52 +++++------ .../XiZi_AIoT/softkernel/task/semaphore.c | 1 - Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 4 +- .../XiZi_AIoT/softkernel/tools/rbtree.c | 14 
+-- .../softkernel/trap/clock_irq_handler.c | 5 +- 14 files changed, 110 insertions(+), 147 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h index 936804fbb..9312dfa79 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h +++ b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv8-a/cortex-a55/core.h @@ -73,7 +73,7 @@ Modification: #include "cortex_a55.h" -#define NR_CPU 1 // maximum number of CPUs +#define NR_CPU 4 // maximum number of CPUs static inline uintptr_t arch_curr_tick() { diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h index f62bd3eae..3ff77e1b9 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h @@ -33,13 +33,14 @@ typedef struct RbtTree { int nr_ele; } RbtTree; -typedef void(rbt_traverse_fn)(RbtNode* node); +// return if the traverse needs to continue +typedef bool(rbt_traverse_fn)(RbtNode* node, void* data); void rbtree_init(RbtTree* tree); int rbt_insert(RbtTree* tree, uintptr_t key, void* data); RbtNode* rbt_search(RbtTree* tree, uintptr_t key); int rbt_delete(RbtTree* tree, uintptr_t key); -void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn); +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data); void module_rbt_factory_init(TraceTag* _softkernel_tag); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h index 45885d3ec..94f657f61 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h @@ -9,13 +9,13 @@ typedef uintptr_t snode_id_t; enum ThreadState { - INIT = 0, + NEVER_RUN = 0, + INIT, READY, RUNNING, DEAD, BLOCKED, SLEEPING, - NEVER_RUN, NR_STATE, }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h index f7cc37c9f..267c66388 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h @@ -49,6 +49,8 @@ Modification: #define TASK_NAME_MAX_LEN 16 #define SLEEP_MONITOR_CORE 0 +typedef int tid_t; + /* Thread Control Block */ struct ThreadContext { struct Thread* task; // process of current thread @@ -95,12 +97,6 @@ struct Thread { bool advance_unblock; // @todo abandon /* task schedule attributes */ - // struct double_list_node node; - // struct TaskSleepContext sleep_context; - // enum ThreadState state; - // int priority; // priority - // int remain_tick; - // int maxium_tick; struct ScheduleNode snode; }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c index 9510ca226..53c181738 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c @@ -27,53 +27,36 @@ Author: AIIT XUOS Lab Modification: 1. 
first version *************************************************/ +#include "task.h" #include "trap_common.h" -#include "task.h" +static bool kill_succ; + +static bool kill_task(RbtNode* node, void* id) +{ + struct ScheduleNode* snode = (struct ScheduleNode*)node->data; + struct Thread* thd = snode->pthd; + tid_t target_id = *(tid_t*)id; + + if (thd->tid == target_id) { + sys_exit(thd); + kill_succ = true; + return false; + } + + return true; +} extern int sys_exit(struct Thread* task); int sys_kill(int id) { - struct Thread* task = NULL; - // check if task is a running one - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node) - { - if (task->tid == id) { - sys_exit(task); - return 0; - } + kill_succ = false; + for (int pool_id = 0; pool_id < NR_STATE; pool_id++) { + rbt_traverse(&g_scheduler.snode_state_pool[pool_id], kill_task, (void*)&id); } - // check if task is a blocking one - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node) - { - if (task->tid == id) { - sys_exit(task); - return 0; - } + if (kill_succ) { + return 0; } - - struct ksemaphore* sem = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node) - { - task = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node) - { - sys_exit(task); - return 0; - } - } - - // check if task is a ready one - for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) { - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node) - { - if (task->tid == id) { - sys_exit(task); - return 0; - } - } - } - return -1; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c index 2739a0968..6cd460257 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_mmap.c @@ -118,8 +118,8 @@ int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info } uintptr_t paddr_to_map = *paddr; - if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 1) { - ERROR("mapping invalid memory: 0x%p\n", paddr_to_map); + if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 2) { + ERROR("mapping invalid memory: 0x%p by %d\n", paddr_to_map, cur_task->tid); return -1; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 691b1fffa..1cb39c55a 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -114,7 +114,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) { task_yield(cur_task); // @todo support blocking(now bug at 4 cores running) - // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task); + task_block(cur_task); } return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index 1eaca105d..5b02e91fe 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -126,7 +126,9 @@ int sys_register_irq(int irq_num, int irq_opcode) struct TaskLifecycleOperations* tlo = GetSysObject(struct 
TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); kernel_irq_proxy = tlo->new_thread(pmemspace); - kernel_irq_proxy->snode.state = NEVER_RUN; + task_trans_sched_state(&kernel_irq_proxy->snode, // + &g_scheduler.snode_state_pool[INIT], // + &g_scheduler.snode_state_pool[NEVER_RUN], NEVER_RUN); } // bind irq to session diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c index 6f97f1d1c..056014c69 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_state.c @@ -42,71 +42,50 @@ Modification: extern uint8_t _binary_fs_img_start[], _binary_fs_img_end[]; #define SHOWINFO_BORDER_LINE() LOG_PRINTF("******************************************************\n"); -#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, task->priority, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10) +#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, 0, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10) + +bool print_info(RbtNode* node, void* data) +{ + struct ScheduleNode* snode = (struct ScheduleNode*)node->data; + struct Thread* thd = snode->pthd; + switch (snode->state) { + case INIT: + LOG_PRINTF("%-8s", "INIT"); + break; + case READY: + LOG_PRINTF("%-8s", "READY"); + break; + case RUNNING: + LOG_PRINTF("%-8s", "RUNNING"); + break; + case DEAD: + LOG_PRINTF("%-8s", "DEAD"); + break; + case BLOCKED: + LOG_PRINTF("%-8s", "BLOCK"); + break; + case SLEEPING: + LOG_PRINTF("%-8s", "SLEEP"); + break; + default: + break; + } + + SHOWTASK_TASK_BASE_INFO(thd); + return true; +} void show_tasks(void) { - struct Thread* task = NULL; SHOWINFO_BORDER_LINE(); for (int i = 0; i < NR_CPU; i++) { LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? 
"NULL" : global_cpus[i].task->name)); } SHOWINFO_BORDER_LINE(); LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)"); - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node) - { - LOG_PRINTF("%-8s", "RUNNING"); - SHOWTASK_TASK_BASE_INFO(task); - } - for (int i = 0; i < TASK_MAX_PRIORITY; i++) { - if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) { - continue; - } - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node) - { - switch (task->state) { - case INIT: - LOG_PRINTF("%-8s", "INIT"); - break; - case READY: - LOG_PRINTF("%-8s", "READY"); - break; - case RUNNING: - LOG_PRINTF("%-8s", "RUNNING"); - break; - case DEAD: - LOG_PRINTF("%-8s", "DEAD"); - break; - default: - break; - } - - SHOWTASK_TASK_BASE_INFO(task); - } - } - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node) - { - LOG_PRINTF("%-8s", "BLOCK"); - SHOWTASK_TASK_BASE_INFO(task); - } - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_sleep_list_head, node) - { - LOG_PRINTF("%-8s", "SLEEP"); - SHOWTASK_TASK_BASE_INFO(task); - } - - struct ksemaphore* sem = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node) - { - task = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node) - { - LOG_PRINTF("%-8s", "BLOCK"); - SHOWTASK_TASK_BASE_INFO(task); - } + for (int pool_id = INIT; pool_id < NR_STATE; pool_id++) { + rbt_traverse(&g_scheduler.snode_state_pool[pool_id], print_info, NULL); } SHOWINFO_BORDER_LINE(); @@ -150,7 +129,7 @@ void show_cpu(void) assert(current_task != NULL); LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n"); - LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick); + LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->snode.sched_context.remain_tick, current_task->snode.sched_context.remain_tick); LOG_PRINTF("***********************************************************\n"); return; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index 4b926891d..0b6031004 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -30,33 +30,31 @@ Modification: #include "log.h" #include "schedule_algo.h" +static struct Thread* next_runable_task; + +bool find_runable_task(RbtNode* node, void* data) +{ + struct ScheduleNode* snode = (struct ScheduleNode*)node->data; + struct Thread* thd = snode->pthd; + + if (!thd->dead) { + next_runable_task = thd; + return false; + } else { + struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); + tlo->free_pcb(thd); + return false; + } + + return true; +} + struct Thread* max_priority_runnable_task(void) { - static struct Thread* task = NULL; - // static int priority = 0; - - // priority = __builtin_ffs(ready_task_priority) - 1; - // if (priority > 31 || priority < 0) { - // return NULL; - // } - - // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - // { - // assert(task != NULL); - // if (task->state == READY && !task->dead) { - // // found a runnable task, stop this look up - // return task; - // } else if (task->dead && task->state != RUNNING) { - - // struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, 
&xizi_task_manager.task_lifecycle_ops_tag); - // tlo->free_pcb(task); - // return NULL; - // } - // } - if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) { - return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd; - } - return NULL; + /// @todo better strategy + next_runable_task = NULL; + rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runable_task, NULL); + return next_runable_task; } #include "multicores.h" @@ -79,6 +77,8 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd) bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state) { + assert(snode != NULL); + // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name); assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL); if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) { DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid); @@ -98,7 +98,6 @@ void task_dead(struct Thread* thd) { assert(thd != NULL); struct ScheduleNode* snode = &thd->snode; - enum ThreadState thd_cur_state = snode->state; assert(snode->state == READY); @@ -106,6 +105,7 @@ void task_dead(struct Thread* thd) &g_scheduler.snode_state_pool[READY], // &g_scheduler.snode_state_pool[DEAD], DEAD); assert(trans_res = true); + assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[DEAD], snode->snode_id)); return; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c index 65d0626f9..90b79f46f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c @@ -154,7 +154,6 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) return false; } - struct Thread* thd = NULL; // by design: no waking any waiting threads rbt_delete(&sem_pool->sem_pool_map, sem_id); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index 4b29d99ca..726355542 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -83,7 +83,7 @@ static void _task_manager_init() } // tid pool - xizi_task_manager.next_pid = 0; + xizi_task_manager.next_pid = 1; // init priority bit map ready_task_priority = 0; @@ -221,6 +221,7 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) } // [schedule related] + task->tid = xizi_task_manager.next_pid++; if (!init_schedule_node(&task->snode, task)) { ERROR("Not enough memory\n"); slab_free(&xizi_task_manager.task_allocator, (void*)task); @@ -238,7 +239,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) ERROR_FREE { /* init basic task ref member */ - task->tid = xizi_task_manager.next_pid++; task->bind_irq = false; /* vm & memory member */ diff --git a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c index 14430e1b8..b3e356465 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c @@ -466,17 +466,19 @@ int rbt_delete(RbtTree* tree, uintptr_t key) return RBTTREE_DELETE_SUCC; } -void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn) +void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn, void* data) { if (node == NULL) { return; } - fn(node); - rbt_traverse_inner(node->left, fn); - rbt_traverse_inner(node->right, fn); + + if (fn(node, data)) { + rbt_traverse_inner(node->left, fn, 
data); + rbt_traverse_inner(node->right, fn, data); + } } -void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn) +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data) { - rbt_traverse_inner(tree->root, fn); + rbt_traverse_inner(tree->root, fn, data); } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c index c22faad7f..1e691fced 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c @@ -62,9 +62,10 @@ void hw_current_second(uintptr_t* second) *second = p_clock_driver->get_second(); } -void count_down_sleeping_task(RbtNode* node) +bool count_down_sleeping_task(RbtNode* node, void* data) { /// @todo implement + return false; } uint64_t global_tick = 0; @@ -86,7 +87,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg) } // todo: cpu 0 will handle sleeping thread - rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task); + rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task, NULL); // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node) // { From e0ff4537263d02e658395d5e3330913ee386cf09 Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Tue, 24 Dec 2024 03:36:26 +0800 Subject: [PATCH 3/3] Support Schedule node --- Ubiquitous/XiZi_AIoT/softkernel/include/ipc.h | 11 +++++++++-- .../XiZi_AIoT/softkernel/include/scheduler.h | 1 + Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c | 1 + .../softkernel/syscall/sys_poll_session.c | 2 +- .../softkernel/syscall/sys_register_irq.c | 1 + Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c | 14 ++++++++++++-- Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 4 ++-- 7 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/ipc.h b/Ubiquitous/XiZi_AIoT/softkernel/include/ipc.h index 4bed46666..9100205a4 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/ipc.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/ipc.h @@ -54,13 +54,20 @@ typedef struct { struct IpcArgInfo { uint16_t offset; uint16_t len; -}; + union { + uint16_t attr; + struct { + uint16_t null_ptr : 1; + uint16_t reserved : 15; + }; + }; +} __attribute__((packed)); /* [header, ipc_arg_buffer_len[], ipc_arg_buffer[]] */ struct IpcMsg { ipc_msg_header header; uintptr_t buf[]; -}; +} __attribute__((packed)); enum { IPC_ARG_INFO_BASE_OFFSET = sizeof(ipc_msg_header), }; \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h index 94f657f61..464db4f58 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h @@ -21,6 +21,7 @@ enum ThreadState { typedef struct ScheduleContext { intptr_t remain_tick; + uint64_t run_time; } ScheduleContext; typedef struct TaskSleepContext { diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c index 53c181738..8a1b603c9 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_kill.c @@ -32,6 +32,7 @@ Modification: static bool kill_succ; +extern int sys_exit(struct Thread* ptask); static bool kill_task(RbtNode* node, void* id) { struct ScheduleNode* snode = (struct ScheduleNode*)node->data; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c 
b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 1cb39c55a..8227df78a 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -114,7 +114,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) { task_yield(cur_task); // @todo support blocking(now bug at 4 cores running) - task_block(cur_task); + // task_block(cur_task); } return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index 5b02e91fe..990e1a230 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -75,6 +75,7 @@ static void send_irq_to_user(int irq_num) buf->header.done = 0; buf->header.magic = IPC_MSG_MAGIC; buf->header.valid = 1; + enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side); if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) { task_into_ready(irq_forward_table[irq_num].handle_task); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index 0b6031004..b5587dff6 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -31,6 +31,7 @@ Modification: #include "schedule_algo.h" static struct Thread* next_runable_task; +static uint64_t min_run_time; bool find_runable_task(RbtNode* node, void* data) { @@ -38,8 +39,12 @@ bool find_runable_task(RbtNode* node, void* data) struct Thread* thd = snode->pthd; if (!thd->dead) { - next_runable_task = thd; - return false; + if (thd->snode.sched_context.run_time <= min_run_time) { + next_runable_task = thd; + min_run_time = thd->snode.sched_context.run_time; + thd->snode.sched_context.run_time++; + } + return true; } else { struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); tlo->free_pcb(thd); @@ -53,6 +58,7 @@ struct Thread* max_priority_runnable_task(void) { /// @todo better strategy next_runable_task = NULL; + min_run_time = UINT64_MAX; rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runable_task, NULL); return next_runable_task; } @@ -65,7 +71,10 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd) { snode->pthd = bind_thd; snode->snode_id = bind_thd->tid; + snode->sched_context.remain_tick = 0; + snode->sched_context.run_time = 0; + snode->sleep_context.remain_ms = 0; snode->state = INIT; if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], // @@ -150,6 +159,7 @@ void task_yield(struct Thread* thd) bool trans_res = task_trans_sched_state(snode, // &g_scheduler.snode_state_pool[thd_cur_state], // &g_scheduler.snode_state_pool[READY], READY); + snode->sched_context.remain_tick = TASK_CLOCK_TICK; assert(trans_res = true); return; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index 726355542..ced4821df 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -288,9 +288,9 @@ struct TaskLifecycleOperations task_lifecycle_ops = { static void task_state_set_running(struct 
Thread* task) { assert(task != NULL && task->snode.state == READY); - task_trans_sched_state(&task->snode, // + assert(task_trans_sched_state(&task->snode, // &g_scheduler.snode_state_pool[READY], // - &g_scheduler.snode_state_pool[RUNNING], RUNNING); + &g_scheduler.snode_state_pool[RUNNING], RUNNING)); } struct Thread* next_task_emergency = NULL;
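
A note on the traversal helper this series leans on (sys_kill(), show_tasks(), max_priority_runnable_task() and the clock handler all go through it): the callback receives each node plus the caller's void* argument, and its bool return value decides whether rbt_traverse_inner() keeps descending into that node's children; returning false is how kill_task() stops walking once the target thread has been exited. A minimal sketch under those assumptions; count_live() and nr_live_threads() are hypothetical helpers, not part of the patch:

#include "rbtree.h"
#include "task.h"

/* Visit every schedule node in a pool and count the threads that have
 * not been marked dead. The int counter is threaded through as the
 * void* argument of rbt_traverse(). */
static bool count_live(RbtNode* node, void* data)
{
    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
    if (!snode->pthd->dead) {
        (*(int*)data)++;
    }
    return true; /* keep descending into both subtrees */
}

static int nr_live_threads(enum ThreadState pool)
{
    int count = 0;
    rbt_traverse(&g_scheduler.snode_state_pool[pool], count_live, (void*)&count);
    return count;
}
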