From 21304531a54bd79bb67dbccec2a800963ff02ca2 Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Tue, 24 Dec 2024 01:01:54 +0800 Subject: [PATCH] Add schedule node midway --- .../cortex-a9/imx6q-sabrelite/trap_common.c | 4 +- .../XiZi_AIoT/softkernel/include/ksemaphore.h | 2 +- .../XiZi_AIoT/softkernel/include/rbtree.h | 10 ++ .../XiZi_AIoT/softkernel/include/scheduler.h | 48 +++++- .../XiZi_AIoT/softkernel/include/task.h | 35 +--- .../softkernel/syscall/sys_close_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_exit.c | 4 +- .../softkernel/syscall/sys_poll_session.c | 2 +- .../softkernel/syscall/sys_register_irq.c | 8 +- .../XiZi_AIoT/softkernel/syscall/sys_sleep.c | 9 +- .../XiZi_AIoT/softkernel/syscall/sys_thread.c | 2 +- .../softkernel/syscall/sys_wait_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_yield.c | 2 +- .../XiZi_AIoT/softkernel/syscall/syscall.c | 2 +- .../XiZi_AIoT/softkernel/task/schedule.c | 154 ++++++++++++------ .../XiZi_AIoT/softkernel/task/semaphore.c | 26 ++- Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 125 ++++---------- .../XiZi_AIoT/softkernel/tools/rbtree.c | 25 ++- .../softkernel/trap/clock_irq_handler.c | 34 ++-- .../softkernel/trap/default_irq_handler.c | 2 +- .../softkernel/trap/software_irq_handler.c | 4 +- 21 files changed, 269 insertions(+), 237 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c b/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c index f76b56719..51aab3bad 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c +++ b/Ubiquitous/XiZi_AIoT/hardkernel/intr/arm/armv7-a/cortex-a9/imx6q-sabrelite/trap_common.c @@ -57,7 +57,7 @@ void panic(char* s) /* stack for different mode*/ static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE]; extern uint32_t _vector_jumper; -extern uint32_t _vector_start; +extern uint32_t* _vector_start; extern 
uint32_t _vector_end; void init_cpu_mode_stacks(int cpu_id) @@ -75,7 +75,7 @@ static void _sys_irq_init(int cpu_id) /* load exception vectors */ init_cpu_mode_stacks(cpu_id); if (cpu_id == 0) { - volatile uint32_t* vector_base = &_vector_start; + volatile uint32_t* vector_base = (uint32_t*)&_vector_start; // Set Interrupt handler start address vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h b/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h index d30e6732a..2e2c65dbb 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/ksemaphore.h @@ -41,7 +41,7 @@ struct ksemaphore { sem_id_t id; sem_val_t val; /* list of waiting threads */ - struct double_list_node wait_list_guard; + RbtTree wait_thd_tree; /* list to manage semaphores */ /// @todo Use RB-Tree to manage all semaphores struct double_list_node sem_list_node; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h index 0ffbdbe16..f62bd3eae 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/rbtree.h @@ -5,6 +5,13 @@ #include "actracer.h" +#define RBTTREE_INSERT_SECC 0 +#define RBTTREE_INSERT_FAILED -1 +#define RBTTREE_INSERT_EXISTED -2 + +#define RBTTREE_DELETE_SUCC 0 +#define RBTTREE_DELETE_FAILED -1 + // CLRS // Insertion and Deletion in a Red Black Tree enum rbt_type { @@ -26,10 +33,13 @@ typedef struct RbtTree { int nr_ele; } RbtTree; +typedef void(rbt_traverse_fn)(RbtNode* node); + void rbtree_init(RbtTree* tree); int rbt_insert(RbtTree* tree, uintptr_t key, void* data); RbtNode* rbt_search(RbtTree* tree, uintptr_t key); int rbt_delete(RbtTree* tree, uintptr_t key); +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn); void module_rbt_factory_init(TraceTag* _softkernel_tag); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h 
b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h index fbb3d03a9..45885d3ec 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h @@ -2,20 +2,52 @@ #pragma once #include "actracer.h" #include "ksemaphore.h" +#include "rbtree.h" #define TASK_MAX_PRIORITY 32 +#define UNINIT_SNODE_ID 0 +typedef uintptr_t snode_id_t; + +enum ThreadState { + INIT = 0, + READY, + RUNNING, + DEAD, + BLOCKED, + SLEEPING, + NEVER_RUN, + NR_STATE, +}; + +typedef struct ScheduleContext { + intptr_t remain_tick; +} ScheduleContext; + +typedef struct TaskSleepContext { + int64_t remain_ms; +} TaskSleepContext; struct ScheduleNode { - TraceTag task_ref; - struct double_list_node list_node; + struct Thread* pthd; + snode_id_t snode_id; + enum ThreadState state; + + ScheduleContext sched_context; + TaskSleepContext sleep_context; }; struct Scheduler { TraceTag tag; - - struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */ - struct double_list_node task_running_list_head; - struct double_list_node task_blocked_list_head; - struct double_list_node task_sleep_list_head; + RbtTree snode_state_pool[NR_STATE]; struct XiziSemaphorePool semaphore_pool; -}; \ No newline at end of file +}; + +extern struct Scheduler g_scheduler; + +bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd); + +bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state); +void task_block(struct Thread* thd); +void task_dead(struct Thread* thd); +void task_yield(struct Thread* thd); +void task_into_ready(struct Thread* thd); \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h index 977642f15..f7cc37c9f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h @@ -41,22 +41,14 
@@ Modification: #include "share_page.h" #include "spinlock.h" +#include "scheduler.h" + #define TASK_CLOCK_TICK 50 #define TASK_MAX_PRIORITY 32 #define TASK_DEFAULT_PRIORITY 2 #define TASK_NAME_MAX_LEN 16 #define SLEEP_MONITOR_CORE 0 -enum ProcState { - INIT = 0, - READY, - RUNNING, - DEAD, - BLOCKED, - SLEEPING, - NEVER_RUN, -}; - /* Thread Control Block */ struct ThreadContext { struct Thread* task; // process of current thread @@ -75,10 +67,6 @@ struct ThreadContext { struct trapframe* trapframe; }; -struct TaskSleepContext { - int64_t remain_ms; -}; - /* Process Control Block */ struct Thread { /* task name */ @@ -107,12 +95,13 @@ struct Thread { bool advance_unblock; // @todo abandon /* task schedule attributes */ - struct double_list_node node; - struct TaskSleepContext sleep_context; - enum ProcState state; - int priority; // priority - int remain_tick; - int maxium_tick; + // struct double_list_node node; + // struct TaskSleepContext sleep_context; + // enum ThreadState state; + // int priority; // priority + // int remain_tick; + // int maxium_tick; + struct ScheduleNode snode; }; struct SchedulerRightGroup { @@ -157,9 +146,6 @@ struct XiziTaskManager { /* init task manager */ void (*init)(); - /* init a task control block, set name, remain_tick, state, cwd, priority, etc. 
*/ - void (*task_set_default_schedule_attr)(struct Thread*); - /* use by task_scheduler, find next READY task, should be in locked */ struct Thread* (*next_runnable_task)(void); /* function that's runing by kernel thread context, schedule use tasks */ @@ -168,9 +154,6 @@ struct XiziTaskManager { /* handle task state */ /* call to yield current use task */ void (*task_yield_noschedule)(struct Thread* task, bool is_blocking); - /* block and unblock task */ - void (*task_block)(struct double_list_node* head, struct Thread* task); - void (*task_unblock)(struct Thread* task); /* set task priority */ void (*set_cur_task_priority)(int priority); }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c index 4ba31a83e..4823230dd 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c @@ -70,8 +70,8 @@ int sys_close_session(struct Thread* cur_task, struct Session* session) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_info->snode.state == BLOCKED) { + task_into_ready(session_backend->server); } } } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c index 4b9596de7..23bb257d6 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c @@ -41,11 +41,11 @@ int sys_exit(struct Thread* ptask) assert(ptask != NULL); ptask->dead = true; // free that task straightly if it's a blocked task - if (ptask->state == BLOCKED) { + if (ptask->snode.state == BLOCKED) { struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); tlo->free_pcb(ptask); } // yield current task 
in case it wants to exit itself - xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); + task_yield(cur_cpu()->task); return 0; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 8f2f14e3f..691b1fffa 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -112,7 +112,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) } if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) { - xizi_task_manager.task_yield_noschedule(cur_task, false); + task_yield(cur_task); // @todo support blocking(now bug at 4 cores running) // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task); } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index c0260db1f..1eaca105d 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -76,8 +76,8 @@ static void send_irq_to_user(int irq_num) buf->header.magic = IPC_MSG_MAGIC; buf->header.valid = 1; - if (irq_forward_table[irq_num].handle_task->state == BLOCKED) { - xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task); + if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) { + task_into_ready(irq_forward_table[irq_num].handle_task); } /* add session head */ @@ -92,7 +92,7 @@ int user_irq_handler(int irq, void* tf, void* arg) next_task_emergency = irq_forward_table[irq].handle_task; if (cur_cpu()->task != NULL) { - xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); + task_yield(cur_cpu()->task); } } return 0; @@ -126,7 +126,7 @@ int sys_register_irq(int irq_num, int irq_opcode) struct TaskLifecycleOperations* tlo = GetSysObject(struct 
TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); kernel_irq_proxy = tlo->new_thread(pmemspace); - kernel_irq_proxy->state = NEVER_RUN; + kernel_irq_proxy->snode.state = NEVER_RUN; } // bind irq to session diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c index bce382597..3ba6bbc80 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c @@ -36,10 +36,11 @@ Modification: int sys_sleep(intptr_t ms) { struct Thread* cur_task = cur_cpu()->task; - xizi_task_manager.task_yield_noschedule(cur_task, false); - xizi_task_manager.task_block(&xizi_task_manager.task_sleep_list_head, cur_task); - cur_task->state = SLEEPING; - cur_task->sleep_context.remain_ms = ms; + task_yield(cur_task); + cur_task->snode.sleep_context.remain_ms = ms; + task_trans_sched_state(&cur_task->snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[SLEEPING], SLEEPING); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c index 533e06f10..25a372c0c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_thread.c @@ -64,7 +64,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en strncpy(task->name, last, sizeof(task->name) - 1); // init pcb schedule attributes - xizi_task_manager.task_set_default_schedule_attr(task); + task_into_ready(task); // thread init done by here if (pmemspace->thread_to_notify == NULL) { diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c index 0eea989a8..4108e180c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c @@ -60,8 +60,8 @@ int 
sys_wait_session(struct Session* userland_session) assert(!queue_is_empty(&server_to_call->sessions_to_be_handle)); ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait); - if (server_to_call->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_call->snode.state == BLOCKED) { + task_into_ready(session_backend->server); } return 0; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c index 1cc2689a1..fe4f128ee 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c @@ -36,6 +36,6 @@ Modification: int sys_yield(task_yield_reason reason) { struct Thread* cur_task = cur_cpu()->task; - xizi_task_manager.task_yield_noschedule(cur_task, false); + task_yield(cur_task); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c index a0e2291e2..76135f24c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/syscall.c @@ -39,7 +39,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u switch (sys_num) { case SYSCALL_TEST: - ret = arch_curr_tick(); + ret = 0; break; case SYSCALL_SPAWN: ret = sys_spawn((char*)param1, (char*)param2, (char**)param3); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index 13b85c62b..4b926891d 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -33,65 +33,123 @@ Modification: struct Thread* max_priority_runnable_task(void) { static struct Thread* task = NULL; - static int priority = 0; + // static int priority = 0; - priority = __builtin_ffs(ready_task_priority) - 1; - if (priority > 31 || priority < 0) { - return NULL; - } + 
// priority = __builtin_ffs(ready_task_priority) - 1; + // if (priority > 31 || priority < 0) { + // return NULL; + // } - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - { - assert(task != NULL); - if (task->state == READY && !task->dead) { - // found a runnable task, stop this look up - return task; - } else if (task->dead && task->state != RUNNING) { + // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) + // { + // assert(task != NULL); + // if (task->state == READY && !task->dead) { + // // found a runnable task, stop this look up + // return task; + // } else if (task->dead && task->state != RUNNING) { - struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); - tlo->free_pcb(task); - return NULL; - } + // struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); + // tlo->free_pcb(task); + // return NULL; + // } + // } + if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) { + return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd; } return NULL; } -struct Thread* round_robin_runnable_task(uint32_t priority) +#include "multicores.h" +#include "rbtree.h" +#include "task.h" + +bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd) { - struct Thread* task = NULL; - - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node) - { - if (task->state == READY && !task->dead) { - // found a runnable task, stop this look up - return task; - } else if (task->dead && task->state != RUNNING) { - struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); - tlo->free_pcb(task); - return NULL; - } + snode->pthd = bind_thd; + snode->snode_id = bind_thd->tid; + snode->sched_context.remain_tick = 0; + 
snode->sleep_context.remain_ms = 0; + snode->state = INIT; + if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], // + snode->snode_id, (void*)snode)) { + return false; } - - return NULL; + return true; } -/* recover task priority */ -void recover_priority(void) +bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state) { - struct Thread* task = NULL; - for (int i = 1; i < TASK_MAX_PRIORITY; i++) { - if (i == TASK_DEFAULT_PRIORITY) - continue; - DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node) - { - if (!IS_DOUBLE_LIST_EMPTY(&task->node)) { - // DEBUG("%s priority recover\n", task->name); - task->priority = TASK_DEFAULT_PRIORITY; - doubleListDel(&task->node); - doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); - i--; - break; - } - } + assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL); + if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) { + DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid); + return false; } + + if (RBTTREE_INSERT_SECC != rbt_insert(to_pool, snode->snode_id, (void*)snode)) { + DEBUG("Thread %d trans state failed\n", snode->pthd->tid); + return false; + } + + snode->state = target_state; + return true; +} + +void task_dead(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(snode->state == READY); + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[DEAD], DEAD); + assert(trans_res == true); + return; +} + +void task_block(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(thd_cur_state != RUNNING); + + bool trans_res = task_trans_sched_state(snode, //
&g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[BLOCKED], BLOCKED); + assert(trans_res == true); + return; +} + +void task_into_ready(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[READY], READY); + snode->sched_context.remain_tick = TASK_CLOCK_TICK; + assert(trans_res == true); + return; +} + +void task_yield(struct Thread* thd) +{ + assert(thd != NULL); + struct ScheduleNode* snode = &thd->snode; + enum ThreadState thd_cur_state = snode->state; + + assert(thd == cur_cpu()->task && thd_cur_state == RUNNING); + cur_cpu()->task = NULL; + + bool trans_res = task_trans_sched_state(snode, // + &g_scheduler.snode_state_pool[thd_cur_state], // + &g_scheduler.snode_state_pool[READY], READY); + assert(trans_res == true); + return; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c index fc1ed0e44..65d0626f9 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c @@ -58,7 +58,7 @@ sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val) } sem->val = val; doubleListNodeInit(&sem->sem_list_node); - doubleListNodeInit(&sem->wait_list_guard); + rbtree_init(&sem->wait_thd_tree); if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) { slab_free(&sem_pool->allocator, sem); @@ -88,7 +88,7 @@ bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id) { assert(thd != NULL); - assert(thd->state == RUNNING); + assert(thd->snode.state == RUNNING); /* find sem */ struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id); // invalid sem id
@@ -105,8 +105,9 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem // waiting at the sem sem->val--; - xizi_task_manager.task_yield_noschedule(thd, false); - xizi_task_manager.task_block(&sem->wait_list_guard, thd); + task_yield(thd); + task_block(thd); + assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd)); return true; } @@ -120,12 +121,11 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) } if (sem->val < 0) { - if (!IS_DOUBLE_LIST_EMPTY(&sem->wait_list_guard)) { - struct Thread* thd = CONTAINER_OF(sem->wait_list_guard.next, struct Thread, node); - assert(thd != NULL && thd->state == BLOCKED); - xizi_task_manager.task_unblock(thd); - // DEBUG("waking %s\n", thd->name); - } + assert(!rbt_is_empty(&sem->wait_thd_tree)); + RbtNode* root = sem->wait_thd_tree.root; + struct Thread* thd = (struct Thread*)root->data; + rbt_delete(&sem->wait_thd_tree, root->key); + task_into_ready(thd); } sem->val++; @@ -155,11 +155,7 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) } struct Thread* thd = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node) - { - assert(thd != NULL); - xizi_task_manager.task_unblock(thd); - } + // by design: no waking any waiting threads rbt_delete(&sem_pool->sem_pool_map, sem_id); doubleListDel(&sem->sem_list_node); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index 916ebc475..4b29d99ca 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -44,28 +44,9 @@ struct CPU global_cpus[NR_CPU]; uint32_t ready_task_priority; struct GlobalTaskPool global_task_pool; +struct Scheduler g_scheduler; extern struct TaskLifecycleOperations task_lifecycle_ops; -static inline void task_node_leave_list(struct Thread* task) -{ - doubleListDel(&task->node); - if 
(IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) { - ready_task_priority &= ~((uint32_t)1 << task->priority); - } -} - -static inline void task_node_add_to_ready_list_head(struct Thread* task) -{ - doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]); - ready_task_priority |= ((uint32_t)1 << task->priority); -} - -static inline void task_node_add_to_ready_list_back(struct Thread* task) -{ - doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); - ready_task_priority |= ((uint32_t)1 << task->priority); -} - static void _task_manager_init() { assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, // @@ -93,6 +74,14 @@ static void _task_manager_init() doubleListNodeInit(&global_task_pool.thd_listing_head); rbtree_init(&global_task_pool.thd_ref_map); + // scheduler + assert(CreateResourceTag(&g_scheduler.tag, &xizi_task_manager.tag, // + "GlobalScheduler", TRACER_SYSOBJECT, (void*)&g_scheduler)); + semaphore_pool_init(&g_scheduler.semaphore_pool); + for (int pool_id = 0; pool_id < NR_STATE; pool_id++) { + rbtree_init(&g_scheduler.snode_state_pool[pool_id]); + } + // tid pool xizi_task_manager.next_pid = 0; @@ -125,8 +114,8 @@ int _task_return_sys_resources(struct Thread* ptask) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->state == BLOCKED) { - xizi_task_manager.task_unblock(session_backend->server); + if (server_to_info->snode.state == BLOCKED) { + task_into_ready(server_to_info); } } } @@ -184,7 +173,7 @@ static void _free_thread(struct Thread* task) } // remove thread from used task list - task_node_leave_list(task); + task_dead(task); /* free memspace if needed to */ if (task->memspace != NULL) { @@ -196,8 +185,8 @@ static void _free_thread(struct Thread* task) // awake deamon in this memspace if (task->memspace->thread_to_notify != NULL) { if 
(task->memspace->thread_to_notify != task) { - if (task->memspace->thread_to_notify->state == BLOCKED) { - xizi_task_manager.task_unblock(task->memspace->thread_to_notify); + if (task->memspace->thread_to_notify->snode.state == BLOCKED) { + task_into_ready(task->memspace->thread_to_notify); } else { task->memspace->thread_to_notify->advance_unblock = true; } @@ -231,9 +220,17 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) return NULL; } + // [schedule related] + if (!init_schedule_node(&task->snode, task)) { + ERROR("Not enough memory\n"); + slab_free(&xizi_task_manager.task_allocator, (void*)task); + return NULL; + } + // alloc stack page for task if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) { /* here inside, will no free memspace */ + assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[INIT], task->snode.snode_id)); slab_free(&xizi_task_manager.task_allocator, (void*)task); return NULL; } @@ -279,7 +276,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace) } // [name] - // [schedule related] return task; } @@ -289,22 +285,12 @@ struct TaskLifecycleOperations task_lifecycle_ops = { .free_pcb = _free_thread, }; -static void _task_set_default_schedule_attr(struct Thread* task) -{ - task->remain_tick = TASK_CLOCK_TICK; - task->maxium_tick = TASK_CLOCK_TICK * 10; - task->dead = false; - task->state = READY; - task->priority = TASK_DEFAULT_PRIORITY; - task_node_add_to_ready_list_head(task); -} - static void task_state_set_running(struct Thread* task) { - assert(task != NULL && task->state == READY); - task->state = RUNNING; - task_node_leave_list(task); - doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head); + assert(task != NULL && task->snode.state == READY); + task_trans_sched_state(&task->snode, // + &g_scheduler.snode_state_pool[READY], // + &g_scheduler.snode_state_pool[RUNNING], RUNNING); } 
struct Thread* next_task_emergency = NULL; @@ -319,7 +305,7 @@ static void _scheduler(struct SchedulerRightGroup right_group) next_task = NULL; /* find next runnable task */ assert(cur_cpu()->task == NULL); - if (next_task_emergency != NULL && next_task_emergency->state == READY) { + if (next_task_emergency != NULL && next_task_emergency->snode.state == READY) { next_task = next_task_emergency; } else { next_task = xizi_task_manager.next_runnable_task(); @@ -340,76 +326,21 @@ static void _scheduler(struct SchedulerRightGroup right_group) assert(next_task->memspace->pgdir.pd_addr != NULL); p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr)); context_switch(&cpu->scheduler, next_task->thread_context.context); - assert(next_task->state != RUNNING); + assert(next_task->snode.state != RUNNING); } } -static void _task_yield_noschedule(struct Thread* task, bool blocking) -{ - assert(task != NULL); - /// @warning only support current task yield now - assert(task == cur_cpu()->task && task->state == RUNNING); - - // rearrage current task position - task_node_leave_list(task); - if (task->state == RUNNING) { - task->state = READY; - } - task->remain_tick = TASK_CLOCK_TICK; - cur_cpu()->task = NULL; - task_node_add_to_ready_list_back(task); -} - -static void _task_block(struct double_list_node* head, struct Thread* task) -{ - assert(head != NULL); - assert(task != NULL); - assert(task->state != RUNNING); - task_node_leave_list(task); - task->state = BLOCKED; - doubleListAddOnHead(&task->node, head); -} - -static void _task_unblock(struct Thread* task) -{ - assert(task != NULL); - assert(task->state == BLOCKED || task->state == SLEEPING); - task_node_leave_list(task); - task->state = READY; - task_node_add_to_ready_list_back(task); -} - /// @brief @warning not tested function /// @param priority static void _set_cur_task_priority(int priority) { - if (priority < 0 || priority >= TASK_MAX_PRIORITY) { - ERROR("priority is invalid\n"); - return; - } - - 
struct Thread* current_task = cur_cpu()->task; - assert(current_task != NULL && current_task->state == RUNNING); - - task_node_leave_list(current_task); - - current_task->priority = priority; - - task_node_add_to_ready_list_back(current_task); - return; } struct XiziTaskManager xizi_task_manager = { .init = _task_manager_init, - .task_set_default_schedule_attr = _task_set_default_schedule_attr, - .next_runnable_task = max_priority_runnable_task, .task_scheduler = _scheduler, - - .task_block = _task_block, - .task_unblock = _task_unblock, - .task_yield_noschedule = _task_yield_noschedule, .set_cur_task_priority = _set_cur_task_priority }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c index b194882bb..14430e1b8 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/tools/rbtree.c @@ -326,20 +326,20 @@ RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree) int rbt_insert(RbtTree* tree, uintptr_t key, void* data) { if (rbt_search(tree, key) != NULL) { - return -2; + return RBTTREE_INSERT_EXISTED; } RbtNode* node = rbtree_createnode(key, data); RbtNode* samenode = NULL; if (node == NULL) - return -1; + return RBTTREE_INSERT_FAILED; else samenode = __rbtree_insert(node, tree); assert(samenode == NULL); tree->nr_ele++; - return 0; + return RBTTREE_INSERT_SECC; } void replace_node(RbtTree* t, RbtNode* oldn, RbtNode* newn) @@ -455,7 +455,7 @@ int rbt_delete(RbtTree* tree, uintptr_t key) { RbtNode* node = do_lookup(key, tree, NULL); if (node == NULL) - return -1; + return RBTTREE_DELETE_FAILED; else __rbtree_remove(node, tree); @@ -463,5 +463,20 @@ int rbt_delete(RbtTree* tree, uintptr_t key) if (rbt_is_empty(tree)) { assert(tree->root == NULL); } - return 0; + return RBTTREE_DELETE_SUCC; +} + +void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn) +{ + if (node == NULL) { + return; + } + fn(node); + rbt_traverse_inner(node->left, fn); + 
rbt_traverse_inner(node->right, fn); +} + +void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn) +{ + rbt_traverse_inner(tree->root, fn); } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c index 937e4b219..c22faad7f 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c @@ -62,6 +62,11 @@ void hw_current_second(uintptr_t* second) *second = p_clock_driver->get_second(); } +void count_down_sleeping_task(RbtNode* node) +{ + /// @todo implement +} + uint64_t global_tick = 0; int xizi_clock_handler(int irq, void* tf, void* arg) { @@ -73,24 +78,25 @@ int xizi_clock_handler(int irq, void* tf, void* arg) // handle current thread struct Thread* current_task = cur_cpu()->task; if (current_task) { - current_task->remain_tick--; - current_task->maxium_tick--; - if (current_task->remain_tick == 0) { - xizi_task_manager.task_yield_noschedule(current_task, false); + struct ScheduleNode* snode = ¤t_task->snode; + snode->sched_context.remain_tick--; + if (snode->sched_context.remain_tick == 0) { + task_into_ready(current_task); } } // todo: cpu 0 will handle sleeping thread - struct Thread* thread = NULL; - DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node) - { - assert(thread->state == SLEEPING); - thread->sleep_context.remain_ms--; - if (thread->sleep_context.remain_ms <= 0) { - xizi_task_manager.task_unblock(thread); - break; - } - } + rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task); + + // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node) + // { + // assert(thread->state == SLEEPING); + // thread->sleep_context.remain_ms--; + // if (thread->sleep_context.remain_ms <= 0) { + // xizi_task_manager.task_unblock(thread); + // break; + // } + // } } return 0; } \ No newline at end of file diff --git 
a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c index baa3ae2de..64053ed80 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c @@ -84,7 +84,7 @@ void intr_irq_dispatch(struct trapframe* tf) // finish irq. p_intr_driver->hw_after_irq(int_info); - if (cur_cpu()->task == NULL || current_task->state != RUNNING) { + if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) { cur_cpu()->task = NULL; context_switch(¤t_task->thread_context.context, cur_cpu()->scheduler); } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c index 25c4bd1de..62422e9e0 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c @@ -56,7 +56,7 @@ void software_irq_dispatch(struct trapframe* tf) /// @todo: Handle dead task int syscall_num = -1; - if (cur_task && cur_task->state != DEAD) { + if (cur_task && cur_task->snode.state != DEAD) { cur_task->thread_context.trapframe = tf; // call syscall @@ -64,7 +64,7 @@ void software_irq_dispatch(struct trapframe* tf) arch_set_return(tf, ret); } - if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) { + if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) { cur_cpu()->task = NULL; context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler); }