From 74da96e1f11a41aa6d5738e5852696edb89d51fc Mon Sep 17 00:00:00 2001 From: TXuian <1163589503@qq.com> Date: Tue, 24 Dec 2024 16:10:53 +0800 Subject: [PATCH] centralize thread state transition --- .../preboot_for_imx6q-sabrelite/boot.S | 2 +- .../XiZi_AIoT/softkernel/include/scheduler.h | 11 ++- .../softkernel/memory/object_allocator.c | 2 +- .../softkernel/syscall/sys_close_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_exit.c | 2 +- .../softkernel/syscall/sys_poll_session.c | 4 +- .../softkernel/syscall/sys_register_irq.c | 8 +- .../XiZi_AIoT/softkernel/syscall/sys_sleep.c | 5 +- .../softkernel/syscall/sys_wait_session.c | 4 +- .../XiZi_AIoT/softkernel/syscall/sys_yield.c | 2 +- .../XiZi_AIoT/softkernel/task/schedule.c | 35 ++++---- .../XiZi_AIoT/softkernel/task/semaphore.c | 5 +- Ubiquitous/XiZi_AIoT/softkernel/task/task.c | 84 +++++++++++++++++-- .../XiZi_AIoT/softkernel/trap/abort_handler.c | 2 - .../softkernel/trap/clock_irq_handler.c | 2 +- .../softkernel/trap/default_irq_handler.c | 4 +- .../softkernel/trap/software_irq_handler.c | 4 +- 17 files changed, 121 insertions(+), 59 deletions(-) diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/boot.S b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/boot.S index 30e353d3b..ce2b4bcf6 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/boot.S +++ b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/boot.S @@ -77,7 +77,7 @@ _boot_start: mul r3, r2, r1 sub r0, r0, r3 - msr CPSR_c, #ARM_MODE_SVC | I_BIT | F_BIT + msr CPSR_c, #ARM_MODE_SVC | I_BIT mov sp, r0 sub r0, r0, r1 diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h index 464db4f58..fd1adc3bd 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/scheduler.h @@ 
-17,11 +17,15 @@ enum ThreadState { BLOCKED, SLEEPING, NR_STATE, + + // follow state is temp for kernel use + TRANS_WAKING, }; typedef struct ScheduleContext { intptr_t remain_tick; uint64_t run_time; + intptr_t unblock_signals; } ScheduleContext; typedef struct TaskSleepContext { @@ -32,6 +36,7 @@ struct ScheduleNode { struct Thread* pthd; snode_id_t snode_id; enum ThreadState state; + Queue state_trans_signal_queue; ScheduleContext sched_context; TaskSleepContext sleep_context; @@ -40,15 +45,17 @@ struct ScheduleNode { struct Scheduler { TraceTag tag; RbtTree snode_state_pool[NR_STATE]; + RbtTree state_trans_ref_map; struct XiziSemaphorePool semaphore_pool; }; extern struct Scheduler g_scheduler; bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd); +void enqueue_task_trans_state(struct Thread* thd, enum ThreadState state); +#define THREAD_TRANS_STATE(thd, state) enqueue_task_trans_state((thd), (state)) bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state); void task_block(struct Thread* thd); void task_dead(struct Thread* thd); -void task_yield(struct Thread* thd); -void task_into_ready(struct Thread* thd); \ No newline at end of file +void task_into_ready(struct Thread* thd); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/memory/object_allocator.c b/Ubiquitous/XiZi_AIoT/softkernel/memory/object_allocator.c index 6bc4ea021..dd9ac5769 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/memory/object_allocator.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/memory/object_allocator.c @@ -45,7 +45,7 @@ Modification: #define LOWLEVEL_ALLOC(size) kalloc(size) #define LOWLEVEL_FREE(ptr) kfree(ptr) -#define ARENA_SIZE_PER_INCREASE PAGE_SIZE +#define ARENA_SIZE_PER_INCREASE (2 * PAGE_SIZE) #define MAX_NR_ELEMENT_PER_SLABPAGE 64 void slab_init(struct slab_allocator* const allocator, const size_t element_size, char* name) diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c
b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c index 4823230dd..a1e35cdd0 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_close_session.c @@ -70,9 +70,7 @@ int sys_close_session(struct Thread* cur_task, struct Session* session) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->snode.state == BLOCKED) { - task_into_ready(session_backend->server); - } + THREAD_TRANS_STATE(server_to_info, TRANS_WAKING); } } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c index 23bb257d6..e02be1f6b 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c @@ -46,6 +46,6 @@ int sys_exit(struct Thread* ptask) tlo->free_pcb(ptask); } // yield current task in case it wants to exit itself - task_yield(cur_cpu()->task); + THREAD_TRANS_STATE(cur_cpu()->task, READY); return 0; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c index 8227df78a..702594fce 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_poll_session.c @@ -112,9 +112,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) } if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) { - task_yield(cur_task); - // @todo support blocking(now bug at 4 cores running) - // task_block(cur_task); + THREAD_TRANS_STATE(cur_task, BLOCKED); } return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index 990e1a230..601e6e755 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ 
b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -75,10 +75,8 @@ static void send_irq_to_user(int irq_num) buf->header.done = 0; buf->header.magic = IPC_MSG_MAGIC; buf->header.valid = 1; - enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side); - - if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) { - task_into_ready(irq_forward_table[irq_num].handle_task); + if (enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side)) { + THREAD_TRANS_STATE(irq_forward_table[irq_num].handle_task, TRANS_WAKING); } /* add session head */ @@ -93,7 +91,7 @@ int user_irq_handler(int irq, void* tf, void* arg) next_task_emergency = irq_forward_table[irq].handle_task; if (cur_cpu()->task != NULL) { - task_yield(cur_cpu()->task); + THREAD_TRANS_STATE(cur_cpu()->task, READY); } } return 0; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c index 3ba6bbc80..d792b0520 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_sleep.c @@ -36,11 +36,8 @@ Modification: int sys_sleep(intptr_t ms) { struct Thread* cur_task = cur_cpu()->task; - task_yield(cur_task); cur_task->snode.sleep_context.remain_ms = ms; - task_trans_sched_state(&cur_task->snode, // - &g_scheduler.snode_state_pool[READY], // - &g_scheduler.snode_state_pool[SLEEPING], SLEEPING); + THREAD_TRANS_STATE(cur_task, SLEEPING); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c index 4108e180c..2393a94da 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_wait_session.c @@ -60,9 +60,7 @@ int sys_wait_session(struct Session* 
userland_session) assert(!queue_is_empty(&server_to_call->sessions_to_be_handle)); ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait); - if (server_to_call->snode.state == BLOCKED) { - task_into_ready(session_backend->server); - } + THREAD_TRANS_STATE(server_to_call, TRANS_WAKING); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c index fe4f128ee..e9f20afcc 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c @@ -36,6 +36,6 @@ Modification: int sys_yield(task_yield_reason reason) { struct Thread* cur_task = cur_cpu()->task; - task_yield(cur_task); + THREAD_TRANS_STATE(cur_task, READY); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c index b5587dff6..26fc15f6b 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/schedule.c @@ -28,10 +28,12 @@ Modification: 1. 
first version *************************************************/ #include "log.h" +#include "multicores.h" #include "schedule_algo.h" static struct Thread* next_runable_task; static uint64_t min_run_time; +#define MIN_RUN_TIME_BOUND 5 bool find_runable_task(RbtNode* node, void* data) { @@ -44,6 +46,10 @@ bool find_runable_task(RbtNode* node, void* data) min_run_time = thd->snode.sched_context.run_time; thd->snode.sched_context.run_time++; } + + if (min_run_time <= MIN_RUN_TIME_BOUND) { + return false; + } return true; } else { struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag); @@ -81,13 +87,21 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd) snode->snode_id, (void*)snode)) { return false; } + queue_init(&snode->state_trans_signal_queue); return true; } +void enqueue_task_trans_state(struct Thread* thd, enum ThreadState state) +{ + /// @todo handle memory drain + assert(enqueue(&thd->snode.state_trans_signal_queue, state, NULL)); + int res = rbt_insert(&g_scheduler.state_trans_ref_map, thd->tid, (void*)thd); + assert(RBTTREE_INSERT_SECC == res || RBTTREE_INSERT_EXISTED == res); +} + bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state) { assert(snode != NULL); - // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name); assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL); if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) { DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid); @@ -124,8 +138,6 @@ void task_block(struct Thread* thd) struct ScheduleNode* snode = &thd->snode; enum ThreadState thd_cur_state = snode->state; - assert(thd_cur_state != RUNNING); - bool trans_res = task_trans_sched_state(snode, // &g_scheduler.snode_state_pool[thd_cur_state], // &g_scheduler.snode_state_pool[BLOCKED], BLOCKED); @@ -139,23 
+151,6 @@ void task_into_ready(struct Thread* thd) struct ScheduleNode* snode = &thd->snode; enum ThreadState thd_cur_state = snode->state; - bool trans_res = task_trans_sched_state(snode, // - &g_scheduler.snode_state_pool[thd_cur_state], // - &g_scheduler.snode_state_pool[READY], READY); - snode->sched_context.remain_tick = TASK_CLOCK_TICK; - assert(trans_res = true); - return; -} - -void task_yield(struct Thread* thd) -{ - assert(thd != NULL); - struct ScheduleNode* snode = &thd->snode; - enum ThreadState thd_cur_state = snode->state; - - assert(thd == cur_cpu()->task && thd_cur_state == RUNNING); - cur_cpu()->task = NULL; - bool trans_res = task_trans_sched_state(snode, // &g_scheduler.snode_state_pool[thd_cur_state], // &g_scheduler.snode_state_pool[READY], READY); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c index 90b79f46f..0fb3444f4 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/semaphore.c @@ -105,8 +105,7 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem // waiting at the sem sem->val--; - task_yield(thd); - task_block(thd); + THREAD_TRANS_STATE(thd, BLOCKED); assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd)); return true; } @@ -125,7 +124,7 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id) RbtNode* root = sem->wait_thd_tree.root; struct Thread* thd = (struct Thread*)root->data; rbt_delete(&sem->wait_thd_tree, root->key); - task_into_ready(thd); + THREAD_TRANS_STATE(thd, TRANS_WAKING); } sem->val++; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index ced4821df..892639f26 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -81,6 +81,7 @@ static void _task_manager_init() for (int pool_id = 0; pool_id < NR_STATE; pool_id++) { 
rbtree_init(&g_scheduler.snode_state_pool[pool_id]); } + rbtree_init(&g_scheduler.state_trans_ref_map); // tid pool xizi_task_manager.next_pid = 1; @@ -114,9 +115,7 @@ int _task_return_sys_resources(struct Thread* ptask) // @todo fix memory leak } else { assert(!queue_is_empty(&server_to_info->sessions_to_be_handle)); - if (server_to_info->snode.state == BLOCKED) { - task_into_ready(server_to_info); - } + THREAD_TRANS_STATE(server_to_info, TRANS_WAKING); } } @@ -186,7 +185,7 @@ static void _free_thread(struct Thread* task) if (task->memspace->thread_to_notify != NULL) { if (task->memspace->thread_to_notify != task) { if (task->memspace->thread_to_notify->snode.state == BLOCKED) { - task_into_ready(task->memspace->thread_to_notify); + THREAD_TRANS_STATE(task->memspace->thread_to_notify, TRANS_WAKING); } else { task->memspace->thread_to_notify->advance_unblock = true; } @@ -293,6 +292,79 @@ static void task_state_set_running(struct Thread* task) &g_scheduler.snode_state_pool[RUNNING], RUNNING)); } +bool rbt_in_queue(RbtNode* node, void* data) +{ + Queue* queue = (Queue*)data; + return enqueue(queue, node->key, node->data); +} + +extern void show_tasks(void); +static void central_trans_task_state() +{ + Queue tmp_queue; + queue_init(&tmp_queue); + rbt_traverse(&g_scheduler.state_trans_ref_map, rbt_in_queue, (void*)&tmp_queue); + + while (!queue_is_empty(&tmp_queue)) { + struct Thread* thd = (struct Thread*)queue_front(&tmp_queue)->data; + struct ScheduleNode* snode = &thd->snode; + assert(cur_cpu()->task != NULL); + if (snode->state == RUNNING && cur_cpu()->task->tid != thd->tid) { + dequeue(&tmp_queue); + continue; + } + + Queue* trans_queue = &snode->state_trans_signal_queue; + while (!queue_is_empty(trans_queue)) { + QueueNode* cur_qnode = queue_front(trans_queue); + enum ThreadState next_state = cur_qnode->key; + switch (next_state) { + case READY: { + if (snode->state == RUNNING || snode->state == READY) { + task_into_ready(thd); + } else { + ERROR("Thread %s(%d) Error trans
to READY\n", thd->name, thd->tid); + } + break; + } + case BLOCKED: { + if (snode->sched_context.unblock_signals > 0) { + snode->sched_context.unblock_signals--; + task_into_ready(thd); + } else { + task_block(thd); + } + break; + } + case SLEEPING: { + /// @todo support sleep + break; + } + case TRANS_WAKING: { + if (snode->state == BLOCKED) { + task_into_ready(thd); + } else { + snode->sched_context.unblock_signals++; + task_into_ready(thd); + } + break; + } + case DEAD: { + /// @todo + break; + } + default: + break; + } + + dequeue(trans_queue); + } + + assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.state_trans_ref_map, thd->tid)); + dequeue(&tmp_queue); + } +} + struct Thread* next_task_emergency = NULL; extern void context_switch(struct context**, struct context*); static void _scheduler(struct SchedulerRightGroup right_group) @@ -321,12 +393,14 @@ static void _scheduler(struct SchedulerRightGroup right_group) } /* run the chosen task */ + // DEBUG_PRINTF("Thread %s(%d) to RUNNING\n", next_task->name, next_task->tid); task_state_set_running(next_task); cpu->task = next_task; assert(next_task->memspace->pgdir.pd_addr != NULL); p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr)); context_switch(&cpu->scheduler, next_task->thread_context.context); - assert(next_task->snode.state != RUNNING); + central_trans_task_state(); + cpu->task = NULL; } } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c index 119ae2772..ec6546b79 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/abort_handler.c @@ -64,7 +64,6 @@ __attribute__((optimize("O0"))) void dabort_handler(struct trapframe* r) xizi_enter_kernel(); sys_exit(cur_task); - assert(cur_cpu()->task == NULL); context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler); panic("dabort end should never be reashed.\n"); } @@ -84,7 +83,6 @@ 
__attribute__((optimize("O0"))) void iabort_handler(struct trapframe* r) xizi_enter_kernel(); sys_exit(cur_task); - assert(cur_cpu()->task == NULL); context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler); panic("iabort end should never be reashed.\n"); } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c index 1e691fced..4531f130c 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c @@ -82,7 +82,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg) struct ScheduleNode* snode = ¤t_task->snode; snode->sched_context.remain_tick--; if (snode->sched_context.remain_tick == 0) { - task_into_ready(current_task); + THREAD_TRANS_STATE(current_task, READY); } } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c index 64053ed80..e1393828a 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/default_irq_handler.c @@ -84,8 +84,8 @@ void intr_irq_dispatch(struct trapframe* tf) // finish irq. 
p_intr_driver->hw_after_irq(int_info); - if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) { - cur_cpu()->task = NULL; + assert(cur_cpu()->task == current_task && current_task->snode.state == RUNNING); + if (!queue_is_empty(¤t_task->snode.state_trans_signal_queue)) { context_switch(¤t_task->thread_context.context, cur_cpu()->scheduler); } assert(current_task == cur_cpu()->task); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c index 62422e9e0..fc5afec12 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/software_irq_handler.c @@ -64,8 +64,8 @@ void software_irq_dispatch(struct trapframe* tf) arch_set_return(tf, ret); } - if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) { - cur_cpu()->task = NULL; + assert(cur_cpu()->task == cur_task && cur_task->snode.state == RUNNING); + if (!queue_is_empty(&cur_task->snode.state_trans_signal_queue)) { context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler); } if (syscall_num == SYSCALL_EXIT) {