centralize thread state transition
parent d68abecdba
commit 74da96e1f1
@@ -77,7 +77,7 @@ _boot_start:
    mul r3, r2, r1
    sub r0, r0, r3

-   msr CPSR_c, #ARM_MODE_SVC | I_BIT | F_BIT
+   msr CPSR_c, #ARM_MODE_SVC | I_BIT
    mov sp, r0
    sub r0, r0, r1
@@ -17,11 +17,15 @@ enum ThreadState {
    BLOCKED,
    SLEEPING,
    NR_STATE,
+
+   // the following state is temporary, for kernel use only
+   TRANS_WAKING,
};

typedef struct ScheduleContext {
    intptr_t remain_tick;
    uint64_t run_time;
+   intptr_t unblock_signals;
} ScheduleContext;

typedef struct TaskSleepContext {
@@ -32,6 +36,7 @@ struct ScheduleNode {
    struct Thread* pthd;
    snode_id_t snode_id;
    enum ThreadState state;
+   Queue state_trans_signal_queue;

    ScheduleContext sched_context;
    TaskSleepContext sleep_context;
@@ -40,15 +45,17 @@ struct ScheduleNode {
struct Scheduler {
    TraceTag tag;
    RbtTree snode_state_pool[NR_STATE];
+   RbtTree state_trans_ref_map;
    struct XiziSemaphorePool semaphore_pool;
};

extern struct Scheduler g_scheduler;

bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd);
+ void enqueue_task_trans_state(struct Thread* thd, enum ThreadState state);
+ #define THREAD_TRANS_STATE(thd, state) enqueue_task_trans_state(thd, state);

bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state);
void task_block(struct Thread* thd);
void task_dead(struct Thread* thd);
void task_yield(struct Thread* thd);
void task_into_ready(struct Thread* thd);
void task_into_ready(struct Thread* thd);
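
The declarations above implement a request/apply split: call sites only record the state they want a thread to reach (via THREAD_TRANS_STATE and the per-node state_trans_signal_queue), and the scheduler later applies all recorded transitions in one place. The stand-alone sketch below illustrates that split, including how a wakeup that arrives before the corresponding block is applied gets remembered in an unblock counter. Every name in the sketch (toy_state, toy_thread, request_state, apply_pending, PENDING_MAX) is invented for illustration and is not part of this kernel; the real code paths are enqueue_task_trans_state and central_trans_task_state, shown later in this commit.

/* Toy model of the "queue a transition, apply it centrally" pattern.
 * Not kernel code; all names here are made up for this sketch. */
#include <assert.h>
#include <stdio.h>

enum toy_state { TOY_READY, TOY_RUNNING, TOY_BLOCKED, TOY_WAKING };

#define PENDING_MAX 8

struct toy_thread {
    enum toy_state state;
    int unblock_signals;                 /* wakeups seen before a block is applied */
    enum toy_state pending[PENDING_MAX]; /* queued transition requests, FIFO */
    int head, tail;
};

/* Caller side: only record the intent, do not touch scheduler structures. */
static void request_state(struct toy_thread* t, enum toy_state s)
{
    assert(t->tail - t->head < PENDING_MAX);
    t->pending[t->tail++ % PENDING_MAX] = s;
}

/* Scheduler side: the single place where states actually change. */
static void apply_pending(struct toy_thread* t)
{
    while (t->head != t->tail) {
        enum toy_state next = t->pending[t->head++ % PENDING_MAX];
        switch (next) {
        case TOY_BLOCKED:
            if (t->unblock_signals > 0) { /* a wakeup already raced past us */
                t->unblock_signals--;
                t->state = TOY_READY;
            } else {
                t->state = TOY_BLOCKED;
            }
            break;
        case TOY_WAKING:
            if (t->state == TOY_BLOCKED)
                t->state = TOY_READY;
            else
                t->unblock_signals++;     /* remember the wakeup for later */
            break;
        default:
            t->state = next;
            break;
        }
    }
}

int main(void)
{
    struct toy_thread t = { .state = TOY_RUNNING };
    request_state(&t, TOY_WAKING);  /* e.g. a signal arriving early */
    request_state(&t, TOY_BLOCKED); /* e.g. the thread deciding to wait */
    apply_pending(&t);              /* the early wakeup cancels the pending block */
    assert(t.state == TOY_READY);
    printf("final state: %d\n", t.state);
    return 0;
}
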
@@ -45,7 +45,7 @@ Modification:
#define LOWLEVEL_ALLOC(size) kalloc(size)
#define LOWLEVEL_FREE(ptr) kfree(ptr)

- #define ARENA_SIZE_PER_INCREASE PAGE_SIZE
+ #define ARENA_SIZE_PER_INCREASE (2 * PAGE_SIZE)
#define MAX_NR_ELEMENT_PER_SLABPAGE 64

void slab_init(struct slab_allocator* const allocator, const size_t element_size, char* name)
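
The only functional change in this hunk is doubling ARENA_SIZE_PER_INCREASE. As a rough check (assuming PAGE_SIZE is 4096 bytes, which is an assumption here and not stated in the commit), each arena growth step goes from 4 KiB to 8 KiB; with MAX_NR_ELEMENT_PER_SLABPAGE fixed at 64, one growth step can hold elements of up to 8192 / 64 = 128 bytes. The small program below only spells out that arithmetic; the constants are restated locally for the sketch.

/* Back-of-the-envelope check of the constant change above.
 * PAGE_SIZE = 4096 is assumed for illustration; the real value
 * comes from the platform headers. */
#include <stdio.h>

#define PAGE_SIZE 4096 /* assumed for this sketch */
#define ARENA_SIZE_PER_INCREASE (2 * PAGE_SIZE)
#define MAX_NR_ELEMENT_PER_SLABPAGE 64

int main(void)
{
    /* Each arena growth now adds 8192 bytes instead of 4096. */
    printf("arena increase: %d bytes\n", ARENA_SIZE_PER_INCREASE);
    /* With at most 64 elements per slab page, one increase can hold
     * elements of up to 8192 / 64 = 128 bytes. */
    printf("max element size per increase: %d bytes\n",
           ARENA_SIZE_PER_INCREASE / MAX_NR_ELEMENT_PER_SLABPAGE);
    return 0;
}
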
@@ -70,9 +70,7 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
            // @todo fix memory leak
        } else {
            assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-           if (server_to_info->snode.state == BLOCKED) {
-               task_into_ready(session_backend->server);
-           }
+           THREAD_TRANS_STATE(server_to_info, TRANS_WAKING);
        }
    }
@@ -46,6 +46,6 @@ int sys_exit(struct Thread* ptask)
        tlo->free_pcb(ptask);
    }
    // yield current task in case it wants to exit itself
-   task_yield(cur_cpu()->task);
+   THREAD_TRANS_STATE(cur_cpu()->task, READY);
    return 0;
}
@@ -112,9 +112,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
    }

    if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
-       task_yield(cur_task);
-       // @todo support blocking(now bug at 4 cores running)
-       // task_block(cur_task);
+       THREAD_TRANS_STATE(cur_task, BLOCKED);
    }
    return 0;
}
@@ -75,10 +75,8 @@ static void send_irq_to_user(int irq_num)
    buf->header.done = 0;
    buf->header.magic = IPC_MSG_MAGIC;
    buf->header.valid = 1;
-   enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side);
-
-   if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) {
-       task_into_ready(irq_forward_table[irq_num].handle_task);
+   if (enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side)) {
+       THREAD_TRANS_STATE(irq_forward_table[irq_num].handle_task, TRANS_WAKING);
    }

    /* add session head */
@@ -93,7 +91,7 @@ int user_irq_handler(int irq, void* tf, void* arg)

        next_task_emergency = irq_forward_table[irq].handle_task;
        if (cur_cpu()->task != NULL) {
-           task_yield(cur_cpu()->task);
+           THREAD_TRANS_STATE(cur_cpu()->task, READY);
        }
    }
    return 0;
@@ -36,11 +36,8 @@ Modification:
int sys_sleep(intptr_t ms)
{
    struct Thread* cur_task = cur_cpu()->task;
-   task_yield(cur_task);
    cur_task->snode.sleep_context.remain_ms = ms;
-   task_trans_sched_state(&cur_task->snode, //
-       &g_scheduler.snode_state_pool[READY], //
-       &g_scheduler.snode_state_pool[SLEEPING], SLEEPING);
+   THREAD_TRANS_STATE(cur_task, SLEEPING);

    return 0;
}
@@ -60,9 +60,7 @@ int sys_wait_session(struct Session* userland_session)
    assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));

    ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait);
-   if (server_to_call->snode.state == BLOCKED) {
-       task_into_ready(session_backend->server);
-   }
+   THREAD_TRANS_STATE(server_to_call, TRANS_WAKING);

    return 0;
}
@@ -36,6 +36,6 @@ Modification:
int sys_yield(task_yield_reason reason)
{
    struct Thread* cur_task = cur_cpu()->task;
-   task_yield(cur_task);
+   THREAD_TRANS_STATE(cur_task, READY);
    return 0;
}
@@ -28,10 +28,12 @@ Modification:
1. first version
*************************************************/
#include "log.h"
#include "multicores.h"
#include "schedule_algo.h"

static struct Thread* next_runable_task;
static uint64_t min_run_time;
+ #define MIN_RUN_TIME_BOUND 5

bool find_runable_task(RbtNode* node, void* data)
{
@@ -44,6 +46,10 @@ bool find_runable_task(RbtNode* node, void* data)
            min_run_time = thd->snode.sched_context.run_time;
            thd->snode.sched_context.run_time++;
        }
+
+       if (min_run_time <= MIN_RUN_TIME_BOUND) {
+           return false;
+       }
        return true;
    } else {
        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
@@ -81,13 +87,21 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
            snode->snode_id, (void*)snode)) {
        return false;
    }
+   queue_init(&snode->state_trans_signal_queue);
    return true;
}

+ void enqueue_task_trans_state(struct Thread* thd, enum ThreadState state)
+ {
+     /// @todo handle memory drain
+     assert(enqueue(&thd->snode.state_trans_signal_queue, state, NULL));
+     int res = rbt_insert(&g_scheduler.state_trans_ref_map, thd->tid, (void*)thd);
+     assert(RBTTREE_INSERT_SECC == res || RBTTREE_INSERT_EXISTED == res);
+ }
+
bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state)
{
    assert(snode != NULL);
    // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name);
    assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL);
    if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) {
        DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid);
@@ -124,8 +138,6 @@ void task_block(struct Thread* thd)
    struct ScheduleNode* snode = &thd->snode;
    enum ThreadState thd_cur_state = snode->state;

-   assert(thd_cur_state != RUNNING);
-
    bool trans_res = task_trans_sched_state(snode, //
        &g_scheduler.snode_state_pool[thd_cur_state], //
        &g_scheduler.snode_state_pool[BLOCKED], BLOCKED);
@@ -139,23 +151,6 @@ void task_into_ready(struct Thread* thd)
    struct ScheduleNode* snode = &thd->snode;
    enum ThreadState thd_cur_state = snode->state;

-   bool trans_res = task_trans_sched_state(snode, //
-       &g_scheduler.snode_state_pool[thd_cur_state], //
-       &g_scheduler.snode_state_pool[READY], READY);
-   snode->sched_context.remain_tick = TASK_CLOCK_TICK;
-   assert(trans_res = true);
-   return;
- }
-
- void task_yield(struct Thread* thd)
- {
-     assert(thd != NULL);
-     struct ScheduleNode* snode = &thd->snode;
-     enum ThreadState thd_cur_state = snode->state;
-
-     assert(thd == cur_cpu()->task && thd_cur_state == RUNNING);
-     cur_cpu()->task = NULL;
-
    bool trans_res = task_trans_sched_state(snode, //
        &g_scheduler.snode_state_pool[thd_cur_state], //
        &g_scheduler.snode_state_pool[READY], READY);
@@ -105,8 +105,7 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem

    // waiting at the sem
    sem->val--;
-   task_yield(thd);
-   task_block(thd);
+   THREAD_TRANS_STATE(thd, BLOCKED);
    assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd));
    return true;
}
@@ -125,7 +124,7 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
        RbtNode* root = sem->wait_thd_tree.root;
        struct Thread* thd = (struct Thread*)root->data;
        rbt_delete(&sem->wait_thd_tree, root->key);
-       task_into_ready(thd);
+       THREAD_TRANS_STATE(thd, TRANS_WAKING);
    }

    sem->val++;
@@ -81,6 +81,7 @@ static void _task_manager_init()
    for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
        rbtree_init(&g_scheduler.snode_state_pool[pool_id]);
    }
+   rbtree_init(&g_scheduler.state_trans_ref_map);

    // tid pool
    xizi_task_manager.next_pid = 1;
@@ -114,9 +115,7 @@ int _task_return_sys_resources(struct Thread* ptask)
        // @todo fix memory leak
    } else {
        assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-       if (server_to_info->snode.state == BLOCKED) {
-           task_into_ready(server_to_info);
-       }
+       THREAD_TRANS_STATE(server_to_info, BLOCKED);
    }
}
@@ -186,7 +185,7 @@ static void _free_thread(struct Thread* task)
    if (task->memspace->thread_to_notify != NULL) {
        if (task->memspace->thread_to_notify != task) {
            if (task->memspace->thread_to_notify->snode.state == BLOCKED) {
-               task_into_ready(task->memspace->thread_to_notify);
+               THREAD_TRANS_STATE(task->memspace->thread_to_notify, READY);
            } else {
                task->memspace->thread_to_notify->advance_unblock = true;
            }
@@ -293,6 +292,79 @@ static void task_state_set_running(struct Thread* task)
        &g_scheduler.snode_state_pool[RUNNING], RUNNING));
}

+ bool rbt_in_queue(RbtNode* node, void* data)
+ {
+     Queue* queue = (Queue*)data;
+     return enqueue(queue, node->key, node->data);
+ }
+
+ extern void show_tasks(void);
+ static void central_trans_task_state()
+ {
+     Queue tmp_queue;
+     queue_init(&tmp_queue);
+     rbt_traverse(&g_scheduler.state_trans_ref_map, rbt_in_queue, (void*)&tmp_queue);
+
+     while (!queue_is_empty(&tmp_queue)) {
+         struct Thread* thd = (struct Thread*)queue_front(&tmp_queue)->data;
+         struct ScheduleNode* snode = &thd->snode;
+         assert(cur_cpu()->task != NULL);
+         if (snode->state == RUNNING && cur_cpu()->task->tid != thd->tid) {
+             dequeue(&tmp_queue);
+             continue;
+         }
+
+         Queue* trans_queue = &snode->state_trans_signal_queue;
+         while (!queue_is_empty(trans_queue)) {
+             QueueNode* cur_qnode = queue_front(trans_queue);
+             enum ThreadState next_state = cur_qnode->key;
+             switch (next_state) {
+             case READY: {
+                 if (snode->state == RUNNING || snode->state == READY) {
+                     task_into_ready(thd);
+                 } else {
+                     ERROR("Thread %s(%d) Error trans to READY\n", thd->name, thd->tid);
+                 }
+                 break;
+             }
+             case BLOCKED: {
+                 if (snode->sched_context.unblock_signals > 0) {
+                     snode->sched_context.unblock_signals--;
+                     task_into_ready(thd);
+                 } else {
+                     task_block(thd);
+                 }
+                 break;
+             }
+             case SLEEPING: {
+                 /// @todo support sleep
+                 break;
+             }
+             case TRANS_WAKING: {
+                 if (snode->state == BLOCKED) {
+                     task_into_ready(thd);
+                 } else {
+                     snode->sched_context.unblock_signals++;
+                     task_into_ready(thd);
+                 }
+                 break;
+             }
+             case DEAD: {
+                 /// @todo
+                 break;
+             }
+             default:
+                 break;
+             }
+
+             dequeue(trans_queue);
+         }
+
+         assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.state_trans_ref_map, thd->tid));
+         dequeue(&tmp_queue);
+     }
+ }
+
struct Thread* next_task_emergency = NULL;
extern void context_switch(struct context**, struct context*);
static void _scheduler(struct SchedulerRightGroup right_group)
@@ -321,12 +393,14 @@ static void _scheduler(struct SchedulerRightGroup right_group)
        }

        /* run the chosen task */
        // DEBUG_PRINTF("Thread %s(%d) to RUNNING\n", next_task->name, next_task->tid);
        task_state_set_running(next_task);
        cpu->task = next_task;
        assert(next_task->memspace->pgdir.pd_addr != NULL);
        p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr));
        context_switch(&cpu->scheduler, next_task->thread_context.context);
-       assert(next_task->snode.state != RUNNING);
+       central_trans_task_state();
+       cpu->task = NULL;
    }
}
@@ -64,7 +64,6 @@ __attribute__((optimize("O0"))) void dabort_handler(struct trapframe* r)

    xizi_enter_kernel();
    sys_exit(cur_task);
-   assert(cur_cpu()->task == NULL);
    context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
    panic("dabort end should never be reashed.\n");
}
@@ -84,7 +83,6 @@ __attribute__((optimize("O0"))) void iabort_handler(struct trapframe* r)

    xizi_enter_kernel();
    sys_exit(cur_task);
-   assert(cur_cpu()->task == NULL);
    context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
    panic("iabort end should never be reashed.\n");
}
@@ -82,7 +82,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
        struct ScheduleNode* snode = &current_task->snode;
        snode->sched_context.remain_tick--;
        if (snode->sched_context.remain_tick == 0) {
-           task_into_ready(current_task);
+           THREAD_TRANS_STATE(current_task, READY);
        }
    }
@@ -84,8 +84,8 @@ void intr_irq_dispatch(struct trapframe* tf)
    // finish irq.
    p_intr_driver->hw_after_irq(int_info);

-   if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) {
-       cur_cpu()->task = NULL;
+   assert(cur_cpu()->task == current_task && current_task->snode.state == RUNNING);
+   if (!queue_is_empty(&current_task->snode.state_trans_signal_queue)) {
        context_switch(&current_task->thread_context.context, cur_cpu()->scheduler);
    }
    assert(current_task == cur_cpu()->task);
@@ -64,8 +64,8 @@ void software_irq_dispatch(struct trapframe* tf)
        arch_set_return(tf, ret);
    }

-   if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) {
-       cur_cpu()->task = NULL;
+   assert(cur_cpu()->task == cur_task && cur_task->snode.state == RUNNING);
+   if (!queue_is_empty(&cur_task->snode.state_trans_signal_queue)) {
        context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
    }
    if (syscall_num == SYSCALL_EXIT) {