Support Schedule node
This commit is contained in:
parent af1ceec308
commit e0ff453726
@@ -54,13 +54,20 @@ typedef struct {
struct IpcArgInfo {
    uint16_t offset;
    uint16_t len;
    union {
        uint16_t attr;
        struct {
            uint16_t null_ptr : 1;
            uint16_t reserved : 15;
        };
    };
} __attribute__((packed));

/* [header, ipc_arg_buffer_len[], ipc_arg_buffer[]] */
struct IpcMsg {
    ipc_msg_header header;
    uintptr_t buf[];
};
} __attribute__((packed));
enum {
    IPC_ARG_INFO_BASE_OFFSET = sizeof(ipc_msg_header),
};
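The layout comment above ([header, ipc_arg_buffer_len[], ipc_arg_buffer[]]) together with IPC_ARG_INFO_BASE_OFFSET suggests that a table of IpcArgInfo records sits right after the message header, each record locating one argument inside the message by offset and len. The sketch below is only one plausible reading of that layout: the extra header fields, the nr_args count and the helper name ipc_arg_payload() are assumptions, not the tree's actual API.

#include <stddef.h>
#include <stdint.h>

typedef struct {          /* hypothetical stand-in for ipc_msg_header */
    uint32_t magic;
    uint16_t valid;
    uint16_t done;
    uint16_t nr_args;     /* assumed: number of IpcArgInfo records */
} ipc_msg_header;

struct IpcArgInfo {       /* mirrors the struct in the hunk above */
    uint16_t offset;
    uint16_t len;
    union {
        uint16_t attr;
        struct {
            uint16_t null_ptr : 1;
            uint16_t reserved : 15;
        };
    };
} __attribute__((packed));

enum { IPC_ARG_INFO_BASE_OFFSET = sizeof(ipc_msg_header) };

/* Return a pointer to the idx-th argument payload, or NULL for a null arg,
 * assuming the arg-info table starts right after the header and each record's
 * offset is measured from the start of the message. */
static void* ipc_arg_payload(void* msg, uint16_t idx)
{
    struct IpcArgInfo* tab
        = (struct IpcArgInfo*)((char*)msg + IPC_ARG_INFO_BASE_OFFSET);
    if (tab[idx].null_ptr) {
        return NULL;
    }
    return (char*)msg + tab[idx].offset;
}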
@@ -21,6 +21,7 @@ enum ThreadState {

typedef struct ScheduleContext {
    intptr_t remain_tick;
    uint64_t run_time;
} ScheduleContext;

typedef struct TaskSleepContext {
@@ -32,6 +32,7 @@ Modification:

static bool kill_succ;

extern int sys_exit(struct Thread* ptask);
static bool kill_task(RbtNode* node, void* id)
{
    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
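The rest of the commit manipulates struct ScheduleNode through pointers like the one above, but its definition is not part of the visible hunks. As a reading aid, here is a rough shape inferred from the fields the later hunks touch (pthd, snode_id, state, sched_context, sleep_context); the types, ordering and embedded typedefs are guesses, not the actual declaration.

#include <stdint.h>

struct Thread;                      /* defined elsewhere in the tree */

typedef struct ScheduleContext {    /* as in the ScheduleContext hunk above */
    intptr_t remain_tick;
    uint64_t run_time;
} ScheduleContext;

typedef struct TaskSleepContext {   /* remain_ms is assigned in init_schedule_node */
    intptr_t remain_ms;             /* type assumed */
} TaskSleepContext;

/* Referenced from RbtNode::data in the per-state pools, as in kill_task above. */
struct ScheduleNode {
    uintptr_t snode_id;             /* set from bind_thd->tid; type assumed */
    int state;                      /* INIT / READY / RUNNING / BLOCKED ... */
    struct Thread* pthd;            /* back-pointer to the owning thread */
    ScheduleContext sched_context;
    TaskSleepContext sleep_context;
};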
@@ -114,7 +114,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
    if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
        task_yield(cur_task);
        // @todo support blocking (currently buggy when running on 4 cores)
        task_block(cur_task);
        // task_block(cur_task);
    }
    return 0;
}
@@ -75,6 +75,7 @@ static void send_irq_to_user(int irq_num)
    buf->header.done = 0;
    buf->header.magic = IPC_MSG_MAGIC;
    buf->header.valid = 1;
    enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side);

    if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) {
        task_into_ready(irq_forward_table[irq_num].handle_task);
@@ -31,6 +31,7 @@ Modification:
#include "schedule_algo.h"

static struct Thread* next_runable_task;
static uint64_t min_run_time;

bool find_runable_task(RbtNode* node, void* data)
{
@@ -38,8 +39,12 @@ bool find_runable_task(RbtNode* node, void* data)
    struct Thread* thd = snode->pthd;

    if (!thd->dead) {
        if (thd->snode.sched_context.run_time <= min_run_time) {
            next_runable_task = thd;
            return false;
            min_run_time = thd->snode.sched_context.run_time;
            thd->snode.sched_context.run_time++;
        }
        return true;
    } else {
        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
        tlo->free_pcb(thd);
@@ -53,6 +58,7 @@ struct Thread* max_priority_runnable_task(void)
{
    /// @todo better strategy
    next_runable_task = NULL;
    min_run_time = UINT64_MAX;
    rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runable_task, NULL);
    return next_runable_task;
}
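In plain terms, the two functions above walk the READY pool and select the thread with the smallest accumulated run_time, then charge it so the other threads catch up on later picks. The toy below models that policy with a plain array instead of the red-black tree and charges only the final pick (the hunk above also bumps each provisional candidate during traversal); it is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct demo_task {
    const char* name;
    int dead;
    uint64_t run_time;
};

/* Pick the live task with the smallest accumulated run_time, then charge it
 * one unit so the others catch up on later picks. */
static struct demo_task* pick_next(struct demo_task* tasks, int n)
{
    struct demo_task* next = NULL;
    uint64_t min_run_time = UINT64_MAX;
    for (int i = 0; i < n; i++) {
        if (tasks[i].dead) {
            continue;   /* the kernel frees the PCB here instead */
        }
        if (tasks[i].run_time <= min_run_time) {
            min_run_time = tasks[i].run_time;
            next = &tasks[i];
        }
    }
    if (next != NULL) {
        next->run_time++;
    }
    return next;
}

int main(void)
{
    struct demo_task tasks[] = { { "a", 0, 3 }, { "b", 0, 1 }, { "c", 1, 0 } };
    for (int i = 0; i < 4; i++) {
        printf("next: %s\n", pick_next(tasks, 3)->name);    /* b, b, b, a */
    }
    return 0;
}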
@@ -65,7 +71,10 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
{
    snode->pthd = bind_thd;
    snode->snode_id = bind_thd->tid;

    snode->sched_context.remain_tick = 0;
    snode->sched_context.run_time = 0;

    snode->sleep_context.remain_ms = 0;
    snode->state = INIT;
    if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], //
@@ -150,6 +159,7 @@ void task_yield(struct Thread* thd)
    bool trans_res = task_trans_sched_state(snode, //
        &g_scheduler.snode_state_pool[thd_cur_state], //
        &g_scheduler.snode_state_pool[READY], READY);
    snode->sched_context.remain_tick = TASK_CLOCK_TICK;
    assert(trans_res == true);
    return;
}
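Several of the hunks above move a ScheduleNode between the g_scheduler.snode_state_pool[...] trees and update its state through task_trans_sched_state(). The toy model below shows the same move-between-per-state-pools idea with singly linked lists standing in for the red-black trees; every name here is illustrative, none of this is the kernel's implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { INIT, READY, RUNNING, BLOCKED, NR_STATES };

struct toy_node {
    int id;
    int state;
    struct toy_node* next;
};

static struct toy_node* pool[NR_STATES];    /* one container per state */

static void pool_push(int state, struct toy_node* n)
{
    n->state = state;
    n->next = pool[state];
    pool[state] = n;
}

static bool pool_remove(int state, struct toy_node* n)
{
    for (struct toy_node** pp = &pool[state]; *pp != NULL; pp = &(*pp)->next) {
        if (*pp == n) {
            *pp = n->next;
            return true;
        }
    }
    return false;
}

/* Rough analogue of task_trans_sched_state(): unlink from the source pool,
 * link into the destination pool, and record the new state. */
static bool toy_trans_state(struct toy_node* n, int from, int to)
{
    if (!pool_remove(from, n)) {
        return false;
    }
    pool_push(to, n);
    return true;
}

int main(void)
{
    struct toy_node t = { .id = 1, .state = INIT, .next = NULL };
    pool_push(INIT, &t);
    toy_trans_state(&t, INIT, READY);       /* analogous to making a new task runnable */
    toy_trans_state(&t, READY, RUNNING);    /* analogous to dispatching it */
    printf("task %d is in state %d\n", t.id, t.state);  /* task 1 is in state 2 */
    return 0;
}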
@@ -288,9 +288,9 @@ struct TaskLifecycleOperations task_lifecycle_ops = {
static void task_state_set_running(struct Thread* task)
{
    assert(task != NULL && task->snode.state == READY);
    task_trans_sched_state(&task->snode, //
    assert(task_trans_sched_state(&task->snode, //
        &g_scheduler.snode_state_pool[READY], //
        &g_scheduler.snode_state_pool[RUNNING], RUNNING);
        &g_scheduler.snode_state_pool[RUNNING], RUNNING));
}

struct Thread* next_task_emergency = NULL;
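One caveat on the hunk above: the new version wraps the task_trans_sched_state() call itself in assert(). If this file is ever built with NDEBUG defined, the C standard reduces assert(expr) to nothing and the transition would silently vanish, whereas task_yield() above performs the call unconditionally and only asserts on the captured result. A minimal standalone demonstration of the difference:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a state transition that has a side effect and reports success. */
static bool do_transition(int* state, int to)
{
    *state = to;
    return true;
}

int main(void)
{
    int state = 0;

    /* Pattern A: side effect inside assert(). Compile with -DNDEBUG and the
     * whole call is removed, so state stays unchanged here. */
    assert(do_transition(&state, 1));

    /* Pattern B: do the work unconditionally, assert only on the result.
     * The transition still happens in NDEBUG builds. */
    bool ok = do_transition(&state, 2);
    assert(ok);
    (void)ok;

    printf("state = %d\n", state);  /* 2 in both builds, but under -DNDEBUG only
                                       because pattern B runs after pattern A */
    return 0;
}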