Merge branch 'to_prepare' into push_to_prepare

TXuian 2024-12-24 03:37:35 +08:00
commit d68abecdba
25 changed files with 359 additions and 344 deletions

View File

@@ -73,7 +73,7 @@ Modification:
 #include "cortex_a55.h"
-#define NR_CPU 1 // maximum number of CPUs
+#define NR_CPU 4 // maximum number of CPUs
 static inline uintptr_t arch_curr_tick()
 {

View File

@@ -57,7 +57,7 @@ void panic(char* s)
 /* stack for different mode*/
 static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE];
 extern uint32_t _vector_jumper;
-extern uint32_t _vector_start;
+extern uint32_t* _vector_start;
 extern uint32_t _vector_end;
 void init_cpu_mode_stacks(int cpu_id)
@@ -75,7 +75,7 @@ static void _sys_irq_init(int cpu_id)
     /* load exception vectors */
     init_cpu_mode_stacks(cpu_id);
     if (cpu_id == 0) {
-        volatile uint32_t* vector_base = &_vector_start;
+        volatile uint32_t* vector_base = (uint32_t*)&_vector_start;
         // Set Interrupt handler start address
         vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction

View File

@@ -54,13 +54,20 @@ typedef struct {
 struct IpcArgInfo {
     uint16_t offset;
     uint16_t len;
-};
+    union {
+        uint16_t attr;
+        struct {
+            uint16_t null_ptr : 1;
+            uint16_t reserved : 15;
+        };
+    };
+} __attribute__((packed));
 /* [header, ipc_arg_buffer_len[], ipc_arg_buffer[]] */
 struct IpcMsg {
     ipc_msg_header header;
     uintptr_t buf[];
-};
+} __attribute__((packed));
 enum {
     IPC_ARG_INFO_BASE_OFFSET = sizeof(ipc_msg_header),
 };
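Note: the new attr union overlays the uint16_t with single-bit flags, so a sender can mark a NULL pointer argument without growing the header. A minimal sketch of setting the flag (the variable name is hypothetical; only the struct layout comes from this commit, and the attr aliasing assumes GCC's low-bit-first bit-field layout):

    struct IpcArgInfo info = { .offset = 0, .len = 0 };
    info.null_ptr = 1;            /* argument was a NULL pointer */
    /* info.attr now reads 0x1; bits 1-15 stay reserved */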

View File

@@ -41,7 +41,7 @@ struct ksemaphore {
     sem_id_t id;
     sem_val_t val;
     /* list of waiting threads */
-    struct double_list_node wait_list_guard;
+    RbtTree wait_thd_tree;
     /* list to manage semaphores */
     /// @todo Use RB-Tree to manage all semaphores
     struct double_list_node sem_list_node;

View File

@@ -5,6 +5,13 @@
 #include "actracer.h"
+#define RBTTREE_INSERT_SECC 0
+#define RBTTREE_INSERT_FAILED -1
+#define RBTTREE_INSERT_EXISTED -2
+#define RBTTREE_DELETE_SUCC 0
+#define RBTTREE_DELETE_FAILED -1
 // CLRS
 // Insertion and Deletion in a Red Black Tree
 enum rbt_type {
@@ -26,10 +33,14 @@ typedef struct RbtTree {
     int nr_ele;
 } RbtTree;
+// return whether the traverse needs to continue
+typedef bool(rbt_traverse_fn)(RbtNode* node, void* data);
 void rbtree_init(RbtTree* tree);
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data);
 RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
 int rbt_delete(RbtTree* tree, uintptr_t key);
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data);
 void module_rbt_factory_init(TraceTag* _softkernel_tag);
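sys_kill and show_tasks further down in this diff are the commit's real callers of rbt_traverse; as a minimal standalone sketch of the callback contract (count_nodes is hypothetical, only rbt_traverse and the typedef are from this header):

    static bool count_nodes(RbtNode* node, void* data)
    {
        (*(int*)data)++; /* visit one node */
        return true;     /* true: keep descending into this node's children */
    }

    /* usage: int n = 0; rbt_traverse(&tree, count_nodes, &n); */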

View File

@@ -2,20 +2,53 @@
 #pragma once
 #include "actracer.h"
 #include "ksemaphore.h"
+#include "rbtree.h"
 #define TASK_MAX_PRIORITY 32
+#define UNINIT_SNODE_ID 0
+typedef uintptr_t snode_id_t;
+enum ThreadState {
+    NEVER_RUN = 0,
+    INIT,
+    READY,
+    RUNNING,
+    DEAD,
+    BLOCKED,
+    SLEEPING,
+    NR_STATE,
+};
+typedef struct ScheduleContext {
+    intptr_t remain_tick;
+    uint64_t run_time;
+} ScheduleContext;
+typedef struct TaskSleepContext {
+    int64_t remain_ms;
+} TaskSleepContext;
 struct ScheduleNode {
-    TraceTag task_ref;
-    struct double_list_node list_node;
+    struct Thread* pthd;
+    snode_id_t snode_id;
+    enum ThreadState state;
+    ScheduleContext sched_context;
+    TaskSleepContext sleep_context;
 };
 struct Scheduler {
     TraceTag tag;
-    struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
-    struct double_list_node task_running_list_head;
-    struct double_list_node task_blocked_list_head;
-    struct double_list_node task_sleep_list_head;
+    RbtTree snode_state_pool[NR_STATE];
     struct XiziSemaphorePool semaphore_pool;
 };
+extern struct Scheduler g_scheduler;
+bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd);
+bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state);
+void task_block(struct Thread* thd);
+void task_dead(struct Thread* thd);
+void task_yield(struct Thread* thd);
+void task_into_ready(struct Thread* thd);
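Each thread's ScheduleNode now lives in exactly one of the NR_STATE red-black trees, keyed by snode_id, and a state change is a delete from one pool plus an insert into another. sys_sleep later in this diff is the concrete caller; a sketch of the same idiom, assuming g_scheduler has been initialized:

    struct Thread* thd = cur_cpu()->task;
    thd->snode.sleep_context.remain_ms = 100;
    task_trans_sched_state(&thd->snode,
        &g_scheduler.snode_state_pool[READY],    /* pool the snode sits in now */
        &g_scheduler.snode_state_pool[SLEEPING], /* destination pool */
        SLEEPING);                               /* state recorded on the snode */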

View File

@@ -41,21 +41,15 @@ Modification:
 #include "share_page.h"
 #include "spinlock.h"
+#include "scheduler.h"
 #define TASK_CLOCK_TICK 50
 #define TASK_MAX_PRIORITY 32
 #define TASK_DEFAULT_PRIORITY 2
 #define TASK_NAME_MAX_LEN 16
 #define SLEEP_MONITOR_CORE 0
-enum ProcState {
-    INIT = 0,
-    READY,
-    RUNNING,
-    DEAD,
-    BLOCKED,
-    SLEEPING,
-    NEVER_RUN,
-};
+typedef int tid_t;
 /* Thread Control Block */
 struct ThreadContext {
@@ -75,10 +69,6 @@ struct ThreadContext {
     struct trapframe* trapframe;
 };
-struct TaskSleepContext {
-    int64_t remain_ms;
-};
 /* Process Control Block */
 struct Thread {
     /* task name */
@@ -107,12 +97,7 @@ struct Thread {
     bool advance_unblock; // @todo abandon
     /* task schedule attributes */
-    struct double_list_node node;
-    struct TaskSleepContext sleep_context;
-    enum ProcState state;
-    int priority; // priority
-    int remain_tick;
-    int maxium_tick;
+    struct ScheduleNode snode;
 };
 struct SchedulerRightGroup {
@@ -157,9 +142,6 @@ struct XiziTaskManager {
     /* init task manager */
     void (*init)();
-    /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
-    void (*task_set_default_schedule_attr)(struct Thread*);
     /* use by task_scheduler, find next READY task, should be in locked */
     struct Thread* (*next_runnable_task)(void);
     /* function that's runing by kernel thread context, schedule use tasks */
@@ -168,9 +150,6 @@ struct XiziTaskManager {
     /* handle task state */
     /* call to yield current use task */
     void (*task_yield_noschedule)(struct Thread* task, bool is_blocking);
-    /* block and unblock task */
-    void (*task_block)(struct double_list_node* head, struct Thread* task);
-    void (*task_unblock)(struct Thread* task);
     /* set task priority */
     void (*set_cur_task_priority)(int priority);
 };

View File

@@ -70,8 +70,8 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
             // @todo fix memory leak
         } else {
             assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-            if (server_to_info->state == BLOCKED) {
-                xizi_task_manager.task_unblock(session_backend->server);
+            if (server_to_info->snode.state == BLOCKED) {
+                task_into_ready(session_backend->server);
             }
         }
     }

View File

@@ -41,11 +41,11 @@ int sys_exit(struct Thread* ptask)
     assert(ptask != NULL);
     ptask->dead = true;
     // free that task straightly if it's a blocked task
-    if (ptask->state == BLOCKED) {
+    if (ptask->snode.state == BLOCKED) {
         struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
         tlo->free_pcb(ptask);
     }
     // yield current task in case it wants to exit itself
-    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+    task_yield(cur_cpu()->task);
     return 0;
 }

View File

@@ -27,53 +27,37 @@ Author: AIIT XUOS Lab
 Modification:
 1. first version
 *************************************************/
+#include "task.h"
 #include "trap_common.h"
-#include "task.h"
+static bool kill_succ;
+extern int sys_exit(struct Thread* ptask);
+static bool kill_task(RbtNode* node, void* id)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    tid_t target_id = *(tid_t*)id;
+    if (thd->tid == target_id) {
+        sys_exit(thd);
+        kill_succ = true;
+        return false;
+    }
+    return true;
+}
 extern int sys_exit(struct Thread* task);
 int sys_kill(int id)
 {
-    struct Thread* task = NULL;
-    // check if task is a running one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
-    }
-    // check if task is a blocking one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
-    }
-    struct ksemaphore* sem = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node)
-    {
-        task = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node)
-        {
-            sys_exit(task);
-            return 0;
-        }
-    }
-    // check if task is a ready one
-    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
-        {
-            if (task->tid == id) {
-                sys_exit(task);
-                return 0;
-            }
-        }
-    }
+    kill_succ = false;
+    for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
+        rbt_traverse(&g_scheduler.snode_state_pool[pool_id], kill_task, (void*)&id);
+    }
+    if (kill_succ) {
+        return 0;
+    }
     return -1;
 }

View File

@@ -118,8 +118,8 @@ int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info
     }
     uintptr_t paddr_to_map = *paddr;
-    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 1) {
-        ERROR("mapping invalid memory: 0x%p\n", paddr_to_map);
+    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 2) {
+        ERROR("mapping invalid memory: 0x%p by %d\n", paddr_to_map, cur_task->tid);
         return -1;
     }

View File

@@ -112,9 +112,9 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
     }
     if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
-        xizi_task_manager.task_yield_noschedule(cur_task, false);
+        task_yield(cur_task);
         // @todo support blocking(now bug at 4 cores running)
-        // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
+        // task_block(cur_task);
     }
     return 0;
 }

View File

@@ -75,9 +75,10 @@ static void send_irq_to_user(int irq_num)
     buf->header.done = 0;
     buf->header.magic = IPC_MSG_MAGIC;
     buf->header.valid = 1;
+    enqueue(&irq_forward_table[irq_num].handle_task->sessions_to_be_handle, 0, (void*)&irq_forward_table[irq_num].p_kernel_session->server_side);
-    if (irq_forward_table[irq_num].handle_task->state == BLOCKED) {
-        xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task);
+    if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) {
+        task_into_ready(irq_forward_table[irq_num].handle_task);
     }
     /* add session head */
@@ -92,7 +93,7 @@ int user_irq_handler(int irq, void* tf, void* arg)
         next_task_emergency = irq_forward_table[irq].handle_task;
         if (cur_cpu()->task != NULL) {
-            xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+            task_yield(cur_cpu()->task);
         }
     }
     return 0;
@@ -126,7 +127,9 @@ int sys_register_irq(int irq_num, int irq_opcode)
         struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
         kernel_irq_proxy = tlo->new_thread(pmemspace);
-        kernel_irq_proxy->state = NEVER_RUN;
+        task_trans_sched_state(&kernel_irq_proxy->snode, //
+            &g_scheduler.snode_state_pool[INIT], //
+            &g_scheduler.snode_state_pool[NEVER_RUN], NEVER_RUN);
     }
     // bind irq to session
// bind irq to session // bind irq to session

View File

@@ -36,10 +36,11 @@ Modification:
 int sys_sleep(intptr_t ms)
 {
     struct Thread* cur_task = cur_cpu()->task;
-    xizi_task_manager.task_yield_noschedule(cur_task, false);
-    xizi_task_manager.task_block(&xizi_task_manager.task_sleep_list_head, cur_task);
-    cur_task->state = SLEEPING;
-    cur_task->sleep_context.remain_ms = ms;
+    task_yield(cur_task);
+    cur_task->snode.sleep_context.remain_ms = ms;
+    task_trans_sched_state(&cur_task->snode, //
+        &g_scheduler.snode_state_pool[READY], //
+        &g_scheduler.snode_state_pool[SLEEPING], SLEEPING);
     return 0;
 }

View File

@@ -42,30 +42,13 @@ Modification:
 extern uint8_t _binary_fs_img_start[], _binary_fs_img_end[];
 #define SHOWINFO_BORDER_LINE() LOG_PRINTF("******************************************************\n");
-#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, task->priority, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10)
-void show_tasks(void)
+#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, 0, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10)
+bool print_info(RbtNode* node, void* data)
 {
-    struct Thread* task = NULL;
-    SHOWINFO_BORDER_LINE();
-    for (int i = 0; i < NR_CPU; i++) {
-        LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
-    }
-    SHOWINFO_BORDER_LINE();
-    LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "RUNNING");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-    for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
-        if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) {
-            continue;
-        }
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node)
-        {
-            switch (task->state) {
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    switch (snode->state) {
     case INIT:
         LOG_PRINTF("%-8s", "INIT");
         break;
@@ -78,35 +61,31 @@ void show_tasks(void)
     case DEAD:
         LOG_PRINTF("%-8s", "DEAD");
         break;
+    case BLOCKED:
+        LOG_PRINTF("%-8s", "BLOCK");
+        break;
+    case SLEEPING:
+        LOG_PRINTF("%-8s", "SLEEP");
+        break;
     default:
         break;
     }
-            SHOWTASK_TASK_BASE_INFO(task);
-        }
-    }
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "BLOCK");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_sleep_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "SLEEP");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-    struct ksemaphore* sem = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node)
-    {
-        task = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node)
-        {
-            LOG_PRINTF("%-8s", "BLOCK");
-            SHOWTASK_TASK_BASE_INFO(task);
-        }
-    }
+    SHOWTASK_TASK_BASE_INFO(thd);
+    return true;
+}
+void show_tasks(void)
+{
+    SHOWINFO_BORDER_LINE();
+    for (int i = 0; i < NR_CPU; i++) {
+        LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
+    }
+    SHOWINFO_BORDER_LINE();
+    LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
+    for (int pool_id = INIT; pool_id < NR_STATE; pool_id++) {
+        rbt_traverse(&g_scheduler.snode_state_pool[pool_id], print_info, NULL);
+    }
     SHOWINFO_BORDER_LINE();
@@ -150,7 +129,7 @@ void show_cpu(void)
     assert(current_task != NULL);
     LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n");
-    LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick);
+    LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->snode.sched_context.remain_tick, current_task->snode.sched_context.remain_tick);
     LOG_PRINTF("***********************************************************\n");
     return;

View File

@@ -64,7 +64,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
     strncpy(task->name, last, sizeof(task->name) - 1);
     // init pcb schedule attributes
-    xizi_task_manager.task_set_default_schedule_attr(task);
+    task_into_ready(task);
     // thread init done by here
     if (pmemspace->thread_to_notify == NULL) {

View File

@@ -60,8 +60,8 @@ int sys_wait_session(struct Session* userland_session)
     assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));
     ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait);
-    if (server_to_call->state == BLOCKED) {
-        xizi_task_manager.task_unblock(session_backend->server);
+    if (server_to_call->snode.state == BLOCKED) {
+        task_into_ready(session_backend->server);
     }
     return 0;

View File

@@ -36,6 +36,6 @@ Modification:
 int sys_yield(task_yield_reason reason)
 {
     struct Thread* cur_task = cur_cpu()->task;
-    xizi_task_manager.task_yield_noschedule(cur_task, false);
+    task_yield(cur_task);
     return 0;
 }

View File

@@ -30,68 +30,136 @@ Modification:
 #include "log.h"
 #include "schedule_algo.h"
+static struct Thread* next_runable_task;
+static uint64_t min_run_time;
+bool find_runable_task(RbtNode* node, void* data)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    if (!thd->dead) {
+        if (thd->snode.sched_context.run_time <= min_run_time) {
+            next_runable_task = thd;
+            min_run_time = thd->snode.sched_context.run_time;
+            thd->snode.sched_context.run_time++;
+        }
+        return true;
+    } else {
+        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
+        tlo->free_pcb(thd);
+        return false;
+    }
+    return true;
+}
 struct Thread* max_priority_runnable_task(void)
 {
-    static struct Thread* task = NULL;
-    static int priority = 0;
-    priority = __builtin_ffs(ready_task_priority) - 1;
-    if (priority > 31 || priority < 0) {
-        return NULL;
-    }
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
-    {
-        assert(task != NULL);
-        if (task->state == READY && !task->dead) {
-            // found a runnable task, stop this look up
-            return task;
-        } else if (task->dead && task->state != RUNNING) {
-            struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-            tlo->free_pcb(task);
-            return NULL;
-        }
-    }
-    return NULL;
+    /// @todo better strategy
+    next_runable_task = NULL;
+    min_run_time = UINT64_MAX;
+    rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runable_task, NULL);
+    return next_runable_task;
 }
-struct Thread* round_robin_runnable_task(uint32_t priority)
-{
-    struct Thread* task = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
-    {
-        if (task->state == READY && !task->dead) {
-            // found a runnable task, stop this look up
-            return task;
-        } else if (task->dead && task->state != RUNNING) {
-            struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-            tlo->free_pcb(task);
-            return NULL;
-        }
-    }
-    return NULL;
-}
-/* recover task priority */
-void recover_priority(void)
-{
-    struct Thread* task = NULL;
-    for (int i = 1; i < TASK_MAX_PRIORITY; i++) {
-        if (i == TASK_DEFAULT_PRIORITY)
-            continue;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node)
-        {
-            if (!IS_DOUBLE_LIST_EMPTY(&task->node)) {
-                // DEBUG("%s priority recover\n", task->name);
-                task->priority = TASK_DEFAULT_PRIORITY;
-                doubleListDel(&task->node);
-                doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-                i--;
-                break;
-            }
-        }
-    }
-}
+#include "multicores.h"
+#include "rbtree.h"
+#include "task.h"
+bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
+{
+    snode->pthd = bind_thd;
+    snode->snode_id = bind_thd->tid;
+    snode->sched_context.remain_tick = 0;
+    snode->sched_context.run_time = 0;
+    snode->sleep_context.remain_ms = 0;
+    snode->state = INIT;
+    if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], //
+            snode->snode_id, (void*)snode)) {
+        return false;
+    }
+    return true;
+}
+bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state)
+{
+    assert(snode != NULL);
+    // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name);
+    assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL);
+    if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) {
+        DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid);
+        return false;
+    }
+    if (RBTTREE_INSERT_SECC != rbt_insert(to_pool, snode->snode_id, (void*)snode)) {
+        DEBUG("Thread %d trans state failed\n", snode->pthd->tid);
+        return false;
+    }
+    snode->state = target_state;
+    return true;
+}
+void task_dead(struct Thread* thd)
+{
+    assert(thd != NULL);
+    struct ScheduleNode* snode = &thd->snode;
+    assert(snode->state == READY);
+    bool trans_res = task_trans_sched_state(snode, //
+        &g_scheduler.snode_state_pool[READY], //
+        &g_scheduler.snode_state_pool[DEAD], DEAD);
+    assert(trans_res == true);
+    assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[DEAD], snode->snode_id));
+    return;
+}
+void task_block(struct Thread* thd)
+{
+    assert(thd != NULL);
+    struct ScheduleNode* snode = &thd->snode;
+    enum ThreadState thd_cur_state = snode->state;
+    assert(thd_cur_state != RUNNING);
+    bool trans_res = task_trans_sched_state(snode, //
+        &g_scheduler.snode_state_pool[thd_cur_state], //
+        &g_scheduler.snode_state_pool[BLOCKED], BLOCKED);
+    assert(trans_res == true);
+    return;
+}
+void task_into_ready(struct Thread* thd)
+{
+    assert(thd != NULL);
+    struct ScheduleNode* snode = &thd->snode;
+    enum ThreadState thd_cur_state = snode->state;
+    bool trans_res = task_trans_sched_state(snode, //
+        &g_scheduler.snode_state_pool[thd_cur_state], //
+        &g_scheduler.snode_state_pool[READY], READY);
+    snode->sched_context.remain_tick = TASK_CLOCK_TICK;
+    assert(trans_res == true);
+    return;
+}
+void task_yield(struct Thread* thd)
+{
+    assert(thd != NULL);
+    struct ScheduleNode* snode = &thd->snode;
+    enum ThreadState thd_cur_state = snode->state;
+    assert(thd == cur_cpu()->task && thd_cur_state == RUNNING);
+    cur_cpu()->task = NULL;
+    bool trans_res = task_trans_sched_state(snode, //
+        &g_scheduler.snode_state_pool[thd_cur_state], //
+        &g_scheduler.snode_state_pool[READY], READY);
+    snode->sched_context.remain_tick = TASK_CLOCK_TICK;
+    assert(trans_res == true);
+    return;
+}
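Taken together these give a yield path like the following sketch (only the comments are new; the calls are the functions defined above):

    /* inside a syscall handler, on the CPU that runs the thread */
    struct Thread* me = cur_cpu()->task;
    task_yield(me); /* asserts me == cur_cpu()->task and RUNNING, clears the
                       CPU slot, moves the snode RUNNING -> READY, and refills
                       remain_tick with TASK_CLOCK_TICK */
    /* the scheduler loop then calls max_priority_runnable_task(), which now
       scans the READY pool and picks the thread with the least run_time */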

View File

@@ -58,7 +58,7 @@ sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val)
     }
     sem->val = val;
     doubleListNodeInit(&sem->sem_list_node);
-    doubleListNodeInit(&sem->wait_list_guard);
+    rbtree_init(&sem->wait_thd_tree);
     if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) {
         slab_free(&sem_pool->allocator, sem);
@@ -88,7 +88,7 @@ bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem
 bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id)
 {
     assert(thd != NULL);
-    assert(thd->state == RUNNING);
+    assert(thd->snode.state == RUNNING);
     /* find sem */
     struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
     // invalid sem id
@@ -105,8 +105,9 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem
     // waiting at the sem
     sem->val--;
-    xizi_task_manager.task_yield_noschedule(thd, false);
-    xizi_task_manager.task_block(&sem->wait_list_guard, thd);
+    task_yield(thd);
+    task_block(thd);
+    assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd));
     return true;
 }
@@ -120,12 +121,11 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
     }
     if (sem->val < 0) {
-        if (!IS_DOUBLE_LIST_EMPTY(&sem->wait_list_guard)) {
-            struct Thread* thd = CONTAINER_OF(sem->wait_list_guard.next, struct Thread, node);
-            assert(thd != NULL && thd->state == BLOCKED);
-            xizi_task_manager.task_unblock(thd);
-            // DEBUG("waking %s\n", thd->name);
-        }
+        assert(!rbt_is_empty(&sem->wait_thd_tree));
+        RbtNode* root = sem->wait_thd_tree.root;
+        struct Thread* thd = (struct Thread*)root->data;
+        rbt_delete(&sem->wait_thd_tree, root->key);
+        task_into_ready(thd);
     }
     sem->val++;
@@ -154,12 +154,7 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
         return false;
     }
-    struct Thread* thd = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node)
-    {
-        assert(thd != NULL);
-        xizi_task_manager.task_unblock(thd);
-    }
+    // by design: no waking any waiting threads
     rbt_delete(&sem_pool->sem_pool_map, sem_id);
     doubleListDel(&sem->sem_list_node);

View File

@@ -44,28 +44,9 @@ struct CPU global_cpus[NR_CPU];
 uint32_t ready_task_priority;
 struct GlobalTaskPool global_task_pool;
+struct Scheduler g_scheduler;
 extern struct TaskLifecycleOperations task_lifecycle_ops;
-static inline void task_node_leave_list(struct Thread* task)
-{
-    doubleListDel(&task->node);
-    if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
-        ready_task_priority &= ~((uint32_t)1 << task->priority);
-    }
-}
-static inline void task_node_add_to_ready_list_head(struct Thread* task)
-{
-    doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-    ready_task_priority |= ((uint32_t)1 << task->priority);
-}
-static inline void task_node_add_to_ready_list_back(struct Thread* task)
-{
-    doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-    ready_task_priority |= ((uint32_t)1 << task->priority);
-}
 static void _task_manager_init()
 {
     assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, //
@@ -93,8 +74,16 @@ static void _task_manager_init()
     doubleListNodeInit(&global_task_pool.thd_listing_head);
     rbtree_init(&global_task_pool.thd_ref_map);
+    // scheduler
+    assert(CreateResourceTag(&g_scheduler.tag, &xizi_task_manager.tag, //
+        "GlobalScheduler", TRACER_SYSOBJECT, (void*)&g_scheduler));
+    semaphore_pool_init(&g_scheduler.semaphore_pool);
+    for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
+        rbtree_init(&g_scheduler.snode_state_pool[pool_id]);
+    }
     // tid pool
-    xizi_task_manager.next_pid = 0;
+    xizi_task_manager.next_pid = 1;
     // init priority bit map
     ready_task_priority = 0;
@@ -125,8 +114,8 @@ int _task_return_sys_resources(struct Thread* ptask)
             // @todo fix memory leak
         } else {
             assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-            if (server_to_info->state == BLOCKED) {
-                xizi_task_manager.task_unblock(session_backend->server);
+            if (server_to_info->snode.state == BLOCKED) {
+                task_into_ready(server_to_info);
             }
         }
     }
@@ -184,7 +173,7 @@ static void _free_thread(struct Thread* task)
     }
     // remove thread from used task list
-    task_node_leave_list(task);
+    task_dead(task);
     /* free memspace if needed to */
     if (task->memspace != NULL) {
@@ -196,8 +185,8 @@ static void _free_thread(struct Thread* task)
     // awake deamon in this memspace
     if (task->memspace->thread_to_notify != NULL) {
         if (task->memspace->thread_to_notify != task) {
-            if (task->memspace->thread_to_notify->state == BLOCKED) {
-                xizi_task_manager.task_unblock(task->memspace->thread_to_notify);
+            if (task->memspace->thread_to_notify->snode.state == BLOCKED) {
+                task_into_ready(task->memspace->thread_to_notify);
             } else {
                 task->memspace->thread_to_notify->advance_unblock = true;
             }
@@ -231,9 +220,18 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
         return NULL;
     }
+    // [schedule related]
+    task->tid = xizi_task_manager.next_pid++;
+    if (!init_schedule_node(&task->snode, task)) {
+        ERROR("Not enough memory\n");
+        slab_free(&xizi_task_manager.task_allocator, (void*)task);
+        return NULL;
+    }
     // alloc stack page for task
     if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) {
         /* here inside, will no free memspace */
+        assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[INIT], task->snode.snode_id));
         slab_free(&xizi_task_manager.task_allocator, (void*)task);
         return NULL;
     }
@@ -241,7 +239,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
     ERROR_FREE
     {
         /* init basic task ref member */
-        task->tid = xizi_task_manager.next_pid++;
         task->bind_irq = false;
         /* vm & memory member */
@@ -279,7 +276,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
     }
     // [name]
-    // [schedule related]
     return task;
 }
@@ -289,22 +285,12 @@ struct TaskLifecycleOperations task_lifecycle_ops = {
     .free_pcb = _free_thread,
 };
-static void _task_set_default_schedule_attr(struct Thread* task)
-{
-    task->remain_tick = TASK_CLOCK_TICK;
-    task->maxium_tick = TASK_CLOCK_TICK * 10;
-    task->dead = false;
-    task->state = READY;
-    task->priority = TASK_DEFAULT_PRIORITY;
-    task_node_add_to_ready_list_head(task);
-}
 static void task_state_set_running(struct Thread* task)
 {
-    assert(task != NULL && task->state == READY);
-    task->state = RUNNING;
-    task_node_leave_list(task);
-    doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head);
+    assert(task != NULL && task->snode.state == READY);
+    assert(task_trans_sched_state(&task->snode, //
+        &g_scheduler.snode_state_pool[READY], //
+        &g_scheduler.snode_state_pool[RUNNING], RUNNING));
 }
 struct Thread* next_task_emergency = NULL;
@@ -319,7 +305,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         next_task = NULL;
         /* find next runnable task */
         assert(cur_cpu()->task == NULL);
-        if (next_task_emergency != NULL && next_task_emergency->state == READY) {
+        if (next_task_emergency != NULL && next_task_emergency->snode.state == READY) {
             next_task = next_task_emergency;
         } else {
             next_task = xizi_task_manager.next_runnable_task();
@@ -340,76 +326,21 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         assert(next_task->memspace->pgdir.pd_addr != NULL);
         p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr));
         context_switch(&cpu->scheduler, next_task->thread_context.context);
-        assert(next_task->state != RUNNING);
+        assert(next_task->snode.state != RUNNING);
     }
 }
-static void _task_yield_noschedule(struct Thread* task, bool blocking)
-{
-    assert(task != NULL);
-    /// @warning only support current task yield now
-    assert(task == cur_cpu()->task && task->state == RUNNING);
-    // rearrage current task position
-    task_node_leave_list(task);
-    if (task->state == RUNNING) {
-        task->state = READY;
-    }
-    task->remain_tick = TASK_CLOCK_TICK;
-    cur_cpu()->task = NULL;
-    task_node_add_to_ready_list_back(task);
-}
-static void _task_block(struct double_list_node* head, struct Thread* task)
-{
-    assert(head != NULL);
-    assert(task != NULL);
-    assert(task->state != RUNNING);
-    task_node_leave_list(task);
-    task->state = BLOCKED;
-    doubleListAddOnHead(&task->node, head);
-}
-static void _task_unblock(struct Thread* task)
-{
-    assert(task != NULL);
-    assert(task->state == BLOCKED || task->state == SLEEPING);
-    task_node_leave_list(task);
-    task->state = READY;
-    task_node_add_to_ready_list_back(task);
-}
 /// @brief @warning not tested function
 /// @param priority
 static void _set_cur_task_priority(int priority)
 {
-    if (priority < 0 || priority >= TASK_MAX_PRIORITY) {
-        ERROR("priority is invalid\n");
-        return;
-    }
-    struct Thread* current_task = cur_cpu()->task;
-    assert(current_task != NULL && current_task->state == RUNNING);
-    task_node_leave_list(current_task);
-    current_task->priority = priority;
-    task_node_add_to_ready_list_back(current_task);
     return;
 }
 struct XiziTaskManager xizi_task_manager = {
     .init = _task_manager_init,
-    .task_set_default_schedule_attr = _task_set_default_schedule_attr,
     .next_runnable_task = max_priority_runnable_task,
     .task_scheduler = _scheduler,
-    .task_block = _task_block,
-    .task_unblock = _task_unblock,
-    .task_yield_noschedule = _task_yield_noschedule,
     .set_cur_task_priority = _set_cur_task_priority
 };

View File

@@ -326,20 +326,20 @@ RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree)
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data)
 {
     if (rbt_search(tree, key) != NULL) {
-        return -2;
+        return RBTTREE_INSERT_EXISTED;
     }
     RbtNode* node = rbtree_createnode(key, data);
     RbtNode* samenode = NULL;
     if (node == NULL)
-        return -1;
+        return RBTTREE_INSERT_FAILED;
     else
         samenode = __rbtree_insert(node, tree);
     assert(samenode == NULL);
     tree->nr_ele++;
-    return 0;
+    return RBTTREE_INSERT_SECC;
 }
 void replace_node(RbtTree* t, RbtNode* oldn, RbtNode* newn)
@@ -455,7 +455,7 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
 {
     RbtNode* node = do_lookup(key, tree, NULL);
     if (node == NULL)
-        return -1;
+        return RBTTREE_DELETE_FAILED;
     else
         __rbtree_remove(node, tree);
@@ -463,5 +463,22 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
     if (rbt_is_empty(tree)) {
         assert(tree->root == NULL);
     }
-    return 0;
+    return RBTTREE_DELETE_SUCC;
 }
+void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn, void* data)
+{
+    if (node == NULL) {
+        return;
+    }
+    if (fn(node, data)) {
+        rbt_traverse_inner(node->left, fn, data);
+        rbt_traverse_inner(node->right, fn, data);
+    }
+}
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data)
+{
+    rbt_traverse_inner(tree->root, fn, data);
+}
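A note on the stop semantics: when the callback returns false, rbt_traverse_inner only skips the children of the current node; siblings higher up the recursion are still visited, so the early-out in sys_kill's kill_task is a pruning hint rather than a hard stop. The visit order is pre-order, the node itself before either child.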

View File

@@ -62,6 +62,12 @@ void hw_current_second(uintptr_t* second)
     *second = p_clock_driver->get_second();
 }
+bool count_down_sleeping_task(RbtNode* node, void* data)
+{
+    /// @todo implement
+    return false;
+}
 uint64_t global_tick = 0;
 int xizi_clock_handler(int irq, void* tf, void* arg)
 {
@@ -73,24 +79,25 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
         // handle current thread
         struct Thread* current_task = cur_cpu()->task;
         if (current_task) {
-            current_task->remain_tick--;
-            current_task->maxium_tick--;
-            if (current_task->remain_tick == 0) {
-                xizi_task_manager.task_yield_noschedule(current_task, false);
+            struct ScheduleNode* snode = &current_task->snode;
+            snode->sched_context.remain_tick--;
+            if (snode->sched_context.remain_tick == 0) {
+                task_into_ready(current_task);
             }
         }
         // todo: cpu 0 will handle sleeping thread
-        struct Thread* thread = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
-        {
-            assert(thread->state == SLEEPING);
-            thread->sleep_context.remain_ms--;
-            if (thread->sleep_context.remain_ms <= 0) {
-                xizi_task_manager.task_unblock(thread);
-                break;
-            }
-        }
+        rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task, NULL);
+        // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
+        // {
+        //     assert(thread->state == SLEEPING);
+        //     thread->sleep_context.remain_ms--;
+        //     if (thread->sleep_context.remain_ms <= 0) {
+        //         xizi_task_manager.task_unblock(thread);
+        //         break;
+        //     }
+        // }
     }
     return 0;
 }
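count_down_sleeping_task is still a stub. Going by the commented-out list walk it replaces, a plausible fill-in would decrement remain_ms on each tick; this is an assumption, not part of this commit:

    bool count_down_sleeping_task(RbtNode* node, void* data)
    {
        struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
        snode->sleep_context.remain_ms--;
        /* waking snode->pthd here would mutate the SLEEPING pool while
           rbt_traverse is walking it (the old list version dodged this with
           a break); a real implementation should collect expired threads and
           call task_into_ready() on them after the traverse returns */
        return true;
    }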

View File

@@ -84,7 +84,7 @@ void intr_irq_dispatch(struct trapframe* tf)
     // finish irq.
     p_intr_driver->hw_after_irq(int_info);
-    if (cur_cpu()->task == NULL || current_task->state != RUNNING) {
+    if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) {
         cur_cpu()->task = NULL;
         context_switch(&current_task->thread_context.context, cur_cpu()->scheduler);
     }

View File

@@ -56,7 +56,7 @@ void software_irq_dispatch(struct trapframe* tf)
     /// @todo: Handle dead task
     int syscall_num = -1;
-    if (cur_task && cur_task->state != DEAD) {
+    if (cur_task && cur_task->snode.state != DEAD) {
         cur_task->thread_context.trapframe = tf;
         // call syscall
@@ -64,7 +64,7 @@
         arch_set_return(tf, ret);
     }
-    if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) {
+    if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) {
        cur_cpu()->task = NULL;
        context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
    }