Add schedule node midway
parent 7639937678 · commit 21304531a5

@@ -57,7 +57,7 @@ void panic(char* s)
 /* stack for different mode*/
 static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE];
 extern uint32_t _vector_jumper;
-extern uint32_t _vector_start;
+extern uint32_t* _vector_start;
 extern uint32_t _vector_end;

 void init_cpu_mode_stacks(int cpu_id)

@@ -75,7 +75,7 @@ static void _sys_irq_init(int cpu_id)
 /* load exception vectors */
 init_cpu_mode_stacks(cpu_id);
 if (cpu_id == 0) {
-volatile uint32_t* vector_base = &_vector_start;
+volatile uint32_t* vector_base = (uint32_t*)&_vector_start;

 // Set Interrupt handler start address
 vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
@@ -41,7 +41,7 @@ struct ksemaphore {
 sem_id_t id;
 sem_val_t val;
 /* list of waiting threads */
-struct double_list_node wait_list_guard;
+RbtTree wait_thd_tree;
 /* list to manage semaphores */
 /// @todo Use RB-Tree to manage all semaphores
 struct double_list_node sem_list_node;
@@ -5,6 +5,13 @@

 #include "actracer.h"

+#define RBTTREE_INSERT_SECC 0
+#define RBTTREE_INSERT_FAILED -1
+#define RBTTREE_INSERT_EXISTED -2
+
+#define RBTTREE_DELETE_SUCC 0
+#define RBTTREE_DELETE_FAILED -1
+
 // CLRS
 // Insertion and Deletion in a Red Black Tree
 enum rbt_type {

@@ -26,10 +33,13 @@ typedef struct RbtTree {
 int nr_ele;
 } RbtTree;

+typedef void(rbt_traverse_fn)(RbtNode* node);
+
 void rbtree_init(RbtTree* tree);
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data);
 RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
 int rbt_delete(RbtTree* tree, uintptr_t key);
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn);

 void module_rbt_factory_init(TraceTag* _softkernel_tag);

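The named return codes and the callback-style traversal above replace the bare 0 / -1 / -2 values callers have used so far. A minimal usage sketch of the new interface; the helper names, the key value 42 and the print callback are illustrative only and not part of this commit (DEBUG is the kernel's existing logging macro, and RbtNode's key/data fields are used the same way elsewhere in this change):

    #include "rbtree.h"

    // Illustrative callback: rbt_traverse() invokes this once per node.
    static void print_node(RbtNode* node)
    {
        DEBUG("key=%lu data=%p\n", (unsigned long)node->key, node->data);
    }

    static void rbt_usage_sketch(void)
    {
        RbtTree tree;
        rbtree_init(&tree);

        if (RBTTREE_INSERT_SECC != rbt_insert(&tree, 42, NULL)) {
            // RBTTREE_INSERT_EXISTED: key already present
            // RBTTREE_INSERT_FAILED: node allocation failed
            return;
        }

        rbt_traverse(&tree, print_node); // pre-order walk, see rbt_traverse_inner later in this commit

        if (RBTTREE_DELETE_SUCC != rbt_delete(&tree, 42)) {
            // RBTTREE_DELETE_FAILED: key not found
        }
    }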
@@ -2,20 +2,52 @@
 #pragma once
 #include "actracer.h"
 #include "ksemaphore.h"
+#include "rbtree.h"

 #define TASK_MAX_PRIORITY 32
+#define UNINIT_SNODE_ID 0
+typedef uintptr_t snode_id_t;

+enum ThreadState {
+INIT = 0,
+READY,
+RUNNING,
+DEAD,
+BLOCKED,
+SLEEPING,
+NEVER_RUN,
+NR_STATE,
+};
+
+typedef struct ScheduleContext {
+intptr_t remain_tick;
+} ScheduleContext;
+
+typedef struct TaskSleepContext {
+int64_t remain_ms;
+} TaskSleepContext;
+
 struct ScheduleNode {
-TraceTag task_ref;
-struct double_list_node list_node;
+struct Thread* pthd;
+snode_id_t snode_id;
+enum ThreadState state;
+
+ScheduleContext sched_context;
+TaskSleepContext sleep_context;
 };

 struct Scheduler {
 TraceTag tag;
-struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
-struct double_list_node task_running_list_head;
-struct double_list_node task_blocked_list_head;
-struct double_list_node task_sleep_list_head;
+RbtTree snode_state_pool[NR_STATE];
 struct XiziSemaphorePool semaphore_pool;
 };

+extern struct Scheduler g_scheduler;
+
+bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd);
+
+bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state);
+void task_block(struct Thread* thd);
+void task_dead(struct Thread* thd);
+void task_yield(struct Thread* thd);
+void task_into_ready(struct Thread* thd);
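The Scheduler above keeps one red-black tree per ThreadState, each keyed by snode_id (the owning thread's tid), and task_trans_sched_state moves a ScheduleNode from one pool to another. A hedged walk-through of the intended lifecycle, using only the helpers declared above; the function name is made up and error handling is shortened:

    #include "scheduler.h"

    // Sketch of one thread's path through the state pools, assuming the helpers
    // behave as they are implemented later in this commit.
    static void snode_lifecycle_sketch(struct Thread* thd)
    {
        // A new thread first registers its schedule node in the INIT pool
        // (done by _new_thread later in this commit):
        if (!init_schedule_node(&thd->snode, thd)) {
            return; // insertion into the INIT pool failed
        }

        // Making it runnable moves the node INIT -> READY and refills remain_tick:
        task_into_ready(thd);

        // From here the scheduler drives the transitions:
        //   READY -> RUNNING    when a cpu picks it (task_state_set_running in task.c)
        //   RUNNING -> READY    task_yield(), the thread gives the cpu back
        //   any -> BLOCKED      task_block(), e.g. while waiting on a semaphore
        //   BLOCKED -> READY    task_into_ready() when it is woken
        //   READY -> SLEEPING   sys_sleep() parks it with a remain_ms budget
        //   READY -> DEAD       task_dead() when the thread is freed
    }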
@@ -41,22 +41,14 @@ Modification:
 #include "share_page.h"
 #include "spinlock.h"

+#include "scheduler.h"
+
 #define TASK_CLOCK_TICK 50
 #define TASK_MAX_PRIORITY 32
 #define TASK_DEFAULT_PRIORITY 2
 #define TASK_NAME_MAX_LEN 16
 #define SLEEP_MONITOR_CORE 0

-enum ProcState {
-INIT = 0,
-READY,
-RUNNING,
-DEAD,
-BLOCKED,
-SLEEPING,
-NEVER_RUN,
-};
-
 /* Thread Control Block */
 struct ThreadContext {
 struct Thread* task; // process of current thread

@@ -75,10 +67,6 @@ struct ThreadContext {
 struct trapframe* trapframe;
 };

-struct TaskSleepContext {
-int64_t remain_ms;
-};
-
 /* Process Control Block */
 struct Thread {
 /* task name */

@@ -107,12 +95,13 @@ struct Thread {
 bool advance_unblock; // @todo abandon

 /* task schedule attributes */
-struct double_list_node node;
-struct TaskSleepContext sleep_context;
-enum ProcState state;
-int priority; // priority
-int remain_tick;
-int maxium_tick;
+// struct double_list_node node;
+// struct TaskSleepContext sleep_context;
+// enum ThreadState state;
+// int priority; // priority
+// int remain_tick;
+// int maxium_tick;
+struct ScheduleNode snode;
 };

 struct SchedulerRightGroup {

@@ -157,9 +146,6 @@ struct XiziTaskManager {

 /* init task manager */
 void (*init)();
-/* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
-void (*task_set_default_schedule_attr)(struct Thread*);
-
 /* use by task_scheduler, find next READY task, should be in locked */
 struct Thread* (*next_runnable_task)(void);
 /* function that's runing by kernel thread context, schedule use tasks */

@@ -168,9 +154,6 @@ struct XiziTaskManager {
 /* handle task state */
 /* call to yield current use task */
 void (*task_yield_noschedule)(struct Thread* task, bool is_blocking);
-/* block and unblock task */
-void (*task_block)(struct double_list_node* head, struct Thread* task);
-void (*task_unblock)(struct Thread* task);
 /* set task priority */
 void (*set_cur_task_priority)(int priority);
 };
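Most of the call-site churn in the .c files below is mechanical: per-thread scheduling state now lives in the embedded ScheduleNode instead of directly on struct Thread. Roughly, as a before/after cheat sheet (illustrative, not exhaustive):

    // old field / call                              new equivalent in this commit
    // thd->state == BLOCKED                      -> thd->snode.state == BLOCKED
    // thd->remain_tick--                         -> thd->snode.sched_context.remain_tick--
    // thd->sleep_context.remain_ms = ms          -> thd->snode.sleep_context.remain_ms = ms
    // xizi_task_manager.task_unblock(thd)        -> task_into_ready(thd)
    // xizi_task_manager.task_yield_noschedule(   -> task_yield(thd)
    //     thd, false)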
@@ -70,8 +70,8 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
 // @todo fix memory leak
 } else {
 assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-if (server_to_info->state == BLOCKED) {
-xizi_task_manager.task_unblock(session_backend->server);
+if (server_to_info->snode.state == BLOCKED) {
+task_into_ready(session_backend->server);
 }
 }
 }
@@ -41,11 +41,11 @@ int sys_exit(struct Thread* ptask)
 assert(ptask != NULL);
 ptask->dead = true;
 // free that task straightly if it's a blocked task
-if (ptask->state == BLOCKED) {
+if (ptask->snode.state == BLOCKED) {
 struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
 tlo->free_pcb(ptask);
 }
 // yield current task in case it wants to exit itself
-xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+task_yield(cur_cpu()->task);
 return 0;
 }
@@ -112,7 +112,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
 }

 if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
-xizi_task_manager.task_yield_noschedule(cur_task, false);
+task_yield(cur_task);
 // @todo support blocking(now bug at 4 cores running)
 // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
 }
@@ -76,8 +76,8 @@ static void send_irq_to_user(int irq_num)
 buf->header.magic = IPC_MSG_MAGIC;
 buf->header.valid = 1;

-if (irq_forward_table[irq_num].handle_task->state == BLOCKED) {
-xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task);
+if (irq_forward_table[irq_num].handle_task->snode.state == BLOCKED) {
+task_into_ready(irq_forward_table[irq_num].handle_task);
 }

 /* add session head */

@@ -92,7 +92,7 @@ int user_irq_handler(int irq, void* tf, void* arg)

 next_task_emergency = irq_forward_table[irq].handle_task;
 if (cur_cpu()->task != NULL) {
-xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+task_yield(cur_cpu()->task);
 }
 }
 return 0;

@@ -126,7 +126,7 @@ int sys_register_irq(int irq_num, int irq_opcode)

 struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
 kernel_irq_proxy = tlo->new_thread(pmemspace);
-kernel_irq_proxy->state = NEVER_RUN;
+kernel_irq_proxy->snode.state = NEVER_RUN;
 }

 // bind irq to session
@@ -36,10 +36,11 @@ Modification:
 int sys_sleep(intptr_t ms)
 {
 struct Thread* cur_task = cur_cpu()->task;
-xizi_task_manager.task_yield_noschedule(cur_task, false);
-xizi_task_manager.task_block(&xizi_task_manager.task_sleep_list_head, cur_task);
-cur_task->state = SLEEPING;
-cur_task->sleep_context.remain_ms = ms;
+task_yield(cur_task);
+cur_task->snode.sleep_context.remain_ms = ms;
+task_trans_sched_state(&cur_task->snode, //
+&g_scheduler.snode_state_pool[READY], //
+&g_scheduler.snode_state_pool[SLEEPING], SLEEPING);

 return 0;
 }
@@ -64,7 +64,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
 strncpy(task->name, last, sizeof(task->name) - 1);

 // init pcb schedule attributes
-xizi_task_manager.task_set_default_schedule_attr(task);
+task_into_ready(task);

 // thread init done by here
 if (pmemspace->thread_to_notify == NULL) {
@@ -60,8 +60,8 @@ int sys_wait_session(struct Session* userland_session)
 assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));

 ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait);
-if (server_to_call->state == BLOCKED) {
-xizi_task_manager.task_unblock(session_backend->server);
+if (server_to_call->snode.state == BLOCKED) {
+task_into_ready(session_backend->server);
 }

 return 0;
@@ -36,6 +36,6 @@ Modification:
 int sys_yield(task_yield_reason reason)
 {
 struct Thread* cur_task = cur_cpu()->task;
-xizi_task_manager.task_yield_noschedule(cur_task, false);
+task_yield(cur_task);
 return 0;
 }
@@ -39,7 +39,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u

 switch (sys_num) {
 case SYSCALL_TEST:
-ret = arch_curr_tick();
+ret = 0;
 break;
 case SYSCALL_SPAWN:
 ret = sys_spawn((char*)param1, (char*)param2, (char**)param3);
@@ -33,65 +33,123 @@ Modification:
 struct Thread* max_priority_runnable_task(void)
 {
 static struct Thread* task = NULL;
-static int priority = 0;
+// static int priority = 0;

-priority = __builtin_ffs(ready_task_priority) - 1;
-if (priority > 31 || priority < 0) {
-return NULL;
-}
+// priority = __builtin_ffs(ready_task_priority) - 1;
+// if (priority > 31 || priority < 0) {
+// return NULL;
+// }

-DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
-{
-assert(task != NULL);
-if (task->state == READY && !task->dead) {
-// found a runnable task, stop this look up
-return task;
-} else if (task->dead && task->state != RUNNING) {
+// DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
+// {
+// assert(task != NULL);
+// if (task->state == READY && !task->dead) {
+// // found a runnable task, stop this look up
+// return task;
+// } else if (task->dead && task->state != RUNNING) {

-struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-tlo->free_pcb(task);
-return NULL;
-}
+// struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
+// tlo->free_pcb(task);
+// return NULL;
+// }
+// }
+if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) {
+return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd;
 }
 return NULL;
 }

-struct Thread* round_robin_runnable_task(uint32_t priority)
-{
-struct Thread* task = NULL;
+#include "multicores.h"
+#include "rbtree.h"
+#include "task.h"

-DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
+bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
 {
-if (task->state == READY && !task->dead) {
-// found a runnable task, stop this look up
-return task;
-} else if (task->dead && task->state != RUNNING) {
-struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-tlo->free_pcb(task);
-return NULL;
+snode->pthd = bind_thd;
+snode->snode_id = bind_thd->tid;
+snode->sched_context.remain_tick = 0;
+snode->sleep_context.remain_ms = 0;
+snode->state = INIT;
+if (RBTTREE_INSERT_SECC != rbt_insert(&g_scheduler.snode_state_pool[INIT], //
+snode->snode_id, (void*)snode)) {
+return false;
 }
+return true;
 }

-return NULL;
+bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state)
+{
+assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL);
+if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) {
+DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid);
+return false;
 }

-/* recover task priority */
-void recover_priority(void)
+if (RBTTREE_INSERT_SECC != rbt_insert(to_pool, snode->snode_id, (void*)snode)) {
+DEBUG("Thread %d trans state failed\n", snode->pthd->tid);
+return false;
+}

+snode->state = target_state;
+return true;
+}

+void task_dead(struct Thread* thd)
 {
-struct Thread* task = NULL;
-for (int i = 1; i < TASK_MAX_PRIORITY; i++) {
-if (i == TASK_DEFAULT_PRIORITY)
-continue;
-DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node)
+assert(thd != NULL);
+struct ScheduleNode* snode = &thd->snode;
+enum ThreadState thd_cur_state = snode->state;
+assert(snode->state == READY);

+bool trans_res = task_trans_sched_state(snode, //
+&g_scheduler.snode_state_pool[READY], //
+&g_scheduler.snode_state_pool[DEAD], DEAD);
+assert(trans_res = true);
+return;
+}

+void task_block(struct Thread* thd)
 {
-if (!IS_DOUBLE_LIST_EMPTY(&task->node)) {
-// DEBUG("%s priority recover\n", task->name);
-task->priority = TASK_DEFAULT_PRIORITY;
-doubleListDel(&task->node);
-doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-i--;
-break;
-}
+assert(thd != NULL);
+struct ScheduleNode* snode = &thd->snode;
+enum ThreadState thd_cur_state = snode->state;
+assert(thd_cur_state != RUNNING);
+
+bool trans_res = task_trans_sched_state(snode, //
+&g_scheduler.snode_state_pool[thd_cur_state], //
+&g_scheduler.snode_state_pool[BLOCKED], BLOCKED);
+assert(trans_res = true);
+return;
 }

+void task_into_ready(struct Thread* thd)
+{
+assert(thd != NULL);
+struct ScheduleNode* snode = &thd->snode;
+enum ThreadState thd_cur_state = snode->state;

+bool trans_res = task_trans_sched_state(snode, //
+&g_scheduler.snode_state_pool[thd_cur_state], //
+&g_scheduler.snode_state_pool[READY], READY);
+snode->sched_context.remain_tick = TASK_CLOCK_TICK;
+assert(trans_res = true);
+return;
+}

+void task_yield(struct Thread* thd)
+{
+assert(thd != NULL);
+struct ScheduleNode* snode = &thd->snode;
+enum ThreadState thd_cur_state = snode->state;

+assert(thd == cur_cpu()->task && thd_cur_state == RUNNING);
+cur_cpu()->task = NULL;

+bool trans_res = task_trans_sched_state(snode, //
+&g_scheduler.snode_state_pool[thd_cur_state], //
+&g_scheduler.snode_state_pool[READY], READY);
+assert(trans_res = true);
+return;
 }
 }
@@ -58,7 +58,7 @@ sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val)
 }
 sem->val = val;
 doubleListNodeInit(&sem->sem_list_node);
-doubleListNodeInit(&sem->wait_list_guard);
+rbtree_init(&sem->wait_thd_tree);

 if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) {
 slab_free(&sem_pool->allocator, sem);

@@ -88,7 +88,7 @@ bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem
 bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id)
 {
 assert(thd != NULL);
-assert(thd->state == RUNNING);
+assert(thd->snode.state == RUNNING);
 /* find sem */
 struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
 // invalid sem id

@@ -105,8 +105,9 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem

 // waiting at the sem
 sem->val--;
-xizi_task_manager.task_yield_noschedule(thd, false);
-xizi_task_manager.task_block(&sem->wait_list_guard, thd);
+task_yield(thd);
+task_block(thd);
+assert(RBTTREE_INSERT_SECC == rbt_insert(&sem->wait_thd_tree, thd->tid, thd));
 return true;
 }

@@ -120,12 +121,11 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
 }

 if (sem->val < 0) {
-if (!IS_DOUBLE_LIST_EMPTY(&sem->wait_list_guard)) {
-struct Thread* thd = CONTAINER_OF(sem->wait_list_guard.next, struct Thread, node);
-assert(thd != NULL && thd->state == BLOCKED);
-xizi_task_manager.task_unblock(thd);
-// DEBUG("waking %s\n", thd->name);
-}
+assert(!rbt_is_empty(&sem->wait_thd_tree));
+RbtNode* root = sem->wait_thd_tree.root;
+struct Thread* thd = (struct Thread*)root->data;
+rbt_delete(&sem->wait_thd_tree, root->key);
+task_into_ready(thd);
 }

 sem->val++;

@@ -155,11 +155,7 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
 }

 struct Thread* thd = NULL;
-DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node)
-{
-assert(thd != NULL);
-xizi_task_manager.task_unblock(thd);
-}
+// by design: no waking any waiting threads

 rbt_delete(&sem_pool->sem_pool_map, sem_id);
 doubleListDel(&sem->sem_list_node);
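Because wait_thd_tree stores the blocked struct Thread* keyed by its tid, the new traversal hook also makes it easy to inspect a semaphore's waiters. A hypothetical debug helper, not part of this commit (dump_waiter and dump_sem_waiters_sketch are made-up names; DEBUG, tid and name already exist in the kernel):

    #include "ksemaphore.h"
    #include "rbtree.h"

    // Print one waiter; rbt_traverse() calls this for every node in the tree.
    static void dump_waiter(RbtNode* node)
    {
        struct Thread* thd = (struct Thread*)node->data;
        DEBUG("waiting: tid=%lu name=%s\n", (unsigned long)node->key, thd->name);
    }

    // List every thread currently parked on the semaphore.
    static void dump_sem_waiters_sketch(struct ksemaphore* sem)
    {
        rbt_traverse(&sem->wait_thd_tree, dump_waiter);
    }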
@@ -44,28 +44,9 @@ struct CPU global_cpus[NR_CPU];
 uint32_t ready_task_priority;

 struct GlobalTaskPool global_task_pool;
+struct Scheduler g_scheduler;
 extern struct TaskLifecycleOperations task_lifecycle_ops;

-static inline void task_node_leave_list(struct Thread* task)
-{
-doubleListDel(&task->node);
-if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
-ready_task_priority &= ~((uint32_t)1 << task->priority);
-}
-}
-
-static inline void task_node_add_to_ready_list_head(struct Thread* task)
-{
-doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-ready_task_priority |= ((uint32_t)1 << task->priority);
-}
-
-static inline void task_node_add_to_ready_list_back(struct Thread* task)
-{
-doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-ready_task_priority |= ((uint32_t)1 << task->priority);
-}
-
 static void _task_manager_init()
 {
 assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, //

@@ -93,6 +74,14 @@ static void _task_manager_init()
 doubleListNodeInit(&global_task_pool.thd_listing_head);
 rbtree_init(&global_task_pool.thd_ref_map);

+// scheduler
+assert(CreateResourceTag(&g_scheduler.tag, &xizi_task_manager.tag, //
+"GlobalScheduler", TRACER_SYSOBJECT, (void*)&g_scheduler));
+semaphore_pool_init(&g_scheduler.semaphore_pool);
+for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
+rbtree_init(&g_scheduler.snode_state_pool[pool_id]);
+}
+
 // tid pool
 xizi_task_manager.next_pid = 0;

@@ -125,8 +114,8 @@ int _task_return_sys_resources(struct Thread* ptask)
 // @todo fix memory leak
 } else {
 assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
-if (server_to_info->state == BLOCKED) {
-xizi_task_manager.task_unblock(session_backend->server);
+if (server_to_info->snode.state == BLOCKED) {
+task_into_ready(server_to_info);
 }
 }
 }

@@ -184,7 +173,7 @@ static void _free_thread(struct Thread* task)
 }

 // remove thread from used task list
-task_node_leave_list(task);
+task_dead(task);

 /* free memspace if needed to */
 if (task->memspace != NULL) {

@@ -196,8 +185,8 @@ static void _free_thread(struct Thread* task)
 // awake deamon in this memspace
 if (task->memspace->thread_to_notify != NULL) {
 if (task->memspace->thread_to_notify != task) {
-if (task->memspace->thread_to_notify->state == BLOCKED) {
-xizi_task_manager.task_unblock(task->memspace->thread_to_notify);
+if (task->memspace->thread_to_notify->snode.state == BLOCKED) {
+task_into_ready(task->memspace->thread_to_notify);
 } else {
 task->memspace->thread_to_notify->advance_unblock = true;
 }

@@ -231,9 +220,17 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
 return NULL;
 }

+// [schedule related]
+if (!init_schedule_node(&task->snode, task)) {
+ERROR("Not enough memory\n");
+slab_free(&xizi_task_manager.task_allocator, (void*)task);
+return NULL;
+}
+
 // alloc stack page for task
 if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) {
 /* here inside, will no free memspace */
+assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[INIT], task->snode.snode_id));
 slab_free(&xizi_task_manager.task_allocator, (void*)task);
 return NULL;
 }

@@ -279,7 +276,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
 }

 // [name]
-// [schedule related]

 return task;
 }

@@ -289,22 +285,12 @@ struct TaskLifecycleOperations task_lifecycle_ops = {
 .free_pcb = _free_thread,
 };

-static void _task_set_default_schedule_attr(struct Thread* task)
-{
-task->remain_tick = TASK_CLOCK_TICK;
-task->maxium_tick = TASK_CLOCK_TICK * 10;
-task->dead = false;
-task->state = READY;
-task->priority = TASK_DEFAULT_PRIORITY;
-task_node_add_to_ready_list_head(task);
-}
-
 static void task_state_set_running(struct Thread* task)
 {
-assert(task != NULL && task->state == READY);
-task->state = RUNNING;
-task_node_leave_list(task);
-doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head);
+assert(task != NULL && task->snode.state == READY);
+task_trans_sched_state(&task->snode, //
+&g_scheduler.snode_state_pool[READY], //
+&g_scheduler.snode_state_pool[RUNNING], RUNNING);
 }

 struct Thread* next_task_emergency = NULL;

@@ -319,7 +305,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
 next_task = NULL;
 /* find next runnable task */
 assert(cur_cpu()->task == NULL);
-if (next_task_emergency != NULL && next_task_emergency->state == READY) {
+if (next_task_emergency != NULL && next_task_emergency->snode.state == READY) {
 next_task = next_task_emergency;
 } else {
 next_task = xizi_task_manager.next_runnable_task();

@@ -340,76 +326,21 @@ static void _scheduler(struct SchedulerRightGroup right_group)
 assert(next_task->memspace->pgdir.pd_addr != NULL);
 p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr));
 context_switch(&cpu->scheduler, next_task->thread_context.context);
-assert(next_task->state != RUNNING);
+assert(next_task->snode.state != RUNNING);
 }
 }

-static void _task_yield_noschedule(struct Thread* task, bool blocking)
-{
-assert(task != NULL);
-/// @warning only support current task yield now
-assert(task == cur_cpu()->task && task->state == RUNNING);
-
-// rearrage current task position
-task_node_leave_list(task);
-if (task->state == RUNNING) {
-task->state = READY;
-}
-task->remain_tick = TASK_CLOCK_TICK;
-cur_cpu()->task = NULL;
-task_node_add_to_ready_list_back(task);
-}
-
-static void _task_block(struct double_list_node* head, struct Thread* task)
-{
-assert(head != NULL);
-assert(task != NULL);
-assert(task->state != RUNNING);
-task_node_leave_list(task);
-task->state = BLOCKED;
-doubleListAddOnHead(&task->node, head);
-}
-
-static void _task_unblock(struct Thread* task)
-{
-assert(task != NULL);
-assert(task->state == BLOCKED || task->state == SLEEPING);
-task_node_leave_list(task);
-task->state = READY;
-task_node_add_to_ready_list_back(task);
-}
-
 /// @brief @warning not tested function
 /// @param priority
 static void _set_cur_task_priority(int priority)
 {
-if (priority < 0 || priority >= TASK_MAX_PRIORITY) {
-ERROR("priority is invalid\n");
-return;
-}
-
-struct Thread* current_task = cur_cpu()->task;
-assert(current_task != NULL && current_task->state == RUNNING);
-
-task_node_leave_list(current_task);
-
-current_task->priority = priority;
-
-task_node_add_to_ready_list_back(current_task);
-
 return;
 }

 struct XiziTaskManager xizi_task_manager = {
 .init = _task_manager_init,
-.task_set_default_schedule_attr = _task_set_default_schedule_attr,

 .next_runnable_task = max_priority_runnable_task,
 .task_scheduler = _scheduler,

-.task_block = _task_block,
-.task_unblock = _task_unblock,
-.task_yield_noschedule = _task_yield_noschedule,
 .set_cur_task_priority = _set_cur_task_priority
 };

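Putting this file's pieces together, one pass of the per-cpu scheduler now drives the pools roughly as follows. This is a condensed paraphrase for orientation, not code from the commit; the line publishing the choice to the cpu is assumed, since that part of _scheduler lies outside the hunks shown here:

    // Condensed sketch of one scheduler iteration after this commit (locking and error paths omitted).
    static void scheduler_iteration_sketch(struct CPU* cpu)
    {
        // max_priority_runnable_task() now just returns the thread at the root
        // of the READY pool, or NULL when that pool is empty.
        struct Thread* next_task = xizi_task_manager.next_runnable_task();
        if (next_task == NULL) {
            return;
        }

        task_state_set_running(next_task); // READY -> RUNNING pool, snode.state = RUNNING
        cpu->task = next_task;             // assumed: done by the surrounding _scheduler code
        p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr));
        context_switch(&cpu->scheduler, next_task->thread_context.context);

        // By the time control returns here the thread has yielded, blocked, slept
        // or died, so it must have left the RUNNING pool again, hence the
        // assert(next_task->snode.state != RUNNING) in _scheduler above.
    }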
@@ -326,20 +326,20 @@ RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree)
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data)
 {
 if (rbt_search(tree, key) != NULL) {
-return -2;
+return RBTTREE_INSERT_EXISTED;
 }

 RbtNode* node = rbtree_createnode(key, data);
 RbtNode* samenode = NULL;
 if (node == NULL)
-return -1;
+return RBTTREE_INSERT_FAILED;
 else
 samenode = __rbtree_insert(node, tree);

 assert(samenode == NULL);

 tree->nr_ele++;
-return 0;
+return RBTTREE_INSERT_SECC;
 }

 void replace_node(RbtTree* t, RbtNode* oldn, RbtNode* newn)

@@ -455,7 +455,7 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
 {
 RbtNode* node = do_lookup(key, tree, NULL);
 if (node == NULL)
-return -1;
+return RBTTREE_DELETE_FAILED;
 else
 __rbtree_remove(node, tree);

@@ -463,5 +463,20 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
 if (rbt_is_empty(tree)) {
 assert(tree->root == NULL);
 }
-return 0;
+return RBTTREE_DELETE_SUCC;
+}
+
+void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn)
+{
+if (node == NULL) {
+return;
+}
+fn(node);
+rbt_traverse_inner(node->left, fn);
+rbt_traverse_inner(node->right, fn);
+}
+
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn)
+{
+rbt_traverse_inner(tree->root, fn);
 }
@@ -62,6 +62,11 @@ void hw_current_second(uintptr_t* second)
 *second = p_clock_driver->get_second();
 }

+void count_down_sleeping_task(RbtNode* node)
+{
+/// @todo implement
+}
+
 uint64_t global_tick = 0;
 int xizi_clock_handler(int irq, void* tf, void* arg)
 {

@@ -73,24 +78,25 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
 // handle current thread
 struct Thread* current_task = cur_cpu()->task;
 if (current_task) {
-current_task->remain_tick--;
-current_task->maxium_tick--;
-if (current_task->remain_tick == 0) {
-xizi_task_manager.task_yield_noschedule(current_task, false);
+struct ScheduleNode* snode = &current_task->snode;
+snode->sched_context.remain_tick--;
+if (snode->sched_context.remain_tick == 0) {
+task_into_ready(current_task);
 }
 }

 // todo: cpu 0 will handle sleeping thread
-struct Thread* thread = NULL;
-DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
-{
-assert(thread->state == SLEEPING);
-thread->sleep_context.remain_ms--;
-if (thread->sleep_context.remain_ms <= 0) {
-xizi_task_manager.task_unblock(thread);
-break;
-}
-}
+rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task);
+
+// DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
+// {
+// assert(thread->state == SLEEPING);
+// thread->sleep_context.remain_ms--;
+// if (thread->sleep_context.remain_ms <= 0) {
+// xizi_task_manager.task_unblock(thread);
+// break;
+// }
+// }
 }
 return 0;
 }
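count_down_sleeping_task is left as a @todo above, so for now the tick handler only walks the SLEEPING pool without waking anyone. One possible shape for that callback, purely a guess at the intended follow-up and not part of this commit; it mirrors the list-based countdown that is commented out above. Note that moving a node between pools from inside rbt_traverse would mutate the tree being walked, so a real implementation would more likely collect expired sleepers during the walk and call task_into_ready() on them afterwards:

    // Hypothetical follow-up for the @todo stub: tick down one sleeping thread.
    // node->data is the ScheduleNode that sys_sleep() parked in the SLEEPING pool.
    void count_down_sleeping_task(RbtNode* node)
    {
        struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
        assert(snode->state == SLEEPING);

        snode->sleep_context.remain_ms--;
        if (snode->sleep_context.remain_ms <= 0) {
            // Do not re-insert into the READY pool here: that would re-shape the
            // tree mid-traversal. Queue snode->pthd instead and wake it after
            // rbt_traverse() returns.
        }
    }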
@@ -84,7 +84,7 @@ void intr_irq_dispatch(struct trapframe* tf)
 // finish irq.
 p_intr_driver->hw_after_irq(int_info);

-if (cur_cpu()->task == NULL || current_task->state != RUNNING) {
+if (cur_cpu()->task == NULL || current_task->snode.state != RUNNING) {
 cur_cpu()->task = NULL;
 context_switch(&current_task->thread_context.context, cur_cpu()->scheduler);
 }
@@ -56,7 +56,7 @@ void software_irq_dispatch(struct trapframe* tf)
 /// @todo: Handle dead task

 int syscall_num = -1;
-if (cur_task && cur_task->state != DEAD) {
+if (cur_task && cur_task->snode.state != DEAD) {
 cur_task->thread_context.trapframe = tf;
 // call syscall

@@ -64,7 +64,7 @@ void software_irq_dispatch(struct trapframe* tf)
 arch_set_return(tf, ret);
 }

-if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) {
+if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->snode.state != RUNNING) {
 cur_cpu()->task = NULL;
 context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
 }