Add schedule node
commit af1ceec308
parent 21304531a5
@@ -73,7 +73,7 @@ Modification:
 #include "cortex_a55.h"
 
-#define NR_CPU 1 // maximum number of CPUs
+#define NR_CPU 4 // maximum number of CPUs
 
 static inline uintptr_t arch_curr_tick()
 {
 
@@ -33,13 +33,14 @@ typedef struct RbtTree {
     int nr_ele;
 } RbtTree;
 
-typedef void(rbt_traverse_fn)(RbtNode* node);
+// return if the traverse needs to continue
+typedef bool(rbt_traverse_fn)(RbtNode* node, void* data);
 
 void rbtree_init(RbtTree* tree);
 int rbt_insert(RbtTree* tree, uintptr_t key, void* data);
 RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
 int rbt_delete(RbtTree* tree, uintptr_t key);
-void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn);
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data);
 
 void module_rbt_factory_init(TraceTag* _softkernel_tag);
 
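The traversal callback now returns whether the walk should continue and receives a caller-supplied `void*`, so per-call context no longer has to live in globals. A minimal usage sketch of the new contract (`count_node` and `rbt_count` are illustrative helpers, not part of this commit):

    // Illustrative only: count the nodes of a tree with the new signature.
    static bool count_node(RbtNode* node, void* data)
    {
        (*(int*)data)++; // accumulate into the caller-owned counter
        return true;     // true: keep traversing
    }

    static int rbt_count(RbtTree* tree)
    {
        int n = 0;
        rbt_traverse(tree, count_node, &n);
        return n;
    }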
@@ -9,13 +9,13 @@
 typedef uintptr_t snode_id_t;
 
 enum ThreadState {
-    INIT = 0,
+    NEVER_RUN = 0,
+    INIT,
     READY,
     RUNNING,
     DEAD,
     BLOCKED,
     SLEEPING,
-    NEVER_RUN,
     NR_STATE,
 };
 
@@ -49,6 +49,8 @@ Modification:
 #define TASK_NAME_MAX_LEN 16
 #define SLEEP_MONITOR_CORE 0
 
+typedef int tid_t;
+
 /* Thread Control Block */
 struct ThreadContext {
     struct Thread* task; // process of current thread
@@ -95,12 +97,6 @@ struct Thread {
     bool advance_unblock; // @todo abandon
 
     /* task schedule attributes */
-    // struct double_list_node node;
-    // struct TaskSleepContext sleep_context;
-    // enum ThreadState state;
-    // int priority; // priority
-    // int remain_tick;
-    // int maxium_tick;
     struct ScheduleNode snode;
 };
 
@@ -27,53 +27,36 @@ Author: AIIT XUOS Lab
 Modification:
 1. first version
 *************************************************/
+#include "task.h"
 #include "trap_common.h"
 
-#include "task.h"
+static bool kill_succ;
+
+static bool kill_task(RbtNode* node, void* id)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    tid_t target_id = *(tid_t*)id;
+    if (thd->tid == target_id) {
+        sys_exit(thd);
+        kill_succ = true;
+        return false;
+    }
+
+    return true;
+}
 
 extern int sys_exit(struct Thread* task);
 int sys_kill(int id)
 {
-    struct Thread* task = NULL;
-    // check if task is a running one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
-    }
-
-    // check if task is a blocking one
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
-    {
-        if (task->tid == id) {
-            sys_exit(task);
-            return 0;
-        }
-    }
-
-    struct ksemaphore* sem = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node)
-    {
-        task = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node)
-        {
-            sys_exit(task);
-            return 0;
-        }
-    }
-
-    // check if task is a ready one
-    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
-        {
-            if (task->tid == id) {
-                sys_exit(task);
-                return 0;
-            }
-        }
-    }
+    kill_succ = false;
+    for (int pool_id = 0; pool_id < NR_STATE; pool_id++) {
+        rbt_traverse(&g_scheduler.snode_state_pool[pool_id], kill_task, (void*)&id);
+    }
+
+    if (kill_succ) {
+        return 0;
+    }
 
     return -1;
 }
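The rewrite funnels the kill through the per-state snode pools: the target tid travels in through the traversal's `void*` argument, while the result comes back out through the file-scope `kill_succ` flag. A hedged alternative sketch that keeps both directions in one caller-owned context struct (`KillCtx` and `kill_task_ctx` are hypothetical, not in the commit):

    // Hypothetical variant: thread argument and result through one struct.
    struct KillCtx {
        tid_t target; // tid to kill
        bool done;    // set once the thread is found
    };

    static bool kill_task_ctx(RbtNode* node, void* data)
    {
        struct KillCtx* ctx = (struct KillCtx*)data;
        struct Thread* thd = ((struct ScheduleNode*)node->data)->pthd;
        if (thd->tid == ctx->target) {
            sys_exit(thd);
            ctx->done = true;
            return false; // stop descending below this node
        }
        return true;
    }

A stack-allocated context would also keep sys_kill reentrant across cores, which a shared static flag is not.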
@@ -118,8 +118,8 @@ int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info
     }
 
     uintptr_t paddr_to_map = *paddr;
-    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 1) {
-        ERROR("mapping invalid memory: 0x%p\n", paddr_to_map);
+    if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 2) {
+        ERROR("mapping invalid memory: 0x%p by %d\n", paddr_to_map, cur_task->tid);
         return -1;
     }
 
@@ -114,7 +114,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
     if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
         task_yield(cur_task);
         // @todo support blocking(now bug at 4 cores running)
-        // xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
+        task_block(cur_task);
     }
     return 0;
 }
@@ -126,7 +126,9 @@ int sys_register_irq(int irq_num, int irq_opcode)
 
     struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
     kernel_irq_proxy = tlo->new_thread(pmemspace);
-    kernel_irq_proxy->snode.state = NEVER_RUN;
+    task_trans_sched_state(&kernel_irq_proxy->snode, //
+        &g_scheduler.snode_state_pool[INIT], //
+        &g_scheduler.snode_state_pool[NEVER_RUN], NEVER_RUN);
 }
 
 // bind irq to session
@@ -42,30 +42,13 @@ Modification:
 extern uint8_t _binary_fs_img_start[], _binary_fs_img_end[];
 
 #define SHOWINFO_BORDER_LINE() LOG_PRINTF("******************************************************\n");
-#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, task->priority, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10)
+#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, 0, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10)
 
-void show_tasks(void)
+bool print_info(RbtNode* node, void* data)
 {
-    struct Thread* task = NULL;
-    SHOWINFO_BORDER_LINE();
-    for (int i = 0; i < NR_CPU; i++) {
-        LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
-    }
-    SHOWINFO_BORDER_LINE();
-    LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "RUNNING");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-
-    for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
-        if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) {
-            continue;
-        }
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node)
-        {
-            switch (task->state) {
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+    switch (snode->state) {
     case INIT:
         LOG_PRINTF("%-8s", "INIT");
         break;
@@ -78,35 +61,31 @@ void show_tasks(void)
     case DEAD:
         LOG_PRINTF("%-8s", "DEAD");
         break;
+    case BLOCKED:
+        LOG_PRINTF("%-8s", "BLOCK");
+        break;
+    case SLEEPING:
+        LOG_PRINTF("%-8s", "SLEEP");
+        break;
     default:
         break;
     }
 
-            SHOWTASK_TASK_BASE_INFO(task);
-        }
+    SHOWTASK_TASK_BASE_INFO(thd);
+    return true;
 }
 
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "BLOCK");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-
-    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_sleep_list_head, node)
-    {
-        LOG_PRINTF("%-8s", "SLEEP");
-        SHOWTASK_TASK_BASE_INFO(task);
-    }
-
-    struct ksemaphore* sem = NULL;
-    DOUBLE_LIST_FOR_EACH_ENTRY(sem, &xizi_task_manager.semaphore_pool.sem_list_guard, sem_list_node)
-    {
-        task = NULL;
-        DOUBLE_LIST_FOR_EACH_ENTRY(task, &sem->wait_list_guard, node)
-        {
-            LOG_PRINTF("%-8s", "BLOCK");
-            SHOWTASK_TASK_BASE_INFO(task);
-        }
-    }
+void show_tasks(void)
+{
+    SHOWINFO_BORDER_LINE();
+    for (int i = 0; i < NR_CPU; i++) {
+        LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
+    }
+    SHOWINFO_BORDER_LINE();
+    LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
+
+    for (int pool_id = INIT; pool_id < NR_STATE; pool_id++) {
+        rbt_traverse(&g_scheduler.snode_state_pool[pool_id], print_info, NULL);
+    }
 
     SHOWINFO_BORDER_LINE();
@@ -150,7 +129,7 @@ void show_cpu(void)
     assert(current_task != NULL);
 
     LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n");
-    LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick);
+    LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->snode.sched_context.remain_tick, current_task->snode.sched_context.remain_tick);
 
     LOG_PRINTF("***********************************************************\n");
     return;
@@ -30,33 +30,31 @@ Modification:
 #include "log.h"
 #include "schedule_algo.h"
 
+static struct Thread* next_runable_task;
+
+bool find_runable_task(RbtNode* node, void* data)
+{
+    struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
+    struct Thread* thd = snode->pthd;
+
+    if (!thd->dead) {
+        next_runable_task = thd;
+        return false;
+    } else {
+        struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
+        tlo->free_pcb(thd);
+        return false;
+    }
+
+    return true;
+}
+
 struct Thread* max_priority_runnable_task(void)
 {
-    static struct Thread* task = NULL;
-    // static int priority = 0;
-    // priority = __builtin_ffs(ready_task_priority) - 1;
-    // if (priority > 31 || priority < 0) {
-    //     return NULL;
-    // }
-
-    // DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
-    // {
-    //     assert(task != NULL);
-    //     if (task->state == READY && !task->dead) {
-    //         // found a runnable task, stop this look up
-    //         return task;
-    //     } else if (task->dead && task->state != RUNNING) {
-
-    //         struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
-    //         tlo->free_pcb(task);
-    //         return NULL;
-    //     }
-    // }
-    if (!rbt_is_empty(&g_scheduler.snode_state_pool[READY])) {
-        return ((struct ScheduleNode*)(g_scheduler.snode_state_pool[READY].root->data))->pthd;
-    }
-    return NULL;
+    /// @todo better strategy
+    next_runable_task = NULL;
+    rbt_traverse(&g_scheduler.snode_state_pool[READY], find_runable_task, NULL);
+    return next_runable_task;
 }
 
 #include "multicores.h"
@@ -79,6 +77,8 @@ bool init_schedule_node(struct ScheduleNode* snode, struct Thread* bind_thd)
 
 bool task_trans_sched_state(struct ScheduleNode* snode, RbtTree* from_pool, RbtTree* to_pool, enum ThreadState target_state)
 {
+    assert(snode != NULL);
+    // DEBUG("%d %p %d %s\n", snode->snode_id, snode->pthd, snode->pthd->tid, snode->pthd->name);
     assert(snode->snode_id != UNINIT_SNODE_ID && snode->pthd != NULL);
     if (RBTTREE_DELETE_SUCC != rbt_delete(from_pool, snode->snode_id)) {
         DEBUG("Thread %d not in from schedule pool\n", snode->pthd->tid);
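The hunk cuts off after the delete-from-source step. Judging by the call sites above, the presumed remainder inserts the node into the destination pool and records the new state; a sketch of that continuation, assuming rbt_insert returns 0 on success:

    // Presumed continuation (not shown in the hunk): move the snode
    // into the destination pool and record its new state.
    if (0 != rbt_insert(to_pool, snode->snode_id, (void*)snode)) {
        return false;
    }
    snode->state = target_state;
    return true;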
@@ -98,7 +98,6 @@ void task_dead(struct Thread* thd)
 {
     assert(thd != NULL);
     struct ScheduleNode* snode = &thd->snode;
-    enum ThreadState thd_cur_state = snode->state;
 
     assert(snode->state == READY);
 
@@ -106,6 +105,7 @@ void task_dead(struct Thread* thd)
         &g_scheduler.snode_state_pool[READY], //
         &g_scheduler.snode_state_pool[DEAD], DEAD);
     assert(trans_res = true);
+    assert(RBTTREE_DELETE_SUCC == rbt_delete(&g_scheduler.snode_state_pool[DEAD], snode->snode_id));
     return;
 }
 
@@ -154,7 +154,6 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
         return false;
     }
 
-    struct Thread* thd = NULL;
     // by design: no waking any waiting threads
 
     rbt_delete(&sem_pool->sem_pool_map, sem_id);
@@ -83,7 +83,7 @@ static void _task_manager_init()
     }
 
     // tid pool
-    xizi_task_manager.next_pid = 0;
+    xizi_task_manager.next_pid = 1;
 
     // init priority bit map
     ready_task_priority = 0;
@@ -221,6 +221,7 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
     }
 
     // [schedule related]
+    task->tid = xizi_task_manager.next_pid++;
     if (!init_schedule_node(&task->snode, task)) {
         ERROR("Not enough memory\n");
         slab_free(&xizi_task_manager.task_allocator, (void*)task);
@@ -238,7 +239,6 @@ static struct Thread* _new_thread(struct MemSpace* pmemspace)
     ERROR_FREE
     {
         /* init basic task ref member */
-        task->tid = xizi_task_manager.next_pid++;
         task->bind_irq = false;
 
         /* vm & memory member */
@@ -466,17 +466,19 @@ int rbt_delete(RbtTree* tree, uintptr_t key)
     return RBTTREE_DELETE_SUCC;
 }
 
-void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn)
+void rbt_traverse_inner(RbtNode* node, rbt_traverse_fn fn, void* data)
 {
     if (node == NULL) {
         return;
     }
-    fn(node);
-    rbt_traverse_inner(node->left, fn);
-    rbt_traverse_inner(node->right, fn);
+    if (fn(node, data)) {
+        rbt_traverse_inner(node->left, fn, data);
+        rbt_traverse_inner(node->right, fn, data);
+    }
 }
 
-void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn)
+void rbt_traverse(RbtTree* tree, rbt_traverse_fn fn, void* data)
 {
-    rbt_traverse_inner(tree->root, fn);
+    rbt_traverse_inner(tree->root, fn, data);
 }
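Note what this implementation makes `return false` mean: it prunes only the current node's subtree; recursive calls already on the stack still walk sibling subtrees. A callback that must abort the entire walk therefore latches its own flag, which is exactly the role `kill_succ` plays in sys_kill above. A minimal sketch of that latch pattern, assuming RbtNode exposes its `key` field (names here are illustrative):

    static RbtNode* found_node;

    static bool find_first_key_ge(RbtNode* node, void* data)
    {
        if (found_node != NULL) {
            return false; // match already latched: prune remaining subtrees
        }
        if (node->key >= *(uintptr_t*)data) {
            found_node = node;
            return false;
        }
        return true;
    }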
@@ -62,9 +62,10 @@ void hw_current_second(uintptr_t* second)
     *second = p_clock_driver->get_second();
 }
 
-void count_down_sleeping_task(RbtNode* node)
+bool count_down_sleeping_task(RbtNode* node, void* data)
 {
     /// @todo implement
+    return false;
 }
 
 uint64_t global_tick = 0;
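count_down_sleeping_task is still a stub, and its `return false` stops the scan at the pool's root. One plausible shape for the @todo, sketched with a hypothetical `wakeup_tick` field; the wake is deferred because task_trans_sched_state would mutate the SLEEPING tree mid-traversal:

    // Hypothetical sketch of the @todo; wakeup_tick is an assumed field.
    static struct ScheduleNode* pending_wake; // one wakeup per tick, for brevity

    bool count_down_sleeping_task(RbtNode* node, void* data)
    {
        struct ScheduleNode* snode = (struct ScheduleNode*)node->data;
        if (global_tick >= snode->sched_context.wakeup_tick) {
            pending_wake = snode; // transition after rbt_traverse returns
            return false;
        }
        return true;
    }

The clock handler would then, after the traversal returns, move pending_wake from the SLEEPING pool to the READY pool via task_trans_sched_state.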
@@ -86,7 +87,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
     }
 
     // todo: cpu 0 will handle sleeping thread
-    rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task);
+    rbt_traverse(&g_scheduler.snode_state_pool[SLEEPING], count_down_sleeping_task, NULL);
 
     // DOUBLE_LIST_FOR_EACH_ENTRY(thread, &xizi_task_manager.task_sleep_list_head, node)
     // {