forked from xuos/xiuos

Support running task list management.

parent 3c6e8ce109
commit baa04913bd
@@ -104,6 +104,7 @@ struct SchedulerRightGroup {

struct XiziTaskManager {
    struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
    struct double_list_node task_running_list_head;
    struct double_list_node task_blocked_list_head;
    struct slab_allocator task_allocator;
    struct slab_allocator task_buddy_allocator;
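These members are all intrusive list heads: a task control block carries its own node and sits on exactly one of them at a time, a ready list of its priority, the running list this commit introduces, or the blocked list. The standalone sketch below is not part of the commit; struct dlist and the list_* helpers are simplified stand-ins for the kernel's struct double_list_node, doubleList* routines and IS_DOUBLE_LIST_EMPTY, whose exact signatures are assumed here.

#include <stdio.h>

/* Illustrative stand-in for struct double_list_node: a circular,
 * intrusive doubly linked list. */
struct dlist {
    struct dlist *prev, *next;
};

static void list_init(struct dlist *head) { head->prev = head->next = head; }
static int list_empty(const struct dlist *head) { return head->next == head; }

static void list_add_tail(struct dlist *n, struct dlist *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

static void list_del(struct dlist *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->prev = n->next = n;
}

#define TASK_MAX_PRIORITY 32

/* Mirrors the shape of struct XiziTaskManager: one ready list per
 * priority, plus one running list and one blocked list. */
struct task_lists {
    struct dlist ready[TASK_MAX_PRIORITY];
    struct dlist running;
    struct dlist blocked;
};

int main(void)
{
    struct task_lists m;
    for (int i = 0; i < TASK_MAX_PRIORITY; i++)
        list_init(&m.ready[i]);
    list_init(&m.running);
    list_init(&m.blocked);

    struct dlist t;                      /* a task's embedded list node */
    list_init(&t);
    list_add_tail(&t, &m.ready[4]);      /* task becomes ready at priority 4 */
    list_del(&t);
    list_add_tail(&t, &m.running);       /* scheduler moves it to the running list */

    printf("ready[4] empty: %d, running empty: %d\n",
           list_empty(&m.ready[4]), list_empty(&m.running));
    return 0;
}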
@@ -40,6 +40,11 @@ int sys_exit(struct TaskMicroDescriptor* ptask)
{
    assert(ptask != NULL);
    ptask->dead = true;
    // free the task directly if it is a blocked task
    if (ptask->state == BLOCKED) {
        xizi_task_manager.free_pcb(ptask);
    }
    // yield the current task in case it is exiting itself
    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
    return 0;
}
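A minimal, self-contained model of this exit path follows (not the kernel code; the types and the free_pcb/yield hooks are hypothetical stand-ins for xizi_task_manager's callbacks): the task is only marked dead, a blocked task is reclaimed immediately because it sits on no run queue, and the current task then yields so a self-exit takes effect at the next reschedule. Presumably the dead flag is what lets a later scheduling step reclaim a task that was still running; that code is not shown in this hunk.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum state { READY, RUNNING, BLOCKED };
struct task { enum state state; bool dead; };

static struct task *current_task;             /* stands in for cur_cpu()->task */

static void free_pcb(struct task *t) { (void)t; printf("pcb freed immediately\n"); }

static void yield_current(struct task *t)
{
    if (t != NULL)
        t->state = READY;
    printf("current task yields\n");
}

static int do_exit(struct task *t)
{
    assert(t != NULL);
    t->dead = true;
    if (t->state == BLOCKED)                  /* not on any run queue: free now */
        free_pcb(t);
    yield_current(current_task);              /* if t is the current task, it stops running here */
    return 0;
}

int main(void)
{
    struct task blocked_task = { BLOCKED, false };
    struct task self = { RUNNING, false };
    current_task = &self;
    do_exit(&blocked_task);                   /* freed on the spot */
    do_exit(&self);                           /* reaped later by the scheduler */
    return 0;
}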
@@ -35,7 +35,25 @@ extern int sys_exit(struct TaskMicroDescriptor* task);
int sys_kill(int id)
{
    struct TaskMicroDescriptor* task = NULL;
    // check if the task is a running one
    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
    {
        if (task->pid == id) {
            sys_exit(task);
            return 0;
        }
    }

    // check if the task is a blocked one
    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
    {
        if (task->pid == id) {
            sys_exit(task);
            return 0;
        }
    }

    // check if the task is a ready one
    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
        {
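sys_kill therefore resolves a pid by scanning the three kinds of lists in a fixed order: the running list first, then the blocked list, then every per-priority ready list, handing the first match to sys_exit. The toy program below (plain arrays instead of the kernel's intrusive lists; all pids and sizes are invented) shows that same search order.

#include <stdio.h>

#define TASK_MAX_PRIORITY 4
#define MAX_TASKS 8

/* Toy model: tasks grouped by the list they currently sit on. */
struct task { int pid; };

static struct task running_tasks[MAX_TASKS] = { { 7 } };
static int n_running = 1;
static struct task blocked_tasks[MAX_TASKS] = { { 3 } };
static int n_blocked = 1;
static struct task ready_tasks[TASK_MAX_PRIORITY][MAX_TASKS] = { [2] = { { 11 } } };
static int n_ready[TASK_MAX_PRIORITY] = { [2] = 1 };

/* Same search order as sys_kill: running list, then blocked list,
 * then every per-priority ready list. */
static struct task *find_by_pid(int pid)
{
    for (int i = 0; i < n_running; i++)
        if (running_tasks[i].pid == pid) return &running_tasks[i];
    for (int i = 0; i < n_blocked; i++)
        if (blocked_tasks[i].pid == pid) return &blocked_tasks[i];
    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++)
        for (int i = 0; i < n_ready[prio]; i++)
            if (ready_tasks[prio][i].pid == pid) return &ready_tasks[prio][i];
    return NULL;
}

int main(void)
{
    printf("pid 11 found: %s\n", find_by_pid(11) ? "yes" : "no");
    printf("pid 99 found: %s\n", find_by_pid(99) ? "yes" : "no");
    return 0;
}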
@@ -140,8 +140,6 @@ int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
    }

    irq_forward_table[irq_num].handle_task = NULL;
    sys_close_session(kernel_irq_proxy, &irq_forward_table[irq_num].session);
    DEBUG("Unbind: %s to irq %d", task->name, irq_num);
    return 0;
}
@@ -66,6 +66,13 @@ void show_tasks(void)
    }
    LOG_PRINTF("******************************************************\n");
    LOG_PRINTF("STAT ID TASK PRI MEM(KB)\n");
    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
    {
        LOG_PRINTF("RUNNING ");
        _padding(task->name);
        LOG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
    }

    for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
        if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) {
            continue;
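Two details worth noting: task->mem_size >> 10 converts a byte count to KB for the MEM(KB) column, and _padding (defined elsewhere in this file) presumably pads the task name so the columns line up. The standalone sketch below imitates that table with printf field widths; the row data and column widths are invented for illustration and are not the kernel's output.

#include <stdio.h>

/* Toy row type; the kernel prints state, pid, name, priority and memory use. */
struct row { const char *stat; int pid; const char *name; int prio; unsigned mem_kb; };

int main(void)
{
    struct row rows[] = {
        { "RUNNING", 1, "shell",   0, 128 },
        { "READY",   2, "net_srv", 1,  64 },
    };

    printf("******************************************************\n");
    printf("%-8s %-4s %-16s %-4s %s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
    for (unsigned i = 0; i < sizeof rows / sizeof rows[0]; i++)
        printf("%-8s %-4d %-16s %-4d %u\n",
               rows[i].stat, rows[i].pid, rows[i].name, rows[i].prio, rows[i].mem_kb);
    return 0;
}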
@@ -27,14 +27,18 @@ Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "log.h"
#include "scheduler.h"

struct TaskMicroDescriptor* max_priority_runnable_task(void)
{
-   struct TaskMicroDescriptor* task = NULL;
-   uint32_t priority = 0;
+   static struct TaskMicroDescriptor* task = NULL;
+   static int priority = 0;

    priority = __builtin_ffs(ready_task_priority) - 1;
    if (priority > 31 || priority < 0) {
        return NULL;
    }

    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
    {
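ready_task_priority is a 32-bit mask in which bit p is set while the ready list for priority p is non-empty, so picking the next priority is a single find-first-set instead of a scan over all TASK_MAX_PRIORITY lists. __builtin_ffs returns the 1-based index of the least significant set bit, or 0 when the mask is empty, hence the "- 1" and the out-of-range check above. A standalone demonstration, assuming (as this selection implies) that numerically smaller priority values are scheduled first:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ready_task_priority = 0;

    ready_task_priority |= (1u << 5);   /* a task becomes ready at priority 5 */
    ready_task_priority |= (1u << 12);  /* and another at priority 12 */

    int prio = __builtin_ffs(ready_task_priority) - 1;
    printf("next priority to run: %d\n", prio);   /* prints 5 */

    ready_task_priority = 0;
    prio = __builtin_ffs(ready_task_priority) - 1;
    printf("with no ready tasks: %d\n", prio);    /* prints -1: nothing runnable */
    return 0;
}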
@@ -42,6 +42,20 @@ Modification:
struct CPU global_cpus[NR_CPU];
uint32_t ready_task_priority;

static inline void task_node_leave_list(struct TaskMicroDescriptor* task)
{
    doubleListDel(&task->node);
    if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
        ready_task_priority &= ~((uint32_t)1 << task->priority);
    }
}

static inline void task_node_add_to_ready_list(struct TaskMicroDescriptor* task)
{
    doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
    ready_task_priority |= ((uint32_t)1 << task->priority);
}

static void _task_manager_init()
{
    // init task list to NULL
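These two helpers carry most of the cleanup in the rest of this commit: the sites that used to call doubleListDel/doubleListAddOnBack and touch ready_task_priority by hand now go through them, so the bitmask cannot drift out of sync with the ready lists. The toy below reproduces that invariant with a simple per-priority counter standing in for the list (the kernel checks IS_DOUBLE_LIST_EMPTY instead):

#include <stdio.h>
#include <stdint.h>

#define TASK_MAX_PRIORITY 32

static int ready_count[TASK_MAX_PRIORITY];   /* stand-in for the per-priority lists */
static uint32_t ready_task_priority;

static void node_add_to_ready(int prio)
{
    ready_count[prio]++;
    ready_task_priority |= (uint32_t)1 << prio;        /* list can no longer be empty */
}

static void node_leave_ready(int prio)
{
    ready_count[prio]--;
    if (ready_count[prio] == 0)                        /* last entry left: clear the bit */
        ready_task_priority &= ~((uint32_t)1 << prio);
}

int main(void)
{
    node_add_to_ready(3);
    node_add_to_ready(3);
    node_leave_ready(3);
    printf("bit 3 still set: %d\n", (ready_task_priority & (1u << 3)) != 0);  /* 1 */
    node_leave_ready(3);
    printf("bit 3 still set: %d\n", (ready_task_priority & (1u << 3)) != 0);  /* 0 */
    return 0;
}

Keeping the bit-clearing next to the list removal is what lets later hunks delete the hand-rolled "remove priority" checks scattered through task.c.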
@@ -49,6 +63,7 @@ static void _task_manager_init()
        doubleListNodeInit(&xizi_task_manager.task_list_head[i]);
    }
    doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
    doubleListNodeInit(&xizi_task_manager.task_running_list_head);
    // init task (slab) allocator
    slab_init(&xizi_task_manager.task_allocator, sizeof(struct TaskMicroDescriptor));
    slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy));
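The two slab allocators are sized at init time for exactly one object type each: task control blocks (struct TaskMicroDescriptor) and buddy-allocator descriptors (struct KBuddy). A fixed-size pool like that can be modelled with a simple free list, as in the sketch below; this only illustrates the idea and is not the kernel's slab_allocator implementation.

#include <stdio.h>
#include <stddef.h>

/* Minimal fixed-size allocator in the spirit of slab_init/alloc/free:
 * every object has the same size, so free objects can be chained
 * through their own storage. */
#define POOL_OBJECTS 8

struct pcb_like { char payload[64]; };        /* stand-in for a task descriptor */

union slot { union slot *next; struct pcb_like obj; };

static union slot pool[POOL_OBJECTS];
static union slot *free_list;

static void pool_init(void)
{
    for (int i = 0; i < POOL_OBJECTS - 1; i++)
        pool[i].next = &pool[i + 1];
    pool[POOL_OBJECTS - 1].next = NULL;
    free_list = &pool[0];
}

static void *pool_alloc(void)
{
    if (free_list == NULL)
        return NULL;                          /* pool exhausted */
    union slot *s = free_list;
    free_list = s->next;
    return &s->obj;
}

static void pool_free(void *p)
{
    union slot *s = (union slot *)p;          /* object address equals slot address */
    s->next = free_list;
    free_list = s;
}

int main(void)
{
    pool_init();
    void *a = pool_alloc();
    void *b = pool_alloc();
    pool_free(a);
    printf("allocated %p and %p, then returned the first to the pool\n", a, b);
    return 0;
}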
@@ -148,9 +163,8 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
        kfree((char*)task->main_thread.stack_addr);
    }

-   struct double_list_node* cur_node = &task->node;
    // remove it from used task list
-   doubleListDel(cur_node);
+   task_node_leave_list(task);

    // free task back to allocator
    if (task->massive_ipc_allocator != NULL) {
@@ -158,11 +172,6 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
        slab_free(&xizi_task_manager.task_buddy_allocator, (void*)task->massive_ipc_allocator);
    }
    slab_free(&xizi_task_manager.task_allocator, (void*)task);

-   // remove priority
-   if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
-       ready_task_priority &= ~(1 << task->priority);
-   }
}

/* alloc a new task with init */
@@ -217,8 +226,15 @@ static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task)
    task->maxium_tick = TASK_CLOCK_TICK * 10;
    task->state = READY;
    task->priority = TASK_DEFAULT_PRIORITY;
-   doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-   ready_task_priority |= (1 << task->priority);
+   task_node_add_to_ready_list(task);
}

static void task_state_set_running(struct TaskMicroDescriptor* task)
{
    assert(task != NULL && task->state == READY);
    task->state = RUNNING;
    task_node_leave_list(task);
    doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head);
}

struct TaskMicroDescriptor* next_task_emergency = NULL;
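task_state_set_running appears to be new in this commit: promoting a task is now the one place where a READY task leaves its ready list and is threaded onto the running list, guarded by an assertion on the old state. A minimal model of that transition (state change and assertion only; the list moves are covered by the sketches above):

#include <assert.h>
#include <stdio.h>

enum state { READY, RUNNING, BLOCKED };

struct task { enum state state; };

/* Same shape as task_state_set_running: only a READY task may be promoted;
 * in the kernel the promotion is also what moves it onto the running list. */
static void set_running(struct task *t)
{
    assert(t != NULL && t->state == READY);
    t->state = RUNNING;
}

int main(void)
{
    struct task t = { READY };
    set_running(&t);
    printf("state is RUNNING: %d\n", t.state == RUNNING);
    return 0;
}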
@@ -249,8 +265,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
    }

    /* run the chosen task */
-   assert(next_task->state == READY);
-   next_task->state = RUNNING;
+   task_state_set_running(next_task);
    cpu->task = next_task;
    p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
    context_switch(&cpu->scheduler, next_task->main_thread.context);
@@ -261,27 +276,23 @@ static void _scheduler(struct SchedulerRightGroup right_group)
static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
{
    assert(task != NULL);
    /// @warning only supports yielding the current task for now
    assert(task == cur_cpu()->task && task->state == RUNNING);

    // rearrange the current task's position
-   doubleListDel(&task->node);
+   task_node_leave_list(task);
    if (task->state == RUNNING) {
        task->state = READY;
    }
    task->remain_tick = TASK_CLOCK_TICK;
    if (task == cur_cpu()->task) {
        cur_cpu()->task = NULL;
    }
-   doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
+   task_node_add_to_ready_list(task);
}

static void _task_block(struct TaskMicroDescriptor* task)
{
    assert(task != NULL);
    assert(task->state != RUNNING);
-   doubleListDel(&task->node);
-   if (xizi_task_manager.task_list_head[task->priority].next == &xizi_task_manager.task_list_head[task->priority]) {
-       ready_task_priority &= ~(1 << task->priority);
-   }
+   task_node_leave_list(task);
    task->state = BLOCKED;
    doubleListAddOnHead(&task->node, &xizi_task_manager.task_blocked_list_head);
}
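Note where the yielding task is re-queued: task_node_add_to_ready_list appends at the back of its priority's list (doubleListAddOnBack), so equal-priority tasks rotate round-robin style, while a blocked task is parked on the separate blocked list until something unblocks it. The toy queue below, with invented pids, shows the round-robin effect of always re-inserting at the back:

#include <stdio.h>

#define N 3

int main(void)
{
    int queue[N] = { 1, 2, 3 };          /* pids in ready-list order */

    for (int round = 0; round < 4; round++) {
        int running = queue[0];          /* the scheduler picks the front */
        printf("tick %d: pid %d runs\n", round, running);

        /* yield: remove from the front, append at the back */
        for (int i = 0; i < N - 1; i++)
            queue[i] = queue[i + 1];
        queue[N - 1] = running;
    }
    return 0;
}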
@@ -290,12 +301,13 @@ static void _task_unblock(struct TaskMicroDescriptor* task)
{
    assert(task != NULL);
    assert(task->state == BLOCKED);
-   doubleListDel(&task->node);
+   task_node_leave_list(task);
    task->state = READY;
-   doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
-   ready_task_priority |= (1 << task->priority);
+   task_node_add_to_ready_list(task);
}

/// @brief @warning not tested function
/// @param priority
static void _set_cur_task_priority(int priority)
{
    if (priority < 0 || priority >= TASK_MAX_PRIORITY) {
@@ -304,14 +316,13 @@ static void _set_cur_task_priority(int priority)
    }

    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
-   assert(current_task != NULL);
+   assert(current_task != NULL && current_task->state == RUNNING);

+   task_node_leave_list(current_task);

    current_task->priority = priority;

-   doubleListDel(&current_task->node);
-   doubleListAddOnBack(&current_task->node, &xizi_task_manager.task_list_head[current_task->priority]);

-   ready_task_priority |= (1 << current_task->priority);
+   task_node_add_to_ready_list(current_task);

    return;
}