delete task only when it's not running.
parent 03039cbdab
commit a7cbb0d041
@@ -74,7 +74,7 @@ Modification:
 #include "cortex_a9.h"
 
-#define NR_CPU 3
+#define NR_CPU 4
 
 __attribute__((always_inline)) static inline uint32_t user_mode()
 {
@@ -110,50 +110,56 @@ void handle_fiq(void)
 extern void context_switch(struct context**, struct context*);
 void dabort_handler(struct trapframe* r)
 {
-    xizi_enter_kernel();
-
-    uint32_t dfs, dfa;
-    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
-    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
-
-    if (r->pc < KERN_MEM_BASE) { // Exception occured in User space: exit
-        ERROR("dabort in user space: %s\n", cur_cpu()->task->name);
-        LOG("program counter: 0x%x caused\n", r->pc);
-        LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
-        _abort_reason(dfs);
-        dump_tf(r);
-        sys_exit(cur_cpu()->task);
-        context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
-    } else { // Exception occured in Kernel space: panic
+    if (xizi_is_in_kernel()) {
+        uint32_t dfs, dfa;
+        __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
+        __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
         LOG("program counter: 0x%x caused\n", r->pc);
         LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
         _abort_reason(dfs);
         dump_tf(r);
         panic("data abort exception\n");
     }
+
+    xizi_enter_kernel();
+
+    uint32_t dfs, dfa;
+    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
+    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
+
+    ERROR("dabort in user space: %s\n", cur_cpu()->task->name);
+    LOG("program counter: 0x%x caused\n", r->pc);
+    LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
+    _abort_reason(dfs);
+    dump_tf(r);
+    sys_exit(cur_cpu()->task);
+    context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
 }
 
 void iabort_handler(struct trapframe* r)
 {
-    xizi_enter_kernel();
-    uint32_t ifs, ifa;
-
-    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
-    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
-
-    if (r->pc < KERN_MEM_BASE) { // Exception occured in User space: exit
-        ERROR("iabort in user space: %s\n", cur_cpu()->task->name);
-        LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
-        LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
-        _abort_reason(ifs);
-        dump_tf(r);
-        sys_exit(cur_cpu()->task);
-        context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
-    } else { // Exception occured in Kernel space: panic
-        LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
+    if (xizi_is_in_kernel()) {
+        uint32_t ifs, ifa;
+        __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
+        __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
+        LOG("program counter: 0x%x caused\n", r->pc);
         LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
         _abort_reason(ifs);
         dump_tf(r);
         panic("prefetch abort exception\n");
     }
+
+    xizi_enter_kernel();
+
+    uint32_t ifs, ifa;
+    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
+    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
+
+    ERROR("iabort in user space: %s\n", cur_cpu()->task->name);
+    LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
+    LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
+    _abort_reason(ifs);
+    dump_tf(r);
+    sys_exit(cur_cpu()->task);
+    context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
 }
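Note on the restructure above: the user/kernel split no longer keys off the faulting address (r->pc < KERN_MEM_BASE) but off whether this CPU had already entered the kernel when the abort fired. A minimal sketch of the resulting pattern, using a hypothetical fault_common() helper that is not part of the commit:

    /* Sketch only; fault_common() is hypothetical. An abort taken while the
     * whole-kernel lock is held means the kernel itself faulted, so the only
     * safe response is to panic. A fault from user context kills the
     * offending task and switches back to the scheduler context. */
    void fault_common(struct trapframe* r)
    {
        if (xizi_is_in_kernel()) {
            panic("abort in kernel space\n");
        }
        xizi_enter_kernel();
        sys_exit(cur_cpu()->task); /* mark the current task dead */
        context_switch(&cur_cpu()->task->main_thread.context,
            cur_cpu()->scheduler); /* never returns here */
    }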
@@ -85,9 +85,10 @@ static void _sys_irq_init(int cpu_id)
     vector_base[5] = (uint32_t)handle_reserved; // Reserved
     vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
     vector_base[7] = (uint32_t)handle_fiq; // FIQ
 
-    gic_init();
-}
+    /* active hardware irq responser */
+    gic_init();
+    xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
+}
@@ -139,29 +140,6 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
     xizi_trap_driver.sw_irqtbl[irq].handler = handler;
 }
 
-static bool _send_sgi(uint32_t irq, uint32_t bitmask, enum SgiFilterType type)
-{
-    if (bitmask > (1 << NR_CPU) - 1) {
-        return false;
-    }
-
-    enum _gicd_sgi_filter sgi_filter;
-    switch (type) {
-    case SgiFilter_TargetList:
-        sgi_filter = kGicSgiFilter_UseTargetList;
-        break;
-    case SgiFilter_AllOtherCPUs:
-        sgi_filter = kGicSgiFilter_AllOtherCPUs;
-        break;
-    default:
-        sgi_filter = kGicSgiFilter_OnlyThisCPU;
-        break;
-    }
-    gic_send_sgi(irq, bitmask, sgi_filter);
-
-    return true;
-}
-
 static uint32_t _hw_before_irq()
 {
@@ -217,7 +195,6 @@ static struct XiziTrapDriver xizi_trap_driver = {
     .switch_hw_irqtbl = _switch_hw_irqtbl,
 
     .bind_irq_handler = _bind_irq_handler,
-    .send_sgi = _send_sgi,
 
     .is_interruptable = _is_interruptable,
     .hw_before_irq = _hw_before_irq,
@@ -164,24 +164,6 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
     xizi_trap_driver.sw_irqtbl[irq].handler = handler;
 }
 
-static bool _send_sgi(uint32_t irq, uint32_t bitmask, enum SgiFilterType type)
-{
-    if (bitmask > (1 << NR_CPU) - 1) {
-        return false;
-    }
-
-    int cpu_id = 0;
-    while (bitmask != 0) {
-        if ((bitmask & 0x1) != 0) {
-            XScuGic_SoftwareIntr(&IntcInstance, irq, cpu_id);
-        }
-        cpu_id++;
-        bitmask >>= 1;
-    }
-
-    return true;
-}
-
 static uint32_t _hw_before_irq()
 {
@@ -233,7 +215,6 @@ static struct XiziTrapDriver xizi_trap_driver = {
     .switch_hw_irqtbl = _switch_hw_irqtbl,
 
     .bind_irq_handler = _bind_irq_handler,
-    .send_sgi = _send_sgi,
 
     .is_interruptable = _is_interruptable,
     .hw_before_irq = _hw_before_irq,
@@ -66,9 +66,8 @@ struct XiziTrapDriver {
     void (*cpu_irq_disable)();
     void (*single_irq_enable)(int irq, int cpu, int prio);
     void (*single_irq_disable)(int irq, int cpu);
-    uint32_t* (*switch_hw_irqtbl)(uint32_t*);
-
-    bool (*send_sgi)(uint32_t, uint32_t, enum SgiFilterType);
+    uint32_t* (*switch_hw_irqtbl)(uint32_t*);
     void (*bind_irq_handler)(int, irq_handler_t);
 
     /* check if no if interruptable */
@@ -35,7 +35,10 @@ IPC_SERVER_INTERFACE(Ipc_intr_3, 1);
 IPC_SERVER_REGISTER_INTERFACES(IpcSwIntrHandler, 1, Ipc_intr_3);
 int main()
 {
-    register_irq(SW_INTERRUPT_3, Ipc_intr_3);
+    if (register_irq(SW_INTERRUPT_3, Ipc_intr_3) == -1) {
+        printf("TEST_SW_HDLR: bind failed");
+        exit();
+    }
     ipc_server_loop(&IpcSwIntrHandler);
 
     exit();
@@ -50,4 +50,5 @@ static inline struct CPU* cur_cpu(void)
 struct spinlock whole_kernel_lock;
 
 void xizi_enter_kernel();
 void xizi_leave_kernel();
+bool xizi_is_in_kernel();
@@ -93,4 +93,6 @@ int sys_state(sys_state_option option, sys_state_info* info);
 int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev);
 
 int sys_register_irq(int irq_num, int irq_opcode);
+int sys_unbind_irq_all(struct TaskMicroDescriptor* task);
+int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num);
 #endif
@@ -64,6 +64,8 @@ struct Thread {
 struct TaskMicroDescriptor {
     /* task debug resources */
     int pid;
+    bool bind_irq;
+    bool dead;
     char name[TASK_NAME_MAX_LEN];
 
     /// @todo support return value
@@ -175,9 +175,10 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     }
     strncpy(task->name, last, sizeof(task->name));
 
-    xizi_pager.free_user_pgdir(&task->pgdir);
+    if (task->pgdir.pd_addr != NULL) {
+        xizi_pager.free_user_pgdir(&task->pgdir);
+    }
     task->pgdir = pgdir;
 
     task->heap_base = ALIGNUP(load_size, PAGE_SIZE);
     task->mem_size = task->heap_base + USER_STACK_SIZE;
     return 0;
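This guard pairs with the _new_task_cb() hunk further down, where the eager xizi_pager.new_pgdir() call is dropped and task->pgdir.pd_addr now starts out NULL: a fresh task can reach task_exec() without a page directory, so the old unconditional free would have operated on one that never existed. A sketch of the assumed invariant:

    /* Assumed invariant after this commit: pd_addr == NULL means "no page
     * directory allocated yet", so every free must be guarded. */
    if (task->pgdir.pd_addr != NULL) {
        xizi_pager.free_user_pgdir(&task->pgdir); /* free only if allocated */
    }
    task->pgdir = pgdir; /* adopt the directory built for the new image */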
@@ -39,51 +39,7 @@ Modification:
 int sys_exit(struct TaskMicroDescriptor* ptask)
 {
     assert(ptask != NULL);
 
-    /* handle sessions for condition 1, ref. delete_share_pages() */
-    // close all server_sessions
-    struct server_session* server_session = NULL;
-    while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
-        server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
-        // cut the connection from task to session
-        if (!server_session->closed) {
-            xizi_share_page_manager.unmap_task_share_pages(ptask, server_session->buf_addr, CLIENT_SESSION_BACKEND(server_session)->nr_pages);
-            server_session->closed = true;
-        }
-        doubleListDel(&server_session->node);
-        SERVER_SESSION_BACKEND(server_session)->server = NULL;
-        // delete session (also cut connection from session to task)
-        if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
-            xizi_share_page_manager.delete_share_pages(SERVER_SESSION_BACKEND(server_session));
-        }
-    }
-    // close all client_sessions
-    struct client_session* client_session = NULL;
-    while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
-        client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
-        // cut the connection from task to session
-        if (!client_session->closed) {
-            xizi_share_page_manager.unmap_task_share_pages(ptask, client_session->buf_addr, CLIENT_SESSION_BACKEND(client_session)->nr_pages);
-            client_session->closed = true;
-        }
-        doubleListDel(&client_session->node);
-        CLIENT_SESSION_BACKEND(client_session)->client = NULL;
-        // delete session (also cut connection from session to task)
-        if (CLIENT_SESSION_BACKEND(client_session)->server_side.closed) {
-            xizi_share_page_manager.delete_share_pages(CLIENT_SESSION_BACKEND(client_session));
-        }
-    }
-
-    if (ptask->server_identifier.meta != NULL) {
-        struct TraceTag server_identifier_owner;
-        AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
-        assert(server_identifier_owner.meta != NULL);
-        DeleteResource(&ptask->server_identifier, &server_identifier_owner);
-    }
-
-    // delete task for pcb_list
-    xizi_task_manager.task_yield_noschedule(ptask, true);
-    ptask->state = DEAD;
-
+    ptask->dead = true;
+    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
     return 0;
 }
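This is the heart of the commit: sys_exit() no longer tears the task down in place (the session and resource cleanup moves to _task_retrieve_sys_resources() in task.c, shown below). It only marks the task and yields; reclamation is deferred to the scheduler, which frees the PCB once the task is provably off-CPU. A sketch of the idea, using a hypothetical try_reclaim() helper:

    /* Hypothetical helper illustrating the deferred-delete rule: a task
     * marked dead may still be RUNNING on another core, and freeing its
     * page directory and kernel stack at that moment would pull memory out
     * from under that core. Reclaim only once it is no longer RUNNING. */
    static void try_reclaim(struct TaskMicroDescriptor* task)
    {
        if (task->dead && task->state != RUNNING) {
            xizi_task_manager.free_pcb(task); /* safe: task is off-CPU */
        }
    }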
@@ -79,26 +79,33 @@ int user_irq_handler(int irq, void* tf, void* arg)
         AchieveResourceTag(&mmu_driver_tag, RequireRootTag(), "/hardkernel/mmu-ac-resource");
         p_mmu_driver = (struct MmuCommonDone*)AchieveResource(&mmu_driver_tag);
     }
-    p_mmu_driver->LoadPgdir((uintptr_t)V2P(kernel_irq_proxy->pgdir.pd_addr));
-    send_irq_to_user(irq);
-    p_mmu_driver->LoadPgdir((uintptr_t)V2P(cur_cpu()->task->pgdir.pd_addr));
-
-    next_task_emergency = irq_forward_table[irq].handle_task;
-    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+    if (irq_forward_table[irq].handle_task != NULL) {
+        p_mmu_driver->LoadPgdir((uintptr_t)V2P(kernel_irq_proxy->pgdir.pd_addr));
+        send_irq_to_user(irq);
+        p_mmu_driver->LoadPgdir((uintptr_t)V2P(cur_cpu()->task->pgdir.pd_addr));
+
+        next_task_emergency = irq_forward_table[irq].handle_task;
+        xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+    }
     return 0;
 }
 
 extern int create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session);
 /// @warning no tested.
+static struct XiziTrapDriver* p_intr_driver = NULL;
 int sys_register_irq(int irq_num, int irq_opcode)
 {
     // init intr resource;
-    static struct TraceTag intr_ac_tag;
-    if (!AchieveResourceTag(&intr_ac_tag, RequireRootTag(), "hardkernel/intr-ac-resource")) {
-        ERROR("intr not initialized.\n");
-        return -1;
+    if (p_intr_driver == NULL) {
+        struct TraceTag intr_ac_tag;
+        if (!AchieveResourceTag(&intr_ac_tag, RequireRootTag(), "hardkernel/intr-ac-resource")) {
+            ERROR("intr not initialized.\n");
+            return -1;
+        }
+        p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&intr_ac_tag);
     }
-    struct XiziTrapDriver* p_intr_driver = AchieveResource(&intr_ac_tag);
 
     // init kerenl sender proxy
     if (kernel_irq_proxy == NULL) {
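The driver lookup is also hoisted into a file-scope static, so it is resolved at most once across all sys_register_irq() calls. A sketch of the caching idiom, with a hypothetical intr_driver() accessor (the commit inlines this logic instead):

    /* Hypothetical accessor showing the lookup-once pattern used above. */
    static struct XiziTrapDriver* intr_driver(void)
    {
        if (p_intr_driver == NULL) {
            struct TraceTag tag;
            if (!AchieveResourceTag(&tag, RequireRootTag(),
                    "hardkernel/intr-ac-resource")) {
                return NULL; /* interrupt driver not published yet */
            }
            p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&tag);
        }
        return p_intr_driver;
    }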
@@ -118,6 +125,30 @@ int sys_register_irq(int irq_num, int irq_opcode)
     irq_forward_table[irq_num].opcode = irq_opcode;
     create_session_inner(kernel_irq_proxy, cur_task, PAGE_SIZE, &irq_forward_table[irq_num].session);
     p_intr_driver->bind_irq_handler(irq_num, user_irq_handler);
+    cur_task->bind_irq = true;
 
     return 0;
 }
+
+int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
+{
+    if (irq_forward_table[irq_num].handle_task != task) {
+        return -1;
+    }
+
+    irq_forward_table[irq_num].handle_task = NULL;
+    sys_close_session(&irq_forward_table[irq_num].session);
+    DEBUG("Unbind: %s to irq %d", task->name, irq_num);
+    return 0;
+}
+
+int sys_unbind_irq_all(struct TaskMicroDescriptor* task)
+{
+    for (int idx = 0; idx < NR_IRQS; idx++) {
+        if (irq_forward_table[idx].handle_task == task) {
+            sys_unbind_irq(task, idx);
+        }
+    }
+    task->bind_irq = false;
+    return 0;
+}
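For reference, the forward-table entries manipulated here appear to carry three things per IRQ. The shape below is reconstructed from usage in this file only and is illustrative, not the actual definition:

    /* Illustrative only; field names inferred from the code above. */
    struct IrqForwardEntry {
        struct TaskMicroDescriptor* handle_task; /* user task bound to the irq */
        int opcode;                              /* op forwarded on delivery */
        struct Session session;                  /* kernel-proxy IPC channel */
    };
    extern struct IrqForwardEntry irq_forward_table[NR_IRQS];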
@@ -38,12 +38,10 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void)
 
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
     {
-        if (task->state == READY) {
+        if (task->state == READY && !task->dead) {
             // found a runnable task, stop this look up
             return task;
-        } else if (task->state == DEAD) {
-            // found a killed task, stop this loop
-            // change in pcb_list may break this loop, so find a runnable in next look up
+        } else if (task->dead && task->state != RUNNING) {
             xizi_task_manager.free_pcb(task);
             return NULL;
         }
@@ -57,13 +55,10 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)
 
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
     {
-
-        if (task->state == READY) {
+        if (task->state == READY && !task->dead) {
             // found a runnable task, stop this look up
             return task;
-        } else if (task->state == DEAD) {
-            // found a killed task, stop this loop
-            // change in pcb_list may break this loop, so find a runnable in next look up
+        } else if (task->dead && task->state != RUNNING) {
             xizi_task_manager.free_pcb(task);
             return NULL;
         }
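Both pickers now treat dead as a soft-delete flag: a dead task is never returned as runnable, and one that has also left the RUNNING state is freed on the spot. Because free_pcb() unlinks the very node the iterator is standing on, the scan bails out with NULL instead of continuing, and the scheduler simply retries on its next pass. A compact sketch of that contract (hypothetical wrapper, same rules as the two functions above):

    struct TaskMicroDescriptor* pick_runnable(uint32_t priority)
    {
        struct TaskMicroDescriptor* task = NULL;
        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
        {
            if (task->state == READY && !task->dead) {
                return task; /* runnable and not soft-deleted */
            } else if (task->dead && task->state != RUNNING) {
                xizi_task_manager.free_pcb(task); /* unlinks current node */
                return NULL; /* iterator now invalid; caller retries */
            }
        }
        return NULL;
    }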
@@ -32,10 +32,11 @@ Modification:
 #include "core.h"
 
+#include "assert.h"
+#include "kalloc.h"
 #include "log.h"
 #include "multicores.h"
-#include "kalloc.h"
 #include "scheduler.h"
+#include "syscall.h"
 #include "task.h"
 
 struct CPU global_cpus[NR_CPU];
@@ -74,6 +75,59 @@ static struct TaskMicroDescriptor* _alloc_task_cb()
     return task;
 }
 
+int _task_retrieve_sys_resources(struct TaskMicroDescriptor* ptask)
+{
+    assert(ptask != NULL);
+
+    /* handle sessions for condition 1, ref. delete_share_pages() */
+    // close all server_sessions
+    struct server_session* server_session = NULL;
+    while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
+        server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
+        // cut the connection from task to session
+        if (!server_session->closed) {
+            xizi_share_page_manager.unmap_task_share_pages(ptask, server_session->buf_addr, CLIENT_SESSION_BACKEND(server_session)->nr_pages);
+            server_session->closed = true;
+        }
+        doubleListDel(&server_session->node);
+        SERVER_SESSION_BACKEND(server_session)->server = NULL;
+        // delete session (also cut connection from session to task)
+        if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
+            xizi_share_page_manager.delete_share_pages(SERVER_SESSION_BACKEND(server_session));
+        }
+    }
+    // close all client_sessions
+    struct client_session* client_session = NULL;
+    while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
+        client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
+        // cut the connection from task to session
+        if (!client_session->closed) {
+            xizi_share_page_manager.unmap_task_share_pages(ptask, client_session->buf_addr, CLIENT_SESSION_BACKEND(client_session)->nr_pages);
+            client_session->closed = true;
+        }
+        doubleListDel(&client_session->node);
+        CLIENT_SESSION_BACKEND(client_session)->client = NULL;
+        // delete session (also cut connection from session to task)
+        if (CLIENT_SESSION_BACKEND(client_session)->server_side.closed) {
+            xizi_share_page_manager.delete_share_pages(CLIENT_SESSION_BACKEND(client_session));
+        }
+    }
+
+    if (ptask->server_identifier.meta != NULL) {
+        struct TraceTag server_identifier_owner;
+        AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
+        assert(server_identifier_owner.meta != NULL);
+        DeleteResource(&ptask->server_identifier, &server_identifier_owner);
+    }
+
+    // delete registered irq if there is one
+    if (ptask->bind_irq) {
+        sys_unbind_irq_all(ptask);
+    }
+
+    return 0;
+}
+
 /// @brief this function changes task list without locking, so it must be called inside a lock critical area
 /// @param task
 static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
@@ -83,6 +137,8 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
         return;
     }
 
+    _task_retrieve_sys_resources(task);
+
     // stack is mapped in vspace, so it should be free by pgdir
     if (task->pgdir.pd_addr) {
         xizi_pager.free_user_pgdir(&task->pgdir);
@@ -112,7 +168,7 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
 extern void trap_return(void);
 void task_prepare_enter()
 {
-    spinlock_unlock(&whole_kernel_lock);
+    xizi_leave_kernel();
     trap_return();
 }
@@ -124,10 +180,7 @@ static struct TaskMicroDescriptor* _new_task_cb()
         return NULL;
     }
     // init vm
-    if (!xizi_pager.new_pgdir(&task->pgdir)) {
-        _dealloc_task_cb(task);
-        return NULL;
-    }
+    task->pgdir.pd_addr = NULL;
     /* init basic task member */
     doubleListNodeInit(&task->cli_sess_listhead);
     doubleListNodeInit(&task->svr_sess_listhead);
@@ -180,14 +233,12 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         assert(cur_cpu()->task == NULL);
         if (next_task_emergency != NULL && next_task->state == READY) {
             next_task = next_task_emergency;
-            next_task->state = RUNNING;
         } else {
             next_task = xizi_task_manager.next_runnable_task();
         }
         next_task_emergency = NULL;
-        if (next_task != NULL) {
-            assert(next_task->state == READY);
-            next_task->state = RUNNING;
-        }
         spinlock_unlock(&whole_kernel_lock);
@@ -199,11 +250,16 @@ static void _scheduler(struct SchedulerRightGroup right_group)
 
         /* a runnable task */
         spinlock_lock(&whole_kernel_lock);
-        assert(next_task->state == RUNNING);
+        if (next_task->state == READY) {
+            next_task->state = RUNNING;
+        } else {
+            continue;
+        }
         struct CPU* cpu = cur_cpu();
         cpu->task = next_task;
         p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
         context_switch(&cpu->scheduler, next_task->main_thread.context);
         assert(next_task->state != RUNNING);
     }
 }
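The assert gives way to a re-check because next_task is chosen and the whole-kernel lock is then dropped before the switch; in that window another core can run sys_exit() on the chosen task. Re-validating READY under the lock (and bailing out with continue otherwise) closes the race. A hypothetical claim helper expressing the same rule, under the assumed two-core interleaving (CPU0 picks a READY task and releases the lock; CPU1 marks it dead; CPU0 retakes the lock):

    static bool claim_task(struct TaskMicroDescriptor* task)
    {
        /* caller holds whole_kernel_lock */
        if (task->state != READY) {
            return false; /* killed or taken during the unlocked window */
        }
        task->state = RUNNING; /* claimed under the lock */
        return true;
    }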
@@ -62,8 +62,7 @@ void intr_irq_dispatch(struct trapframe* tf)
     assert(p_intr_driver != NULL);
     uintptr_t int_info = 0;
     if ((int_info = p_intr_driver->hw_before_irq()) == 0) {
-        xizi_leave_kernel();
-        return;
+        goto intr_leave_interrupt;
     }
 
     struct TaskMicroDescriptor* current_task = cur_cpu()->task;
@@ -77,7 +76,7 @@ void intr_irq_dispatch(struct trapframe* tf)
 
     // distribute irq
     irq_handler_t isr = p_intr_driver->sw_irqtbl[irq].handler;
-    if (isr) {
+    if (isr != NULL) {
         isr(irq, tf, NULL);
     } else {
         default_interrupt_routine();
@@ -93,6 +92,7 @@ void intr_irq_dispatch(struct trapframe* tf)
     }
     assert(current_task == cur_cpu()->task);
 
+intr_leave_interrupt:
     xizi_leave_kernel();
 }
@@ -106,4 +106,9 @@ void xizi_leave_kernel()
 {
     spinlock_unlock(&whole_kernel_lock);
     p_intr_driver->cpu_irq_enable();
 }
+
+bool xizi_is_in_kernel()
+{
+    return is_spinlock_locked(&whole_kernel_lock);
+}
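xizi_is_in_kernel() is a probe on the single whole-kernel lock: with one big lock and per-CPU interrupts disabled while it is held, "the lock is locked" serves as a stand-in for "this CPU is already executing kernel code", which is what the new abort-handler guards rely on. A minimal usage sketch under that assumption (some_entry_path() is hypothetical):

    void some_entry_path(void)
    {
        if (xizi_is_in_kernel()) {
            panic("nested kernel entry\n");
        }
        xizi_enter_kernel(); /* take the lock, irqs off */
        /* ... kernel work ... */
        xizi_leave_kernel(); /* drop the lock, irqs back on */
    }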