Fully support userland interrupt handlers. Use a fixed common abort handler.

TXuian 2024-04-28 14:44:49 +08:00
parent a7cbb0d041
commit a24d73f710
25 changed files with 260 additions and 154 deletions

View File

@@ -41,30 +41,13 @@ Modification:
*************************************************/
#include "core.h"
#include "memlayout.h"
#include "log.h"
#include "multicores.h"
#include "spinlock.h"
#include "syscall.h"
#include "trap_common.h"
__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
{
if ((fault_status & 0xd) == 0x1) // Alignment failure
KPrintf("reason: alignment\n");
else if ((fault_status & 0xd) == 0xc) // External abort "on translation"
KPrintf("reason: ext. abort on trnslt.\n");
else if ((fault_status & 0xd) == 0x5) // Translation
KPrintf("reason: sect. translation\n");
else if ((fault_status & 0xd) == 0x9) // Domain
KPrintf("reason: sect. domain\n");
else if ((fault_status & 0xd) == 0xd) // Permission
KPrintf("reason: sect. permission\n");
else if ((fault_status & 0xd) == 0x8) // External abort
KPrintf("reason: ext. abort\n");
else
KPrintf("reason: unknown???\n");
}
#include "assert.h"
#include "multicores.h"
#include "syscall.h"
#include "task.h"
void dump_tf(struct trapframe* tf)
{
@@ -86,6 +69,57 @@ void dump_tf(struct trapframe* tf)
KPrintf(" pc: 0x%x\n", tf->pc);
}
void dabort_reason(struct trapframe* r)
{
uint32_t fault_status, dfa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(fault_status)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
LOG("program counter: 0x%x caused\n", r->pc);
LOG("data abort at 0x%x, status 0x%x\n", dfa, fault_status);
if ((fault_status & 0xd) == 0x1) // Alignment failure
KPrintf("reason: alignment\n");
else if ((fault_status & 0xd) == 0xc) // External abort "on translation"
KPrintf("reason: ext. abort on trnslt.\n");
else if ((fault_status & 0xd) == 0x5) // Translation
KPrintf("reason: sect. translation\n");
else if ((fault_status & 0xd) == 0x9) // Domain
KPrintf("reason: sect. domain\n");
else if ((fault_status & 0xd) == 0xd) // Permission
KPrintf("reason: sect. permission\n");
else if ((fault_status & 0xd) == 0x8) // External abort
KPrintf("reason: ext. abort\n");
else
KPrintf("reason: unknown???\n");
dump_tf(r);
}
void iabort_reason(struct trapframe* r)
{
uint32_t fault_status, ifa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(fault_status)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, fault_status);
if ((fault_status & 0xd) == 0x1) // Alignment failure
KPrintf("reason: alignment\n");
else if ((fault_status & 0xd) == 0xc) // External abort "on translation"
KPrintf("reason: ext. abort on trnslt.\n");
else if ((fault_status & 0xd) == 0x5) // Translation
KPrintf("reason: sect. translation\n");
else if ((fault_status & 0xd) == 0x9) // Domain
KPrintf("reason: sect. domain\n");
else if ((fault_status & 0xd) == 0xd) // Permission
KPrintf("reason: sect. permission\n");
else if ((fault_status & 0xd) == 0x8) // External abort
KPrintf("reason: ext. abort\n");
else
KPrintf("reason: unknown???\n");
dump_tf(r);
}
void handle_undefined_instruction(struct trapframe* tf)
{
// unimplemented trap handler
@@ -105,61 +139,4 @@ void handle_fiq(void)
{
xizi_enter_kernel();
panic("Unimplemented FIQ\n");
}
extern void context_switch(struct context**, struct context*);
void dabort_handler(struct trapframe* r)
{
if (xizi_is_in_kernel()) {
uint32_t dfs, dfa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
LOG("program counter: 0x%x caused\n", r->pc);
LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
_abort_reason(dfs);
dump_tf(r);
panic("data abort exception\n");
}
xizi_enter_kernel();
uint32_t dfs, dfa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
ERROR("dabort in user space: %s\n", cur_cpu()->task->name);
LOG("program counter: 0x%x caused\n", r->pc);
LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
_abort_reason(dfs);
dump_tf(r);
sys_exit(cur_cpu()->task);
context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
}
void iabort_handler(struct trapframe* r)
{
if (xizi_is_in_kernel()) {
uint32_t ifs, ifa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
LOG("program counter: 0x%x caused\n", r->pc);
LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
_abort_reason(ifs);
dump_tf(r);
panic("prefetch abort exception\n");
}
xizi_enter_kernel();
uint32_t ifs, ifa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
ERROR("iabort in user space: %s\n", cur_cpu()->task->name);
LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
_abort_reason(ifs);
dump_tf(r);
sys_exit(cur_cpu()->task);
context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
}
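The fault_status & 0xd decoding in dabort_reason()/iabort_reason() folds each section/page pair of the ARMv7 short-descriptor FSR into one case (bit 1 is dropped by the mask). Below is a minimal hosted sketch of that logic, not part of this commit; the 0xc case for external aborts on translation-table walks (FSR 0xc/0xe) is an assumption about the intended encoding.

#include <stdint.h>
#include <stdio.h>

/* Decode FSR[3:0] the way dabort_reason()/iabort_reason() do. Masking
 * with 0xd drops bit 1, so each section/page pair (e.g. 0x5 section
 * translation and 0x7 page translation) lands in a single case. */
static const char* fsr_reason(uint32_t fault_status)
{
    switch (fault_status & 0xd) {
    case 0x1: return "alignment";
    case 0xc: return "ext. abort on translation"; /* 0xc/0xe: table walk (assumed) */
    case 0x5: return "translation";               /* 0x5/0x7 */
    case 0x9: return "domain";                    /* 0x9/0xb */
    case 0xd: return "permission";                /* 0xd/0xf */
    case 0x8: return "ext. abort";
    default:  return "unknown";
    }
}

int main(void)
{
    const uint32_t samples[] = { 0x1, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x8, 0xc, 0xe };
    for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
        printf("fsr 0x%x -> %s\n", (unsigned)samples[i], fsr_reason(samples[i]));
    return 0;
}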

View File

@@ -42,12 +42,13 @@ extern void trap_iabort(void);
extern void trap_dabort(void);
extern void trap_irq_enter(void);
extern void trap_undefined_instruction(void);
extern void handle_reserved(void);
extern void handle_fiq(void);
static struct XiziTrapDriver xizi_trap_driver;
void panic(char* s)
{
xizi_trap_driver.cpu_irq_disable();
KPrintf("panic: %s\n", s);
for (;;)
;
@@ -55,7 +56,6 @@ void panic(char* s)
/* stacks for the different CPU modes */
static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE];
extern uint32_t _vector_jumper;
extern uint32_t _vector_start;
extern uint32_t _vector_end;
@@ -72,19 +72,6 @@ void init_cpu_mode_stacks(int cpu_id)
}
}
void handle_reserved(void)
{
// unimplemented trap handler
LOG("Unimplemented Reserved\n");
panic("");
}
void handle_fiq(void)
{
LOG("Unimplemented FIQ\n");
panic("");
}
static void _sys_irq_init(int cpu_id)
{
@@ -101,18 +88,18 @@ static void _sys_irq_init(int cpu_id)
vector_base[5] = (uint32_t)handle_reserved; // Reserved
vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
vector_base[7] = (uint32_t)handle_fiq; // FIQ
}
/* activate hardware irq responder */
XScuGic_Config* gic_config = XScuGic_LookupConfig(XPAR_PS7_SCUGIC_0_DEVICE_ID);
if (NULL == gic_config) {
ERROR("Error while looking up gic config\n");
return;
}
int gic_init_status = XScuGic_CfgInitialize(&IntcInstance, gic_config, gic_config->CpuBaseAddress);
if (gic_init_status != XST_SUCCESS) {
ERROR("Error initializing gic\n");
return;
/* activate hardware irq responder */
XScuGic_Config* gic_config = XScuGic_LookupConfig(XPAR_PS7_SCUGIC_0_DEVICE_ID);
if (NULL == gic_config) {
ERROR("Error while looking up gic config\n");
return;
}
int gic_init_status = XScuGic_CfgInitialize(&IntcInstance, gic_config, gic_config->CpuBaseAddress);
if (gic_init_status != XST_SUCCESS) {
ERROR("Error initializing gic\n");
return;
}
}
xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
@@ -226,6 +213,6 @@ static struct XiziTrapDriver xizi_trap_driver = {
struct XiziTrapDriver* hardkernel_intr_init(struct TraceTag* hardkernel_tag)
{
xizi_trap_driver.sys_irq_init(0);
xizi_trap_driver.cpu_irq_enable();
xizi_trap_driver.cpu_irq_disable();
return &xizi_trap_driver;
}

View File

@@ -27,8 +27,8 @@
#include "multicores.h"
struct lock_node {
int cpu_id;
struct double_list_node node;
int cpu_id;
};
static struct double_list_node lock_request_guard;
@@ -63,15 +63,18 @@ void _spinlock_unlock(struct spinlock* lock);
void spinlock_lock(struct spinlock* lock)
{
if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpuid()) {
int cur_cpu_id = cur_cpuid();
if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
panic("");
}
struct double_list_node* p_lock_node = &core_lock_request[cur_cpu_id].node;
_spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
doubleListAddOnBack(&core_lock_request[cur_cpuid()].node, &lock_request_guard);
doubleListAddOnBack(p_lock_node, &lock_request_guard);
_spinlock_unlock(&request_lock);
while (lock_request_guard.next != &core_lock_request[cur_cpuid()].node)
while (lock_request_guard.next != p_lock_node)
;
_spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
@@ -79,15 +82,38 @@ void spinlock_lock(struct spinlock* lock)
void spinlock_unlock(struct spinlock* lock)
{
assert(lock_request_guard.next == &core_lock_request[cur_cpuid()].node);
struct double_list_node* p_lock_node = &core_lock_request[cur_cpuid()].node;
assert(lock_request_guard.next == p_lock_node);
_spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
_double_list_del(core_lock_request[cur_cpuid()].node.prev, core_lock_request[cur_cpuid()].node.next);
_double_list_del(p_lock_node->prev, p_lock_node->next);
_spinlock_unlock(&request_lock);
_spinlock_unlock(lock);
}
bool is_spinlock_locked(struct spinlock* lock)
bool spinlock_try_lock(struct spinlock* lock)
{
return lock->owner_cpu != SPINLOCK_STATE_UNLOCK;
int cur_cpu_id = cur_cpuid();
if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
panic("");
}
struct double_list_node* p_lock_node = &core_lock_request[cur_cpu_id].node;
_spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
doubleListAddOnBack(p_lock_node, &lock_request_guard);
if (lock_request_guard.next != p_lock_node) {
_double_list_del(p_lock_node->prev, p_lock_node->next);
_spinlock_unlock(&request_lock);
return false;
}
_spinlock_unlock(&request_lock);
_spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
return true;
}
bool is_spinlock_hold_by_current_cpu(struct spinlock* lock)
{
return lock->owner_cpu == cur_cpuid();
}
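A hedged usage sketch for the new spinlock_try_lock(): waiters queue FIFO through core_lock_request, so a core that is not at the head of the queue unlinks its node and returns false instead of spinning. The caller below (try_do_critical is an illustrative name, not part of the commit) simply backs off on contention.

/* Attempt the lock once; on contention, give up instead of spinning. */
void try_do_critical(struct spinlock* lk)
{
    if (!spinlock_try_lock(lk)) {
        /* another core is queued ahead of us; retry later */
        return;
    }
    /* ... critical section ... */
    spinlock_unlock(lk);
}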

View File

@@ -42,4 +42,5 @@ bool module_spinlock_use_intr_init(void);
void spinlock_init(struct spinlock* lock, char* name);
void spinlock_lock(struct spinlock* lock);
void spinlock_unlock(struct spinlock* lock);
bool is_spinlock_locked(struct spinlock* lock);
bool spinlock_try_lock(struct spinlock* lock);
bool is_spinlock_hold_by_current_cpu(struct spinlock* lock);

View File

@@ -100,4 +100,7 @@ void panic(char* s);
bool intr_distributer_init(struct IrqDispatcherRightGroup*);
void intr_irq_dispatch(struct trapframe* tf);
bool swi_distributer_init(struct SwiDispatcherRightGroup*);
void software_irq_dispatch(struct trapframe* tf);
void software_irq_dispatch(struct trapframe* tf);
void dabort_reason(struct trapframe* r);
void iabort_reason(struct trapframe* r);

View File

@@ -23,7 +23,11 @@ INC_DIR = -I$(KERNEL_ROOT)/services/shell/letter-shell \
-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
-I$(KERNEL_ROOT)/services/app
ifeq ($(BOARD), imx6q-sabrelite)
all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr test_irq_send readme.txt | bin
else
all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr readme.txt | bin
endif
../tools/mkfs/mkfs ./fs.img $^
@mv $(filter-out readme.txt, $^) bin
@mv *.o bin
@@ -32,9 +36,11 @@ all: init test_fs simple_client simple_server shell fs_server test_priority test
bin:
@mkdir -p bin
ifeq ($(BOARD), imx6q-sabrelite)
test_irq_send: test_irq_sender.o usyscall.o libserial.o
@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
@${objdump} -S $@ > $@.asm
endif
test_irq_hdlr: test_irq_handler.o libserial.o libipc.o session.o usyscall.o libmem.o
@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}

View File

@@ -35,7 +35,7 @@ signed short userShellRead(char* data, unsigned short len)
while (length--) {
cur_read = getc();
if (cur_read == 0xff) {
yield();
yield(SYS_TASK_YIELD_NO_REASON);
}
// *data++ = getc();
*data++ = cur_read;

View File

@@ -89,7 +89,7 @@ int main()
mmap(ARM_PERIPHERAL_VIRT_BASE, ARM_PERIPHERAL_BASE, 0x2000, true);
printf("%s: Sending soft interrupt\n", prog_name);
gic_send_sgi(SW_INTERRUPT_3, 0, kGicSgiFilter_OnlyThisCPU);
gic_send_sgi(SW_INTERRUPT_3, 0xF, kGicSgiFilter_UseTargetList);
printf("%s: Soft interrupt send done\n", prog_name);
exit();
}

View File

@@ -53,9 +53,9 @@ int exit()
return syscall(SYSCALL_EXIT, 0, 0, 0, 0);
}
int yield()
int yield(task_yield_reason reason)
{
return syscall(SYSCALL_YIELD, 0, 0, 0, 0);
return syscall(SYSCALL_YIELD, (uintptr_t)reason, 0, 0, 0);
}
int kill(int pid)

View File

@@ -44,6 +44,12 @@ typedef enum {
SYS_STATE_SHOW_CPU_INFO,
} sys_state_option;
typedef enum {
SYS_TASK_YIELD_NO_REASON = 0x0,
SYS_TASK_YIELD_FOREVER = 0x1,
SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason;
typedef union {
struct {
uintptr_t memblock_start;
@@ -58,7 +64,7 @@ typedef int (*ipc_write_fn)(struct Session* session, int fd, char* src, int offs
int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, ipc_fsize_fn ipc_fsize, char* name, char** argv);
int exit();
int yield();
int yield(task_yield_reason reason);
int kill(int pid);
int register_server(char* name);
int session(char* path, int capacity, struct Session* user_session);
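An illustrative caller of the new yield(reason) API, assuming the declarations above are in scope; the polling loop mirrors how the IPC wait loops in this commit use SYS_TASK_YIELD_BLOCK_IPC.

/* Busy-wait that tells the scheduler why it yields, so a task blocked
 * on IPC completion can be told apart from one merely polling. */
void wait_until_done(volatile int* done)
{
    while (*done == 0) {
        yield(SYS_TASK_YIELD_BLOCK_IPC);
    }
}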

View File

@@ -53,9 +53,9 @@ int exit()
return syscall(SYSCALL_EXIT, 0, 0, 0, 0);
}
int yield()
int yield(task_yield_reason reason)
{
return syscall(SYSCALL_YIELD, 0, 0, 0, 0);
return syscall(SYSCALL_YIELD, (uintptr_t)reason, 0, 0, 0);
}
int kill(int pid)

View File

@@ -44,6 +44,12 @@ typedef enum {
SYS_STATE_SHOW_CPU_INFO,
} sys_state_option;
typedef enum {
SYS_TASK_YIELD_NO_REASON = 0x0,
SYS_TASK_YIELD_FOREVER = 0x1,
SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason;
typedef union {
struct {
uintptr_t memblock_start;
@@ -58,7 +64,7 @@ typedef int (*ipc_write_fn)(struct Session* session, int fd, char* src, int offs
int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, ipc_fsize_fn ipc_fsize, char* name, char** argv);
int exit();
int yield();
int yield(task_yield_reason reason);
int kill(int pid);
int register_server(char* name);
int session(char* path, int capacity, struct Session* user_session);

View File

@@ -121,7 +121,7 @@ void ipc_msg_send_wait(struct IpcMsg* msg)
msg->header.done = 0;
while (msg->header.done == 0) {
/// @todo syscall yield with prio decrease
yield();
yield(SYS_TASK_YIELD_BLOCK_IPC);
}
assert(msg->header.done == 1);
}
@@ -138,7 +138,7 @@ int ipc_session_wait(struct Session* session)
struct IpcMsg* msg = IPCSESSION_MSG(session);
while (msg->header.done == 0) {
/// @todo syscall yield with prio decrease
yield();
yield(SYS_TASK_YIELD_BLOCK_IPC);
}
assert(msg->header.done == 1);
return msg->header.ret_val;
@@ -169,7 +169,7 @@ void ipc_server_loop(struct IpcNode* ipc_node)
/* handle each session */
for (int i = 0; i < NR_MAX_SESSION; i++) {
if (session_list[i].buf == NULL) {
yield();
yield(SYS_TASK_YIELD_NO_REASON);
break;
}
cur_sess_id = session_list[i].id;

View File

@@ -50,5 +50,6 @@ static inline struct CPU* cur_cpu(void)
struct spinlock whole_kernel_lock;
void xizi_enter_kernel();
bool xizi_try_enter_kernel();
void xizi_leave_kernel();
bool xizi_is_in_kernel();

View File

@@ -64,6 +64,12 @@ typedef enum {
SYS_STATE_SHOW_CPU_INFO,
} sys_state_option;
typedef enum {
SYS_TASK_YIELD_NO_REASON = 0x0,
SYS_TASK_YIELD_FOREVER = 0x1,
SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason;
typedef union {
struct {
uintptr_t memblock_start;
@@ -80,7 +86,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
int sys_spawn(char* img_start, char* name, char** argv);
int sys_exit(struct TaskMicroDescriptor* ptask);
int sys_yield();
int sys_yield(task_yield_reason reason);
int sys_kill(int id);
int sys_register_as_server(char* name);

View File

@@ -34,6 +34,7 @@ Modification:
#include "actracer.h"
#include "assert.h"
#include "ipc.h"
#include "kalloc.h"
#include "mmu_common.h"
#include "multicores.h"
#include "share_page.h"
@@ -52,9 +53,10 @@ static void send_irq_to_user(int irq_num)
struct Session* session = &irq_forward_table[irq_num].session;
int len = IPC_ARG_INFO_BASE_OFFSET;
len += sizeof(struct IpcArgInfo);
/* get message space and add session tail */
/* add session tail */
struct IpcMsg* buf = session->buf + session->tail;
void* session_kern_vaddr = P2V(xizi_pager.address_translate(&kernel_irq_proxy->pgdir, (uintptr_t)session->buf));
struct IpcMsg* buf = session_kern_vaddr + session->tail;
memset((void*)buf, 0, len);
session->tail = (session->tail + len) % session->capacity;
@@ -73,20 +75,13 @@ int user_irq_handler(int irq, void* tf, void* arg)
int user_irq_handler(int irq, void* tf, void* arg)
{
static struct MmuCommonDone* p_mmu_driver = NULL;
if (p_mmu_driver == NULL) {
struct TraceTag mmu_driver_tag;
AchieveResourceTag(&mmu_driver_tag, RequireRootTag(), "/hardkernel/mmu-ac-resource");
p_mmu_driver = (struct MmuCommonDone*)AchieveResource(&mmu_driver_tag);
}
if (irq_forward_table[irq].handle_task != NULL) {
p_mmu_driver->LoadPgdir((uintptr_t)V2P(kernel_irq_proxy->pgdir.pd_addr));
send_irq_to_user(irq);
p_mmu_driver->LoadPgdir((uintptr_t)V2P(cur_cpu()->task->pgdir.pd_addr));
next_task_emergency = irq_forward_table[irq].handle_task;
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
if (cur_cpu()->task != NULL) {
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
}
}
return 0;
}
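The slot arithmetic in send_irq_to_user() treats the session buffer as a ring indexed by tail. A standalone sketch of that step follows (ring_alloc is an illustrative helper, not part of the commit; like the code above, it does not split a message that would straddle the buffer end).

#include <stddef.h>
#include <string.h>

/* Carve a zeroed slot of len bytes at *tail and advance the tail
 * modulo capacity -- the same ring arithmetic as send_irq_to_user(). */
static void* ring_alloc(char* base, size_t* tail, size_t capacity, size_t len)
{
    void* slot = base + *tail;
    memset(slot, 0, len);
    *tail = (*tail + len) % capacity;
    return slot;
}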

View File

@@ -33,7 +33,7 @@ Modification:
#include "log.h"
int sys_yield()
int sys_yield(task_yield_reason reason)
{
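/* The reason argument is accepted but not yet acted on: the yield is
 * unconditional. It is plumbed through for the planned priority-aware
 * yielding (see the "@todo syscall yield with prio decrease" notes). */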
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
return 0;

View File

@@ -48,7 +48,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
ret = sys_exit(cur_cpu()->task);
break;
case SYSCALL_YIELD:
ret = sys_yield();
ret = sys_yield((task_yield_reason)param1);
break;
case SYSCALL_SERVER:
ret = sys_register_as_server((char*)param1);

View File

@@ -1,3 +1,3 @@
SRC_FILES := task.c scheduler.c
SRC_FILES := task.c schedule.c
include $(KERNEL_ROOT)/compiler.mk

View File

@@ -259,6 +259,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
cpu->task = next_task;
p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
context_switch(&cpu->scheduler, next_task->main_thread.context);
assert(cur_cpu()->task == NULL);
assert(next_task->state != RUNNING);
}
}

View File

@@ -1,6 +1,7 @@
SRC_FILES := default_irq_handler.c \
clock_irq_handler.c \
software_irq_handler.c
software_irq_handler.c \
abort_handler.c
include $(KERNEL_ROOT)/compiler.mk

View File

@@ -0,0 +1,86 @@
/* Copyright (c) 2006-2018 Frans Kaashoek, Robert Morris, Russ Cox,
* Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* @file abort_handler.c
* @brief handle program abort
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.11.23
*/
/*************************************************
File name: abort_handler.c
Description: handle program abort
Others:
History:
1. Date: 2023-11-23
Author: AIIT XUOS Lab
Modification:
1. Modify iabort and dabort handlers (in dabort_handler() and iabort_handler())
*************************************************/
#include "core.h"
#include "memlayout.h"
#include "spinlock.h"
#include "trap_common.h"
#include "assert.h"
#include "multicores.h"
#include "syscall.h"
#include "task.h"
extern void context_switch(struct context**, struct context*);
void dabort_handler(struct trapframe* r)
{
if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
dabort_reason(r);
panic("data abort exception\n");
}
xizi_enter_kernel();
struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
ERROR("dabort in user space: %s\n", cur_task->name);
dabort_reason(r);
sys_exit(cur_task);
assert(cur_cpu()->task == NULL);
context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
}
void iabort_handler(struct trapframe* r)
{
if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
iabort_reason(r);
panic("kernel prefetch abort exception\n");
}
xizi_enter_kernel();
struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
ERROR("iabort in user space: %s\n", cur_task->name);
iabort_reason(r);
sys_exit(cur_task);
assert(cur_cpu()->task == NULL);
context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
}
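The kernel/user split in both handlers above reduces to a single predicate; here is a sketch using the names from this file (the helper fault_is_from_kernel is illustrative, not part of the commit).

/* A fault is taken as the kernel's own when the faulting pc lies in the
 * kernel's high virtual range and this core already holds the big kernel
 * lock; the only safe reaction then is to panic. Otherwise the faulting
 * user task is exited and the core switches back to its scheduler. */
static inline bool fault_is_from_kernel(struct trapframe* r)
{
    return r->pc >= DEV_VRTMEM_BASE
        && is_spinlock_hold_by_current_cpu(&whole_kernel_lock);
}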

View File

@@ -66,9 +66,8 @@ void intr_irq_dispatch(struct trapframe* tf)
}
struct TaskMicroDescriptor* current_task = cur_cpu()->task;
if (LIKELY(current_task != NULL)) {
current_task->main_thread.trapframe = tf;
}
assert(current_task != NULL);
current_task->main_thread.trapframe = tf;
unsigned cpu = p_intr_driver->hw_cur_int_cpu(int_info);
unsigned irq = p_intr_driver->hw_cur_int_num(int_info);
@@ -86,7 +85,7 @@ void intr_irq_dispatch(struct trapframe* tf)
p_intr_driver->curr_int[cpu] = 0;
p_intr_driver->hw_after_irq(int_info);
if ((cur_cpu()->task == NULL && current_task != NULL) || current_task->state != RUNNING) {
if (cur_cpu()->task == NULL || current_task->state != RUNNING) {
cur_cpu()->task = NULL;
context_switch(&current_task->main_thread.context, cur_cpu()->scheduler);
}
@@ -102,13 +101,18 @@ void xizi_enter_kernel()
spinlock_lock(&whole_kernel_lock);
}
bool xizi_try_enter_kernel()
{
p_intr_driver->cpu_irq_disable();
if (spinlock_try_lock(&whole_kernel_lock)) {
return true;
}
return false;
}
void xizi_leave_kernel()
{
spinlock_unlock(&whole_kernel_lock);
p_intr_driver->cpu_irq_enable();
}
bool xizi_is_in_kernel()
{
return is_spinlock_locked(&whole_kernel_lock);
}

View File

@@ -71,10 +71,10 @@ void software_irq_dispatch(struct trapframe* tf)
cur_cpu()->task = NULL;
context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
}
assert(cur_task == cur_cpu()->task);
if (syscall_num == SYSCALL_EXIT) {
panic("Exit reaches");
}
assert(cur_task == cur_cpu()->task);
xizi_leave_kernel();
}