Modify handle_exception according to the Linux code

songyanguang 2025-01-13 18:02:32 +08:00
parent 301073476f
commit ee49e0d71c
5 changed files with 212 additions and 221 deletions
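The new handle_exception follows the Linux convention of parking the kernel thread_info pointer in sscratch: the entry code swaps tp with sscratch, and a zero result means the trap came from the kernel (sscratch is cleared while running kernel code), while a non-zero result is the kernel tp for a trap from user space. A minimal C model of that handshake is sketched below; the name trapped_from_user is illustrative and not part of this commit.

    #include <stddef.h>
    #include <stdint.h>

    struct thread_info;                       /* per-task kernel data, normally kept in tp */

    /* Models "csrrw tp, CSR_SCRATCH, tp" followed by the bnez test in handle_exception. */
    static inline int trapped_from_user(uintptr_t *sscratch, struct thread_info **tp)
    {
        struct thread_info *swapped = (struct thread_info *)*sscratch;  /* old sscratch value */
        *sscratch = (uintptr_t)*tp;                                     /* stash the previous tp */
        *tp = swapped;
        /* Non-NULL: trap from user space, tp now holds the kernel thread_info (_save_context).
         * NULL: trap from kernel, the real tp is recovered from sscratch (_restore_kernel_tpsp). */
        return swapped != NULL;
    }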

View File

@@ -1,4 +1,4 @@
-SRC_FILES := entry.S trampoline.S $(BOARD)/trap_common.c $(BOARD)/trap.c $(BOARD)/plic.c error_debug.c hard_spinlock.S
+SRC_FILES := trampoline.S $(BOARD)/trap_common.c $(BOARD)/trap.c $(BOARD)/plic.c error_debug.c hard_spinlock.S
 ifeq ($(BOARD), jh7110)
 SRC_DIR := gicv3
@@ -6,4 +6,5 @@ SRC_FILES += $(BOARD)/
 endif
 include $(KERNEL_ROOT)/compiler.mk

View File

@@ -1,139 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file entry.S
* @brief trap in and out code
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024-12-10
*/
/*************************************************
File name: entry.S
Description: trap in and out code
Others:
History:
1. Date: 2024-12-10
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "asm/asm-offsets.h"
.macro kernel_entry
addi sp, sp, -(PT_SIZE)
sd x1, PT_RA(sp)
sd x3, PT_GP(sp)
sd x5, PT_T0(sp)
sd x6, PT_T1(sp)
sd x7, PT_T2(sp)
sd x8, PT_S0(sp)
sd x9, PT_S1(sp)
sd x10, PT_A0(sp)
sd x11, PT_A1(sp)
sd x12, PT_A2(sp)
sd x13, PT_A3(sp)
sd x14, PT_A4(sp)
sd x15, PT_A5(sp)
sd x16, PT_A6(sp)
sd x17, PT_A7(sp)
sd x18, PT_S2(sp)
sd x19, PT_S3(sp)
sd x20, PT_S4(sp)
sd x21, PT_S5(sp)
sd x22, PT_S6(sp)
sd x23, PT_S7(sp)
sd x24, PT_S8(sp)
sd x25, PT_S9(sp)
sd x26, PT_S10(sp)
sd x27, PT_S11(sp)
sd x28, PT_T3(sp)
sd x29, PT_T4(sp)
sd x30, PT_T5(sp)
sd x31, PT_T6(sp)
csrr s2, sepc
sd s2, PT_EPC(sp)
csrr s3, sbadaddr
sd s3, PT_BADADDR(sp)
csrr s4, scause
sd s4, PT_CAUSE(sp)
csrr s5, sscratch
sd s5, PT_TP(sp)
addi s0, sp, PT_SIZE
sd sp, PT_SP(sp)
.endm
.macro kernel_exit
ld a0, PT_STATUS(sp)
csrw sstatus, a0
ld a2, PT_EPC(sp)
csrw sepc, a2
ld x1, PT_RA(sp)
ld x3, PT_GP(sp)
ld x5, PT_T0(sp)
ld x6, PT_T1(sp)
ld x7, PT_T2(sp)
ld x8, PT_S0(sp)
ld x9, PT_S1(sp)
ld x10, PT_A0(sp)
ld x11, PT_A1(sp)
ld x12, PT_A2(sp)
ld x13, PT_A3(sp)
ld x14, PT_A4(sp)
ld x15, PT_A5(sp)
ld x16, PT_A6(sp)
ld x17, PT_A7(sp)
ld x18, PT_S2(sp)
ld x19, PT_S3(sp)
ld x20, PT_S4(sp)
ld x21, PT_S5(sp)
ld x22, PT_S6(sp)
ld x23, PT_S7(sp)
ld x24, PT_S8(sp)
ld x25, PT_S9(sp)
ld x26, PT_S10(sp)
ld x27, PT_S11(sp)
ld x28, PT_T3(sp)
ld x29, PT_T4(sp)
ld x30, PT_T5(sp)
ld x31, PT_T6(sp)
ld x2, PT_SP(sp)
.endm
.align 4
.global do_exception_vector
do_exception_vector:
kernel_entry
la ra, ret_from_exception
mv a0, sp /* pt_regs */
mv a1, s4
tail do_exception
ret_from_exception:
restore_all:
kernel_exit
sret
.global trigger_fault
trigger_fault:
li a0, 0x70000000
ld a0, (a0)
ret

View File

@@ -127,15 +127,24 @@ void syscall_arch_handler(struct trapframe* tf)
-extern void do_exception_vector(void);
+extern void handle_exception(void);
 void trap_init(void)
 {
-    csr_write(stvec, do_exception_vector);
+    csr_write(stvec, handle_exception);
     csr_write(sie, 0);
+    __asm__ volatile("csrw sscratch, zero" : : : "memory");
+
+#if 0
+    printk("trap_init test\n");
+    __asm__ volatile("ebreak");
+    printk("trap_init test ok\n");
+#endif
 }
+
+void trap_set_exception_vector(uint64_t new_tbl_base)
+{
+    csr_write(stvec, new_tbl_base);
+}

 static void do_trap_error(struct pt_regs *regs, const char *str)
 {
@@ -225,7 +234,7 @@ void do_exception(struct pt_regs *regs, unsigned long scause)
     printk("%s, scause: 0x%lx\n", __func__, scause);

     if (scause & CAUSE_IRQ_FLAG) {
-        handle_irq(regs, scause);
+        intr_irq_dispatch((struct trapframe *)regs);
     }
     else {
         inf = ec_to_fault_info(scause);
@@ -235,3 +244,10 @@ void do_exception(struct pt_regs *regs, unsigned long scause)
     }
 }
+
+#define INIT_THREAD_INFO    \
+    {                       \
+        .flags = 0,         \
+        .preempt_count = 1, \
+    }
+
+struct thread_info init_thread_info = INIT_THREAD_INFO;
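The TASK_TI_KERNEL_SP and TASK_TI_USER_SP offsets used by the new trampoline, together with the INIT_THREAD_INFO initializer above, imply a thread_info layout roughly like the sketch below. Only flags and preempt_count are visible in this diff; the stack-pointer fields and their names are assumptions modeled on the Linux counterpart.

    #include <stdint.h>

    /* Assumed layout; the real definition is not part of this diff. */
    struct thread_info {
        unsigned long flags;          /* 0 at init */
        int           preempt_count;  /* 1 at init: preemption starts disabled */
        uintptr_t     kernel_sp;      /* would back TASK_TI_KERNEL_SP */
        uintptr_t     user_sp;        /* would back TASK_TI_USER_SP */
    };

    struct thread_info init_thread_info = { .flags = 0, .preempt_count = 1 };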

View File

@@ -41,6 +41,9 @@ Modification:
 static struct XiziTrapDriver xizi_trap_driver;

+extern void trap_init(void);
+extern void trap_set_exception_vector(uint64_t new_tbl_base);
+
 void panic(char* s)
 {
     KPrintf("panic: %s\n", s);
@@ -48,12 +51,9 @@ void panic(char* s)
         ;
 }

-//extern void alltraps();
-extern void trap_init(void);
 static void _sys_irq_init(int cpu_id)
 {
     // primary core init intr
-    // xizi_trap_driver.switch_hw_irqtbl((uintptr_t*)alltraps);
     if (cpu_id == 0) {
         plic_init();
     }
@@ -90,8 +90,7 @@ static void _single_irq_disable(int irq, int cpu)
 static inline uintptr_t* _switch_hw_irqtbl(uintptr_t* new_tbl_base)
 {
-    w_vbar_el1((uint64_t)new_tbl_base);
+    trap_set_exception_vector(new_tbl_base);
     return NULL;
 }
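After this change the driver's vector-switch hook funnels into the S-mode stvec CSR instead of the AArch64 w_vbar_el1 helper. The effective call chain is sketched below, assuming a Linux-style csr_write macro (the real macro definition is not shown in this diff); the explicit cast is added only for the sketch.

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed shape of the csr_write macro used in trap_common.c. */
    #define csr_write(csr, val) \
        __asm__ volatile("csrw " #csr ", %0" : : "r"((uint64_t)(val)) : "memory")

    void trap_set_exception_vector(uint64_t new_tbl_base)
    {
        csr_write(stvec, new_tbl_base);              /* from the trap_common.c hunk above */
    }

    static inline uintptr_t* _switch_hw_irqtbl(uintptr_t* new_tbl_base)
    {
        trap_set_exception_vector((uint64_t)new_tbl_base);
        return NULL;                                 /* the previous table is not reported */
    }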

View File

@@ -29,84 +29,198 @@ Modification:
 *************************************************/
 #include "memlayout.h"
 #include "core.h"
-.macro savereg
-.endm
-.macro restorereg
-.endm
-.macro usavereg
-.endm
-.macro urestorereg
-.endm
-
-.global alltraps
-.balign 0x800
-alltraps:
-    // Current EL with sp0
-    j badtrap
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-
-    // Current EL with spx
-    .balign 0x80
-    j el1sync
-    .balign 0x80
-    j el1irq
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-
-    // Lower EL using aarch64
-    .balign 0x80
-    j el0sync
-    .balign 0x80
-    j el0irq
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-
-    // Lower EL using aarch32
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-    .balign 0x80
-    j badtrap
-
-badtrap:
-    j .
-
-el1sync:
-    j .
-
-el1irq:
-    ret
-
-el0sync:
-    ret
-
-el0irq:
-    jal intr_irq_dispatch
-
-.global trap_return
-trap_return:
-    ret
+#include "asm/csr.h"
+#include "asm/asm-offsets.h"
+
+.align 4
+.global handle_exception
+handle_exception:
+    csrrw tp, CSR_SCRATCH, tp
+    bnez tp, _save_context
+
+_restore_kernel_tpsp:
+    csrr tp, CSR_SCRATCH
+    REG_S sp, TASK_TI_KERNEL_SP(tp)
+
+_save_context:
+    REG_S sp, TASK_TI_USER_SP(tp)
+    REG_L sp, TASK_TI_KERNEL_SP(tp)
+    addi sp, sp, -(PT_SIZE_ON_STACK)
+    REG_S x1, PT_RA(sp)
+    REG_S x3, PT_GP(sp)
+    REG_S x5, PT_T0(sp)
+    REG_S x6, PT_T1(sp)
+    REG_S x7, PT_T2(sp)
+    REG_S x8, PT_S0(sp)
+    REG_S x9, PT_S1(sp)
+    REG_S x10, PT_A0(sp)
+    REG_S x11, PT_A1(sp)
+    REG_S x12, PT_A2(sp)
+    REG_S x13, PT_A3(sp)
+    REG_S x14, PT_A4(sp)
+    REG_S x15, PT_A5(sp)
+    REG_S x16, PT_A6(sp)
+    REG_S x17, PT_A7(sp)
+    REG_S x18, PT_S2(sp)
+    REG_S x19, PT_S3(sp)
+    REG_S x20, PT_S4(sp)
+    REG_S x21, PT_S5(sp)
+    REG_S x22, PT_S6(sp)
+    REG_S x23, PT_S7(sp)
+    REG_S x24, PT_S8(sp)
+    REG_S x25, PT_S9(sp)
+    REG_S x26, PT_S10(sp)
+    REG_S x27, PT_S11(sp)
+    REG_S x28, PT_T3(sp)
+    REG_S x29, PT_T4(sp)
+    REG_S x30, PT_T5(sp)
+    REG_S x31, PT_T6(sp)
+
+    /*
+     * Disable user-mode memory access as it should only be set in the
+     * actual user copy routines.
+     *
+     * Disable the FPU to detect illegal usage of floating point in kernel
+     * space.
+     */
+    li t0, SR_SUM | SR_FS
+
+    REG_L s0, TASK_TI_USER_SP(tp)
+    csrrc s1, CSR_STATUS, t0
+    csrr s2, CSR_EPC
+    csrr s3, CSR_TVAL
+    csrr s4, CSR_CAUSE
+    csrr s5, CSR_SCRATCH
+    REG_S s0, PT_SP(sp)
+    REG_S s1, PT_STATUS(sp)
+    REG_S s2, PT_EPC(sp)
+    REG_S s3, PT_BADADDR(sp)
+    REG_S s4, PT_CAUSE(sp)
+    REG_S s5, PT_TP(sp)
+
+    /*
+     * Set the scratch register to 0, so that if a recursive exception
+     * occurs, the exception vector knows it came from the kernel
+     */
+    csrw CSR_SCRATCH, x0
+
+    /* Load the global pointer */
+.option push
+.option norelax
+    la gp, __global_pointer$
+.option pop
+
+    /*
+     * MSB of cause differentiates between
+     * interrupts and exceptions
+     */
+    bge s4, zero, 1f
+
+    la ra, ret_from_exception
+
+    /* Handle interrupts */
+    move a0, sp /* pt_regs */
+    //la a1, handle_arch_irq
+    la a1, intr_irq_dispatch
+    REG_L a1, (a1)
+    jr a1
+1:
+    /*
+     * Exceptions run with interrupts enabled or disabled depending on the
+     * state of SR_PIE in m/sstatus.
+     */
+    andi t0, s1, SR_PIE
+    beqz t0, 1f
+    /* kprobes, entered via ebreak, must have interrupts disabled. */
+    li t0, EXC_BREAKPOINT
+    beq s4, t0, 1f
+    csrs CSR_STATUS, SR_IE
+
+1:
+    la ra, ret_from_exception
+
+    /* Handle syscalls */
+    li t0, EXC_SYSCALL
+    beq s4, t0, handle_syscall
+
+    mv a0, sp
+    mv a1, s4
+    tail do_exception
+
+handle_syscall:
+    j .
+
+ret_from_exception:
+    REG_L s0, PT_STATUS(sp)
+    csrc CSR_STATUS, SR_IE
+
+    andi s0, s0, SR_SPP
+    bnez s0, resume_kernel
+
+resume_userspace:
+    /* Save unwound kernel stack pointer in thread_info */
+    addi s0, sp, PT_SIZE_ON_STACK
+    REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+    /*
+     * Save TP into the scratch register, so we can find the kernel data
+     * structures again.
+     */
+    csrw CSR_SCRATCH, tp
+
+restore_all:
+    REG_L a0, PT_STATUS(sp)
+    REG_L a2, PT_EPC(sp)
+    REG_SC x0, a2, PT_EPC(sp)
+
+    csrw CSR_STATUS, a0
+    csrw CSR_EPC, a2
+
+    REG_L x1, PT_RA(sp)
+    REG_L x3, PT_GP(sp)
+    REG_L x4, PT_TP(sp)
+    REG_L x5, PT_T0(sp)
+    REG_L x6, PT_T1(sp)
+    REG_L x7, PT_T2(sp)
+    REG_L x8, PT_S0(sp)
+    REG_L x9, PT_S1(sp)
+    REG_L x10, PT_A0(sp)
+    REG_L x11, PT_A1(sp)
+    REG_L x12, PT_A2(sp)
+    REG_L x13, PT_A3(sp)
+    REG_L x14, PT_A4(sp)
+    REG_L x15, PT_A5(sp)
+    REG_L x16, PT_A6(sp)
+    REG_L x17, PT_A7(sp)
+    REG_L x18, PT_S2(sp)
+    REG_L x19, PT_S3(sp)
+    REG_L x20, PT_S4(sp)
+    REG_L x21, PT_S5(sp)
+    REG_L x22, PT_S6(sp)
+    REG_L x23, PT_S7(sp)
+    REG_L x24, PT_S8(sp)
+    REG_L x25, PT_S9(sp)
+    REG_L x26, PT_S10(sp)
+    REG_L x27, PT_S11(sp)
+    REG_L x28, PT_T3(sp)
+    REG_L x29, PT_T4(sp)
+    REG_L x30, PT_T5(sp)
+    REG_L x31, PT_T6(sp)
+
+    REG_L x2, PT_SP(sp)
+    sret
+
+resume_kernel:
+    j restore_all
+
+.global task_prepare_enter
+task_prepare_enter:
+    call xizi_leave_kernel
+    j ret_from_exception
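Both the deleted entry.S and the new trampoline take their PT_* and TASK_TI_* constants from "asm/asm-offsets.h", which is not part of this diff. A generator in the usual asm-offsets style would look roughly like the sketch below; the pt_regs field names are assumptions chosen to match the offsets referenced above.

    #include <stddef.h>

    /* Assumed register-frame layout behind the PT_* offsets. */
    struct pt_regs {
        unsigned long epc;
        unsigned long ra, sp, gp, tp;
        unsigned long t0, t1, t2;
        unsigned long s0, s1;
        unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
        unsigned long s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
        unsigned long t3, t4, t5, t6;
        unsigned long status, badaddr, cause;
    };

    /* Emits "->SYM value" markers that a build step turns into #define lines. */
    #define DEFINE(sym, val) __asm__ volatile("\n.ascii \"->" #sym " %0\"" : : "i"(val))
    #define OFFSET(sym, type, member) DEFINE(sym, offsetof(struct type, member))

    void asm_offsets(void)
    {
        OFFSET(PT_EPC, pt_regs, epc);
        OFFSET(PT_RA, pt_regs, ra);
        OFFSET(PT_SP, pt_regs, sp);
        OFFSET(PT_GP, pt_regs, gp);
        OFFSET(PT_TP, pt_regs, tp);
        OFFSET(PT_STATUS, pt_regs, status);
        OFFSET(PT_BADADDR, pt_regs, badaddr);
        OFFSET(PT_CAUSE, pt_regs, cause);
        DEFINE(PT_SIZE_ON_STACK, sizeof(struct pt_regs)); /* real generators round this up to a 16-byte multiple */
    }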