TODO: Support armv8

This commit is contained in:
liuqh 2024-05-24 13:43:31 +08:00
parent e62863bc22
commit 0efbe375eb
43 changed files with 1636 additions and 578 deletions

View File

@ -32,8 +32,8 @@ Modification:
.global _boot_start
.global CpuInitCrit
.global primary_cpu_init
_boot_start:
@ save r0 for cores 1-3, r0 arg field passed by ROM
@ r0 is a function pointer for secondary cpus

View File

@ -73,19 +73,7 @@ Modification:
#include "cortex_a72.h"

// NR_CPU was defined twice (4, then 1) - a conflicting macro redefinition
// that is an error under -Werror; keep the single armv8 bring-up value.
#define NR_CPU 1 // maximum number of CPUs

#define NR_PROC 64 // maximum number of processes
#define NOFILE 16 // open files per process
#define NFILE 100 // open files per system
#define NINODE 50 // maximum number of active i-nodes
#define NDEV 10 // maximum major device number
#define ROOTDEV 1 // device number of file system root disk
#define MAXARG 32 // max exec arguments
#define MAXOPBLOCKS 10 // max # of blocks any FS op writes
#define LOGSIZE (MAXOPBLOCKS * 3) // max data blocks in on-disk log
#define NBUF (MAXOPBLOCKS * 3) // size of disk block cache
#define FSSIZE 1000 // size of file system in blocks
#define MAXPATH 128 // maximum file path name
__attribute__((always_inline)) static inline uint64_t EL0_mode() // Set ARM mode to EL0
{
@ -103,8 +91,21 @@ __attribute__((always_inline)) static inline uint64_t EL0_mode() // Set ARM mode
return val;
}
// Park the calling cpu in low power: WFE() waits until another core signals
// an event (see cpu_leave_low_power). optimize("O0") keeps the compiler from
// eliding or moving the wait.
__attribute__((always_inline, optimize("O0"))) static inline void cpu_into_low_power()
{
WFE();
}
// Wake cpus parked in cpu_into_low_power: SEV() broadcasts an event to all
// cores waiting on WFE.
__attribute__((always_inline, optimize("O0"))) static inline void cpu_leave_low_power()
{
SEV();
}
struct context {
// callee-saved Registers
uint64_t sp;
/* callee register */
uint64_t x18;
uint64_t x19;
uint64_t x20;
uint64_t x21;
@ -121,24 +122,14 @@ struct context {
/// @brief init task context, set return address to trap return
/// @param ctx
extern void trap_return(void);
extern void task_prepare_enter(void);
/// @brief Initialize a task's switch context: zero all callee-saved
/// registers and point the return address at the task entry shim.
/// @param ctx context to initialize
__attribute__((__always_inline__)) static inline void arch_init_context(struct context* ctx)
{
    memset(ctx, 0, sizeof(*ctx));
    // first switch into this task returns into task_prepare_enter
    // (the dead store of trap_return that was immediately overwritten
    // has been removed)
    ctx->x30 = (uint64_t)(task_prepare_enter);
}
struct trapframe {
// Additional registers used to support musl
uint64_t _padding; // for 16-byte aligned
uint64_t tpidr_el0;
__uint128_t q0;
// Special Registers
uint64_t sp_el0; // stack pointer
uint64_t spsr_el1; // program status register
uint64_t elr_el1; // exception link register
uint64_t pc; // program counter
// general purpose registers
uint64_t x0;
uint64_t x1;
uint64_t x2;
@ -170,6 +161,9 @@ struct trapframe {
uint64_t x28;
uint64_t x29;
uint64_t x30;
uint64_t pc;
uint64_t spsr;
uint64_t sp;
};
/// @brief init task trapframe
@ -179,9 +173,8 @@ struct trapframe {
/// @brief Initialize a task's trapframe for its first entry to user mode.
/// @param tf trapframe to fill
/// @param sp initial user stack pointer
/// @param pc initial user program counter
__attribute__((__always_inline__)) static inline void arch_init_trapframe(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
{
    memset(tf, 0, sizeof(*tf));
    // only the current trapframe fields are written; the stale stores to
    // the removed sp_el0/spsr_el1/elr_el1 fields have been dropped
    tf->sp = sp;
    tf->spsr = EL0_mode();
    tf->pc = pc;
}
@ -191,7 +184,7 @@ __attribute__((__always_inline__)) static inline void arch_init_trapframe(struct
/// @param pc
/// @brief Reset a trapframe's user stack pointer and program counter.
/// @param tf trapframe to update
/// @param sp new user stack pointer
/// @param pc new user program counter
__attribute__((__always_inline__)) static inline void arch_trapframe_set_sp_pc(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
{
    // the duplicate store to the removed sp_el0 field has been dropped
    tf->sp = sp;
    tf->pc = pc;
}
@ -217,7 +210,7 @@ extern int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t pa
/// @brief Decode and dispatch a system call from a user trapframe.
/// @param tf trapframe captured at exception entry
/// @param syscall_num out: syscall number read from the trapframe
/// @return the syscall's return value
__attribute__((__always_inline__)) static inline int arch_syscall(struct trapframe* tf, int* syscall_num)
{
    // syscall number is carried in x0 (the dead store reading x8 removed);
    // NOTE(review): parameters are taken from x1-x4 - confirm this matches
    // the user-side syscall stub's register convention.
    *syscall_num = tf->x0;
    return syscall(*syscall_num, tf->x1, tf->x2, tf->x3, tf->x4);
}

View File

@ -1,4 +1,6 @@
SRC_FILES := boot.S \
start.c
start.c \
smp.c \
cortexA72.S
include $(KERNEL_ROOT)/compiler.mk

View File

@ -7,22 +7,25 @@
// kernel.ld causes the following code to
// be placed at 0x40000000.
.section ".text"
.global _entry
_entry:
mrs x1, mpidr_el1
and x1, x1, #0x3
cbz x1, entry // primary
//.global _entry
.global _boot_start
.global primary_cpu_init
//_entry:
// mrs x1, mpidr_el1
// and x1, x1, #0x3
// cbz x1, entry // primary
# b entryothers // secondary
entry:
//entry:
// clear .bss
adrp x1, bss_start
ldr w2, =bss_size
1:
// cbz w2, 2f
str xzr, [x1], #8
sub w2, w2, #1
b 1b
// adrp x1, __bss_start__
// ldr w2, =bss_size
// 1:
// // cbz w2, 2f
// str xzr, [x1], #8
// sub w2, w2, #1
// b 1b
// 2:
// // set up entry pagetable
// //
@ -127,26 +130,80 @@ entry:
// br x1 // jump to higher address (0xffffff8000000000~)
_start:
_boot_start:
// set up a stack for C.
// stack0 is declared in start.c,
// with a 4096-byte stack per CPU.
// sp = stack0 + ((cpuid+1) * 4096)
// cpuid = mpidr_el1 & 0xff
ldr x0, =stacks_start
// save r0 for cores 1-3, r0 arg field passed by ROM
// r0 is a function pointer for secondary cpus
mov x4, x0
mrs x0, spsr_el1 /* Enter EL1 (Exception Level 1) */
bic x0, x0, #0x1f
MOV x1, #0xC5
ORR x0, x0, x1
msr spsr_el1, x0
/* set NSACR, both Secure and Non-secure access are allowed to NEON */
MRS X1, CPACR_EL1
ORR X1, X1, #(0X3 << 20)
MSR CPACR_EL1, X1
ISB
/* Clear A bit of SCTLR */
MRS x0, SCTLR_EL1
BIC x0, x0, #0x2
MSR SCTLR_EL1, x0
// clear some registers
msr elr_el1, XZR
ldr x0, =stacks_top
mov x1, #MODE_STACK_SIZE
// get cpu id, and subtract the offset from the stacks base address
mrs x2, mpidr_el1
and x2, x2, #0x3
add x2, x2, #1
mul x1, x1, x2
add x0, x0, x1
mov x5, x2
mul x3, x2, x1
sub x0, x0, x3
MOV X2, #ARM_MODE_EL1_h | DIS_INT
MSR SPSR_EL1, X2
mov sp, x0
SUB x0, x0,x1
// check cpu id - cpu0 is primary cpu
cmp x5, #0
beq primary_cpu_init
bl bootmain // for secondary cpus, jump to argument function pointer passed in by ROM
bl .
primary_cpu_init:
/* init .bss */
/* clear the .bss section (zero init) */
ldr x1, =boot_start_addr
ldr x2, =boot_end_addr
mov x3, #0
1:
cmp x1, x2
stp x3, x3, [x1], #16
b.lt 1b
// branch to c library entry point
mov x0, #0 // argc
mov x1, #0 // argv
mov x2, #0 // env
// jump to main()
b main
//b main
b . // spin
//b . // spin
.global psci_call
psci_call:
hvc #0
ret
bl bootmain
.end

View File

@ -1,17 +1,14 @@
export CROSS_COMPILE ?= aarch64-linux-gnu-
export DEVICE = -march=armv8-a -mtune=cortex-a72 -ftree-vectorize -ffast-math
export CFLAGS := $(DEVICE) -Wall -O0 -g -gdwarf-2
export AFLAGS := -c $(DEVICE) -x assembler-with-cpp -D__ASSEMBLY__ -gdwarf-2
export CROSS_COMPILE ?= aarch64-none-elf-
# export DEVICE = -march=armv8-a -mtune=cortex-a72 -ffreestanding -fno-common -nostdlib
export DEVICE = -mtune=cortex-a72 -ffreestanding -fno-common -fno-stack-protector -fno-pie -no-pie
# export CFLAGS := $(DEVICE) -Wall -Werror -O0 -g -fno-omit-frame-pointer -fPIC
export CFLAGS := $(DEVICE) -O0 -g -fno-omit-frame-pointer -fPIC
# export AFLAGS := -c $(DEVICE) -x assembler-with-cpp -D__ASSEMBLY__ -gdwarf-2
# export LFLAGS := $(DEVICE) -Wl,-Map=XiZi-imx6q-sabrelite.map,-cref,-u,_boot_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/nxp_imx6q_sabrelite.lds
export LFLAGS := $(DEVICE) --specs=nosys.specs -Wl,-Map=XiZi-ok1028a-c.map,-cref,-u,_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/preboot_for_ok1028a-c/nxp_ls1028.lds
# export LFLAGS := $(DEVICE) -mcmodel=large -Wl,-Map=XiZi-ok1028a-c.map,-cref,-u,_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/preboot_for_ok1028a-c/nxp_ls1028.lds -Wl,--start-group,-lgcc,-lc,--end-group
export LFLAGS := $(DEVICE) -Wl,-T -Wl,$(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/preboot_for_ok1028a-c/nxp_ls1028.lds -Wl,--start-group,-lgcc,-lc,--end-group
export CXXFLAGS :=
ifeq ($(CONFIG_LIB_MUSLLIB), y)
export LFLAGS += -nostdlib -nostdinc -fno-builtin -nodefaultlibs
export LIBCC := -lgcc
export LINK_MUSLLIB := $(KERNEL_ROOT)/lib/musllib/libmusl.a
endif
export DEFINES := -DHAVE_CCONFIG_H -DCHIP_LS1028
export ARCH = arm

View File

@ -0,0 +1,72 @@
/*!
* @file cortexA72.s
* @brief This file contains cortexA72 functions
*
*/
/*************************************************
File name: cortexA72.S
Description: This file contains cortexA72 functions
Others:
History:
1. Date: 2024-05-08
Author: AIIT XUOS Lab
Modification:
1. No modifications
*************************************************/
.section ".text","ax"
/*
* bool arm_set_interrupt_state(bool enable)
*/
.global arm_set_interrupt_state
.func arm_set_interrupt_state
# bool arm_set_interrupt_state(bool enable)
# x0 == 0 -> mask (disable) IRQ/FIQ; x0 != 0 -> unmask (enable).
# Returns the previous state of bit 0x80 of the saved status in x0.
# NOTE(review): this reads/writes SPSR_EL1, which is the *saved* program
# status of the exception being returned from; the live interrupt masks at
# EL1 are in DAIF - confirm this actually changes the current masking.
arm_set_interrupt_state:
mrs x2, spsr_el1
cmp x0, #0
b.eq disable_interrupts
bic x1, x2, #0xc0 // clear I/F mask bits -> enable IRQ and FIQ
b set_interrupt_state_end
disable_interrupts:
orr x1, x2, #0xc0 // set I/F mask bits -> disable IRQ and FIQ
set_interrupt_state_end:
msr spsr_el1, x1
// report the previous state of bit 0x80 of the old SPSR value
// NOTE(review): 0x80 is the I (IRQ) mask bit; FIQ is 0x40 - confirm intent.
tst x2, #0x80
mov x0, #1 // assume the bit was set
b.eq fiq_set_to_0 // bit clear -> return 0
ret
fiq_set_to_0:
mov x0, #0 // bit was clear -> return 0
ret
.endfunc
.global cpu_get_current
# int cpu_get_current(void)
# Return the calling core's CPU ID: MPIDR_EL1 affinity-0, low 2 bits
# (supports cores 0-3, matching NR_CPU).
.func cpu_get_current
cpu_get_current:
mrs x0, mpidr_el1
and x0, x0, #3
ret
.endfunc
.global get_arm_private_peripheral_base
# uint32_t get_arm_private_peripheral_base(void)
.func get_arm_private_peripheral_base
get_arm_private_peripheral_base:
# Get base address of private peripheral space.
# armv7 read was: mrc p15, 4, r0, c15, c0, 0 (read periph base address);
# on armv8 the base is hard-coded instead.
# NOTE(review): 0x00A00000 is the i.MX6 value - confirm it is correct for
# this LS1028A/cortex-A72 platform.
mov x0, #0x00A00000
ret
.endfunc
# ------------------------------------------------------------
# End of cortexA72.s
# ------------------------------------------------------------
.end

View File

@ -68,8 +68,8 @@ Modification:
#define _ARM_MSR(coproc, opcode1, Rt, CRn, CRm, opcode2) \
asm volatile("mcr p" #coproc ", " #opcode1 ", %[input], c" #CRn ", c" #CRm ", " #opcode2 "\n" ::[input] "r"(Rt))
#define WriteReg(value, address) (*(volatile unsigned int*)(address) = (value))
#define ReadReg(address) (*(volatile unsigned int*)(address))
// #define WriteReg(value, address) (*(volatile unsigned int*)(address) = (value))
// #define ReadReg(address) (*(volatile unsigned int*)(address))
#if defined(__cplusplus)
extern "C" {

View File

@ -46,12 +46,6 @@
#define EC_DABORT 0x24
#define EC_IABORT 0x20
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
#define PGROUNDUP(sz) (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE-1))
#define PTE_VALID 1 // level 0,1,2 descriptor: valid
#define PTE_TABLE 2 // level 0,1,2 descriptor: table
#define PTE_V 3 // level 3 descriptor: valid
@ -88,17 +82,17 @@
#define PTE_NORMAL PTE_INDX(AI_NORMAL_NC_IDX)
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((uint64)(pa) & 0xfffffffff000)
#define PTE2PA(pte) ((uint64)(pte) & 0xfffffffff000)
#define PA2PTE(pa) ((uint64_t)(pa) & 0xfffffffff000)
#define PTE2PA(pte) ((uint64_t)(pte) & 0xfffffffff000)
#define PTE_FLAGS(pte) ((pte) & (0x600000000003ff))
// translation control register
#define TCR_T0SZ(n) ((n) & 0x3f)
#define TCR_TG0(n) (((n) & 0x3) << 14)
#define TCR_T1SZ(n) (((n) & 0x3f) << 16)
#define TCR_TG1(n) (((n) & 0x3) << 30)
#define TCR_IPS(n) (((n) & 0x7) << 32)
// #define TCR_T0SZ(n) ((n) & 0x3f)
// #define TCR_TG0(n) (((n) & 0x3) << 14)
// #define TCR_T1SZ(n) (((n) & 0x3f) << 16)
// #define TCR_TG1(n) (((n) & 0x3) << 30)
// #define TCR_IPS(n) (((n) & 0x7) << 32)
#define ISS_MASK 0xFFFFFF

View File

@ -39,7 +39,21 @@ BOOT_STACK_SIZE = 0x4000;
OUTPUT_FORMAT("elf64-littleaarch64")
OUTPUT_ARCH( "aarch64" )
ENTRY( _entry )
/**
ENTRY( _ENTRY )
*/
ENTRY( _boot_start )
MEMORY {
/**
phy_ddr3 (rwx) : ORIGIN = 0x0000000040000000, LENGTH = 0x8000000
vir_ddr3 (rwx) : ORIGIN = 0xffffff8040010000, LENGTH = 0x8000000
*/
phy_ddr3 (rwx) : ORIGIN = 0x0000000040000000, LENGTH = 1024M
vir_ddr3 (rwx) : ORIGIN = 0x0000006040635000, LENGTH = 1024M
/* vir_ddr3 (rwx) : ORIGIN = 0xffffffE040635000, LENGTH = 1024M */
}
SECTIONS
{
@ -48,22 +62,24 @@ SECTIONS
* where qemu's -kernel jumps.
* 0x40000000(PA) is 0xffffff8040000000(VA);
*/
/*
. = 0x40000000;
*/
. = 0xffffff0000000000;
.start_sec : {
. = ALIGN(0x1000);
/* initialization start checkpoint. */
boot.o(.text)
bootmmu.o(.text .text.*)
boot.o(.rodata .rodata.*)
bootmmu.o(.rodata .rodata.*)
boot.o(.data .data.*)
bootmmu.o(.data .data.*)
PROVIDE(boot_start_addr = .);
boot.o(.bss .bss.* COMMON)
bootmmu.o(.bss .bss.* COMMON)
/* stack for booting code. */
. = ALIGN(0x1000);
@ -74,39 +90,39 @@ SECTIONS
/* initialization end checkpoint. */
PROVIDE(boot_end_addr = .);
}
} > phy_ddr3
.text : AT(0x0000000) {
*(.text .text.*)
.text : AT(0x40635000) {
. = ALIGN(0x1000);
PROVIDE(etext = .);
}
*(.text .text.* .gnu.linkonce.t.*)
} > vir_ddr3
.rodata : {
. = ALIGN(16);
*(.srodata .srodata.*) /* do not need to distinguish this from .rodata */
. = ALIGN(16);
*(.rodata .rodata.*)
}
. = ALIGN(0x1000);
.data : {
. = ALIGN(16);
*(.sdata .sdata.*) /* do not need to distinguish this from .data */
. = ALIGN(16);
*(.data .data.*)
}
. = ALIGN(1000);
PROVIDE(_binary_fs_img_start = .);
*(.rawdata_fs_img*)
PROVIDE(_binary_fs_img_end = .);
PROVIDE(_binary_init_start = .);
*(.rawdata_init*)
PROVIDE(_binary_init_end = .);
PROVIDE(_binary_default_fs_start = .);
*(.rawdata_memfs*)
PROVIDE(_binary_default_fs_end = .);
} > vir_ddr3
. = ALIGN(1000);
PROVIDE(kernel_data_begin = .);
_image_size = . - 0x0000006040000000;
.bss : {
. = ALIGN(16);
PROVIDE(bss_start = .);
*(.sbss .sbss.*) /* do not need to distinguish this from .bss */
. = ALIGN(16);
*(.bss .bss.*)
. = ALIGN(16);
PROVIDE(bss_end = .);
PROVIDE(__bss_start__ = .);
*(.bss .bss.* COMMON)
PROVIDE(__bss_end__ = .);
} > vir_ddr3
. = ALIGN(0x1000);
PROVIDE(kernel_data_end = .);
}
PROVIDE(end = .);
}
bss_size = (bss_end - bss_start) >> 3;

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2010-2012, Freescale Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* o Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* o Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* o Neither the name of Freescale Semiconductor, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file smp.c
* @brief start multicore
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.04.10
*/
/*************************************************
File name: smp.c
Description:
Others:
History:
Author: AIIT XUOS Lab
Modification:
1. No modifications
*************************************************/
#include "cortex_a72.h"
extern void _boot_start();
// Kick a secondary cpu into the kernel at _boot_start.
// NOTE(review): currently a stub - no wakeup (PSCI/SEV) is issued yet.
void cpu_start_secondary(uint8_t cpu_id)
{
return;
}
// Enable cache-coherency broadcast for SMP operation.
// NOTE(review): currently a stub on this platform.
void start_smp_cache_broadcast(int cpu_id)
{
return;
}

View File

@ -2,7 +2,8 @@
#include "cortex_a72.h"
#include "memlayout.h"
void _entry();
// void _entry();
void _boot_start();
void main();
extern char end[];
@ -15,8 +16,3 @@ void start()
{
main();
}
__attribute__((aligned(PGSIZE))) uint64_t l1entrypgt[512];
__attribute__((aligned(PGSIZE))) uint64_t l2entrypgt[512];
__attribute__((aligned(PGSIZE))) uint64_t l1kpgt[512];
__attribute__((aligned(PGSIZE))) uint64_t l2kpgt[512];

View File

@ -199,7 +199,7 @@ void InvalidateL1Icache(uintptr_t start, uintptr_t end)
va = (uint64_t)((uint64_t)addr & (~(line_size - 1)));
// Invalidate data cache line to PoC (Point of Coherence) by va.
__asm__ __volatile__("dc ivau, %0 " : : "r"(va));
__asm__ __volatile__("ic ivau, %0 " : : "r"(va));
// increment address to next line and decrement length
addr = (uintptr_t)((uint64_t)addr + line_size);
} while (addr < end_addr);

View File

@ -0,0 +1,119 @@
#include "actracer.h"
#include "core.h"
#include "cortex_a72.h"
#include "generic_timer.h"
#include "memlayout.h"
#include "clock_common_op.h"
// armv8 generic timer driver
#define CNTV_CTL_ENABLE (1 << 0)
#define CNTV_CTL_IMASK (1 << 1)
#define CNTV_CTL_ISTATUS (1 << 2)
static void enable_timer(void);
static void disable_timer(void);
static void reload_timer(void);
// Start the EL1 virtual timer and unmask its interrupt
// (set ENABLE, clear IMASK in CNTV_CTL_EL0).
static void enable_timer()
{
    uint64_t ctl = r_cntv_ctl_el0();
    ctl = (ctl | CNTV_CTL_ENABLE) & ~CNTV_CTL_IMASK;
    w_cntv_ctl_el0(ctl);
}
// Stop the EL1 virtual timer and mask its interrupt
// (clear ENABLE, set IMASK in CNTV_CTL_EL0).
static void disable_timer()
{
    uint64_t ctl = r_cntv_ctl_el0();
    ctl = (ctl & ~CNTV_CTL_ENABLE) | CNTV_CTL_IMASK;
    w_cntv_ctl_el0(ctl);
}
// Unmask the virtual timer interrupt if it is currently masked.
// Fixes two bugs in the original: `c &= CNTV_CTL_IMASK` inside the
// condition destroyed every other control bit before the write-back, and
// `c |= ~CNTV_CTL_IMASK` set nearly all bits instead of clearing the mask.
static void arch_timer_interrupt_enable()
{
    uint64_t c = r_cntv_ctl_el0();
    if (c & CNTV_CTL_IMASK) {
        c &= ~CNTV_CTL_IMASK; // clear the mask bit only
        w_cntv_ctl_el0(c);
    }
}
// Mask the virtual timer interrupt if it is currently unmasked.
// Fixes the original's `c &= CNTV_CTL_IMASK` inside the condition, which
// cleared the ENABLE bit (and everything else) before the write-back.
static void arch_timer_interrupt_disable()
{
    uint64_t c = r_cntv_ctl_el0();
    if (!(c & CNTV_CTL_IMASK)) {
        c |= CNTV_CTL_IMASK; // set the mask bit only
        w_cntv_ctl_el0(c);
    }
}
// Arm the virtual timer to fire after the next 100ms tick interval.
static void reload_timer()
{
    const uint64_t interval_us = 100000; // 100ms expressed in microseconds
    const uint64_t ticks_per_us = r_cntfrq_el0() / 1000000;
    w_cntv_tval_el0(interval_us * ticks_per_us);
}
// Busy-wait until the virtual counter has advanced by `cycles` ticks,
// hinting the core with `yield` on each iteration.
void delay(uint32_t cycles)
{
    const uint64_t begin = r_cntvct_el0();
    for (;;) {
        if ((r_cntvct_el0() - begin) >= cycles) {
            break;
        }
        __asm__ volatile("yield" ::: "memory");
    }
}
// Bring up the per-cpu generic virtual timer: mask and stop it, program the
// first 100ms interval, then start it and unmask its interrupt. Order
// matters: the timer must be stopped while TVAL is reprogrammed.
void _sys_clock_init()
{
arch_timer_interrupt_disable();
disable_timer();
reload_timer();
enable_timer();
arch_timer_interrupt_enable();
}
// Stub: clock interrupt number lookup not implemented for armv8 yet (TODO).
static uint32_t _get_clock_int()
{
return 0;
}
// Stub: tick counter not implemented for armv8 yet (TODO).
static uint64_t _get_tick()
{
return 0;
}
// Stub: wall-seconds counter not implemented for armv8 yet (TODO).
static uint64_t _get_second()
{
return 0;
}
// Stub: always reports expired; a real CNTV_CTL_ISTATUS check is TODO.
static bool _is_timer_expired()
{
return true;
}
// Acknowledge a timer tick: stop the timer, program the next 100ms
// interval, and restart it.
static void _clear_clock_intr()
{
disable_timer();
reload_timer();
enable_timer();
}
// Clock driver vtable exported to the hardkernel layer; the tick/second
// accessors are still stubs (see above).
static struct XiziClockDriver hardkernel_clock_driver = {
.sys_clock_init = _sys_clock_init,
.get_clock_int = _get_clock_int,
.get_tick = _get_tick,
.get_second = _get_second,
.is_timer_expired = _is_timer_expired,
.clear_clock_intr = _clear_clock_intr,
};
// Initialize the generic timer and return the clock driver vtable.
// NOTE(review): hardkernel_tag is unused here - confirm that is intended.
struct XiziClockDriver* hardkernel_clock_init(struct TraceTag* hardkernel_tag)
{
hardkernel_clock_driver.sys_clock_init();
return &hardkernel_clock_driver;
}

View File

@ -0,0 +1,44 @@
// armv8 generic timer
// Read CNTV_CTL_EL0: virtual timer control (ENABLE/IMASK/ISTATUS bits).
static inline uint64_t
r_cntv_ctl_el0()
{
uint64_t x;
asm volatile("mrs %0, cntv_ctl_el0" : "=r"(x));
return x;
}
// Write CNTV_CTL_EL0 (virtual timer control).
static inline void
w_cntv_ctl_el0(uint64_t x)
{
asm volatile("msr cntv_ctl_el0, %0" : : "r"(x));
}
// Read CNTV_TVAL_EL0: ticks remaining until the virtual timer fires.
static inline uint64_t
r_cntv_tval_el0()
{
uint64_t x;
asm volatile("mrs %0, cntv_tval_el0" : "=r"(x));
return x;
}
// Write CNTV_TVAL_EL0: arm the virtual timer to fire in x counter ticks.
static inline void
w_cntv_tval_el0(uint64_t x)
{
asm volatile("msr cntv_tval_el0, %0" : : "r"(x));
}
// Read CNTVCT_EL0: the free-running virtual counter.
static inline uint64_t
r_cntvct_el0()
{
uint64_t x;
asm volatile("mrs %0, cntvct_el0" : "=r"(x));
return x;
}
// Read CNTFRQ_EL0: the system counter frequency in Hz.
static inline uint64_t
r_cntfrq_el0()
{
uint64_t x;
asm volatile("mrs %0, cntfrq_el0" : "=r"(x));
return x;
}

View File

@ -1,4 +1,4 @@
SRC_FILES := vector.S trampoline.S $(BOARD)/trap_common.c error_debug.c hard_spinlock.S
SRC_FILES := trampoline.S $(BOARD)/trap_common.c $(BOARD)/trap.c error_debug.c hard_spinlock.S
ifeq ($(BOARD), ok1028a-c)
SRC_DIR := gicv3

View File

@ -38,48 +38,21 @@ Modification:
1. Take only armv8 abort reason part(_abort_reason).
2. Modify iabort and dabort handler(in dabort_handler() and iabort_handler())
*************************************************/
#include <stddef.h>
#include "assert.h"
#include "core.h"
#include "log.h"
#include "multicores.h"
#include "spinlock.h"
// #include "syscall.h"
__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
{
if ((fault_status & 0x3f) == 0x21) // Alignment failure
KPrintf("reason: alignment\n");
else if ((fault_status & 0x3f) == 0x4) // Translation fault, level 0
KPrintf("reason: sect. translation level 0\n");
else if ((fault_status & 0x3f) == 0x5) // Translation fault, level 1
KPrintf("reason: sect. translation level 1\n");
else if ((fault_status & 0x3f) == 0x6) // Translation fault, level 2
KPrintf("reason: sect. translation level 2\n");
else if ((fault_status & 0x3f) == 0x7) // Translation fault, level 3
KPrintf("reason: sect. translation level 3\n");
else if ((fault_status & 0x3f) == 0x3d) // Section Domain fault
KPrintf("reason: sect. domain\n");
else if ((fault_status & 0x3f) == 0x13) // Permission level 1
KPrintf("reason: sect. permission level 1\n");
else if ((fault_status & 0x3f) == 0x14) // Permission level 2
KPrintf("reason: sect. permission level 2\n");
else if ((fault_status & 0x3f) == 0x15) // Permission level 3
KPrintf("reason: sect. permission level 3\n");
else if ((fault_status & 0x3f) == 0x8) // External abort
KPrintf("reason: ext. abort\n");
else if ((fault_status & 0x3f) == 0x9) // Access flag fault, level 1
KPrintf("reason: sect. Access flag fault level 1\n");
else if ((fault_status & 0x3f) == 0xa) // Access flag fault, level 2
KPrintf("reason: sect. Access flag fault level 2\n");
else if ((fault_status & 0x3f) == 0xb) // Access flag fault, level 3
KPrintf("reason: sect. Access flag fault level 3\n");
else
KPrintf("reason: unknown???\n");
}
#include "task.h"
#include "trap_common.h"
void dump_tf(struct trapframe* tf)
{
KPrintf(" elr_el1: 0x%x\n", tf->elr_el1);
KPrintf(" spsr_el1: 0x%x\n", tf->spsr_el1);
KPrintf(" sp: 0x%x\n", tf->sp);
KPrintf(" pc: 0x%x\n", tf->pc);
KPrintf(" spsr: 0x%x\n", tf->spsr);
KPrintf(" x0: 0x%x\n", tf->x0);
KPrintf(" x1: 0x%x\n", tf->x1);
KPrintf(" x2: 0x%x\n", tf->x2);
@ -110,12 +83,85 @@ void dump_tf(struct trapframe* tf)
KPrintf(" x27: 0x%x\n", tf->x27);
KPrintf(" x28: 0x%x\n", tf->x28);
KPrintf(" x29: 0x%x\n", tf->x29);
KPrintf(" pc: 0x%x\n", tf->pc);
KPrintf(" x30: 0x%x\n", tf->x30);
}
void handle_undefined_instruction(struct trapframe* tf)
/// @brief Report a data abort: read ESR_EL1 (syndrome) and FAR_EL1
/// (faulting address), print a readable reason for the low 6 fault-status
/// bits, then dump the trapframe.
/// Fixes: stray leftover statements from the removed
/// handle_undefined_instruction were deleted from the top of the body, and
/// the locals are now 64-bit - `mrs` into a 32-bit C variable emits an
/// invalid w-register operand.
/// @param r trapframe captured at exception entry
void dabort_reason(struct trapframe* r)
{
    uint64_t fault_status, fault_address;
    __asm__ __volatile__("mrs %0, esr_el1" : "=r"(fault_status));
    __asm__ __volatile__("mrs %0, far_el1" : "=r"(fault_address));
    LOG("program counter: 0x%x caused\n", r->pc);
    LOG("data abort at 0x%x, status 0x%x\n", fault_address, fault_status);
    // decode the fault status code (ESR_EL1 ISS bits [5:0])
    switch (fault_status & 0x3f) {
    case 0x21: // Alignment failure
        KPrintf("reason: alignment\n");
        break;
    case 0x4: // Translation fault, level 0
        KPrintf("reason: sect. translation level 0\n");
        break;
    case 0x5: // Translation fault, level 1
        KPrintf("reason: sect. translation level 1\n");
        break;
    case 0x6: // Translation fault, level 2
        KPrintf("reason: sect. translation level 2\n");
        break;
    case 0x7: // Translation fault, level 3
        KPrintf("reason: sect. translation level 3\n");
        break;
    case 0x3d: // Section Domain fault
        KPrintf("reason: sect. domain\n");
        break;
    case 0xd: // Permission level 1
        KPrintf("reason: sect. permission level 1\n");
        break;
    case 0xe: // Permission level 2
        KPrintf("reason: sect. permission level 2\n");
        break;
    case 0xf: // Permission level 3
        KPrintf("reason: sect. permission level 3\n");
        break;
    case 0x14: // External abort
        KPrintf("reason: ext. abort\n");
        break;
    case 0x9: // Access flag fault, level 1
        KPrintf("reason: sect. Access flag fault level 1\n");
        break;
    case 0xa: // Access flag fault, level 2
        KPrintf("reason: sect. Access flag fault level 2\n");
        break;
    case 0xb: // Access flag fault, level 3
        KPrintf("reason: sect. Access flag fault level 3\n");
        break;
    default:
        KPrintf("reason: unknown???\n");
        break;
    }
    dump_tf(r);
}
/// @brief Report an instruction abort: read ESR_EL1 and FAR_EL1, print a
/// readable reason for the low 6 fault-status bits, then dump the trapframe.
/// Fixes: the log line said "data abort" (copy-paste from dabort_reason),
/// and the locals are now 64-bit - `mrs` into a 32-bit C variable emits an
/// invalid w-register operand.
/// @param r trapframe captured at exception entry
void iabort_reason(struct trapframe* r)
{
    uint64_t fault_status, fault_address;
    __asm__ __volatile__("mrs %0, esr_el1" : "=r"(fault_status));
    __asm__ __volatile__("mrs %0, far_el1" : "=r"(fault_address));
    LOG("program counter: 0x%x caused\n", r->pc);
    LOG("instruction abort at 0x%x, status 0x%x\n", fault_address, fault_status);
    // decode the fault status code (ESR_EL1 ISS bits [5:0])
    switch (fault_status & 0x3f) {
    case 0x21: // Alignment failure
        KPrintf("reason: alignment\n");
        break;
    case 0x4: // Translation fault, level 0
        KPrintf("reason: sect. translation level 0\n");
        break;
    case 0x5: // Translation fault, level 1
        KPrintf("reason: sect. translation level 1\n");
        break;
    case 0x6: // Translation fault, level 2
        KPrintf("reason: sect. translation level 2\n");
        break;
    case 0x7: // Translation fault, level 3
        KPrintf("reason: sect. translation level 3\n");
        break;
    case 0x3d: // Section Domain fault
        KPrintf("reason: sect. domain\n");
        break;
    case 0xd: // Permission level 1
        KPrintf("reason: sect. permission level 1\n");
        break;
    case 0xe: // Permission level 2
        KPrintf("reason: sect. permission level 2\n");
        break;
    case 0xf: // Permission level 3
        KPrintf("reason: sect. permission level 3\n");
        break;
    case 0x14: // External abort
        KPrintf("reason: ext. abort\n");
        break;
    case 0x9: // Access flag fault, level 1
        KPrintf("reason: sect. Access flag fault level 1\n");
        break;
    case 0xa: // Access flag fault, level 2
        KPrintf("reason: sect. Access flag fault level 2\n");
        break;
    case 0xb: // Access flag fault, level 3
        KPrintf("reason: sect. Access flag fault level 3\n");
        break;
    default:
        KPrintf("reason: unknown???\n");
        break;
    }
    dump_tf(r);
}

View File

@ -1,3 +1,3 @@
SRC_FILES :=giv3.c
SRC_FILES := gicv3.c gicv3_distributer.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -15,14 +15,12 @@ Author: AIIT XUOS Lab
Modification:
*************************************************/
#include "string.h"
#include <stdio.h>
#include "core.h"
#include "gicv3_common_opa.h"
#include "gicv3_registers.h"
static void gic_setup_ppi(uint32_t cpuid, uint32_t intid) __attribute__((unused));
static void gic_setup_spi(uint32_t intid);
static struct {
char* gicd;
char* rdist_addrs[NR_CPU];
@ -56,16 +54,28 @@ w_icc_pmr_el1(uint32_t x)
__asm__ volatile("msr S3_0_C4_C6_0, %0" : : "r"(x));
}
static inline uint32_t
icc_iar1_el1()
// static inline uint32_t
// icc_iar1_el1()
// {
// uint32_t x;
// __asm__ volatile("mrs %0, S3_0_C12_C12_0" : "=r"(x));
// return x;
// }
// static inline void
// w_icc_eoir1_el1(uint32_t x)
// {
// __asm__ volatile("msr S3_0_C12_C12_1, %0" : : "r"(x));
// }
// Acknowledge the highest-priority pending group-1 interrupt by reading
// ICC_IAR1_EL1 (system-register encoding S3_0_C12_C12_0); returns its INTID.
// NOTE(review): plain `inline` at file scope needs an extern definition in
// exactly one TU under C99 inline semantics - confirm one exists.
inline uint32_t gic_read_irq_ack()
{
uint32_t x;
__asm__ volatile("mrs %0, S3_0_C12_C12_0" : "=r"(x));
return x;
}
static inline void
w_icc_eoir1_el1(uint32_t x)
// Signal end-of-interrupt by writing the acknowledged INTID to
// ICC_EOIR1_EL1 (system-register encoding S3_0_C12_C12_1).
inline void
gic_write_end_of_irq(uint32_t x)
{
__asm__ volatile("msr S3_0_C12_C12_1, %0" : : "r"(x));
}
@ -84,12 +94,6 @@ w_icc_sre_el1(uint32_t x)
__asm__ volatile("msr S3_0_C12_C12_5, %0" : : "r"(x));
}
static inline gicc_t* gic_get_gicc(void)
{
uint32_t base = get_arm_private_peripheral_base() + kGICCBaseOffset;
return (gicc_t*)base;
}
static void
gicd_write(uint32_t off, uint32_t val)
{
@ -129,8 +133,6 @@ gicdinit()
uint32_t typer = gicd_read(D_TYPER);
uint32_t lines = typer & 0x1f;
printf("lines %d\n", lines);
for (int i = 0; i < lines; i++)
gicd_write(D_IGROUPR(i), ~0);
}
@ -151,8 +153,7 @@ gicrinit(uint32_t cpuid)
;
}
static void
gic_enable()
void gic_enable()
{
gicd_write(D_CTLR, (1 << 1));
w_icc_igrpen1_el1(1);
@ -166,13 +167,9 @@ void gic_init()
}
gicdinit();
gic_setup_spi(UART0_IRQ);
gic_setup_spi(VIRTIO0_IRQ);
}
static inline uint64_t
cpuid()
static inline uint64_t cpuid()
{
uint64_t x;
__asm__ volatile("mrs %0, mpidr_el1" : "=r"(x));
@ -181,13 +178,11 @@ cpuid()
/// @brief Per-hart GICv3 init: cpu interface, this hart's redistributor,
/// the timer PPI for this cpu, then enable distribution/grouping.
/// Fixes the duplicate conflicting declarations of `cpu`
/// (`int cpu = ...; uint32_t cpu = ...;`), which is a redeclaration error.
void gicv3inithart()
{
    uint32_t cpu = cpuid();

    giccinit();
    gicrinit(cpu);

    gic_setup_ppi(cpu, TIMER0_IRQ);

    gic_enable();
}
@ -254,22 +249,18 @@ gicr_set_prio0(uint32_t cpuid, uint32_t intid)
gicr_write(cpuid, R_IPRIORITYR(intid / 4), p);
}
static void
gic_setup_ppi(uint32_t cpuid, uint32 intid)
// Configure a private peripheral interrupt (PPI) on one cpu's
// redistributor: highest priority, clear any stale pending state, enable.
void gic_setup_ppi(uint32_t cpuid, uint32_t intid)
{
gicr_set_prio0(cpuid, intid);
gicr_clear_pending(cpuid, intid);
gicr_enable_int(cpuid, intid);
}
static void
gic_setup_spi(uint32_t intid)
/// @brief Configure a shared peripheral interrupt (SPI) on the distributor:
/// highest priority, routed to the given cpu, pending cleared, enabled.
/// Fixes the leftover duplicate `gic_set_target(intid, 0)` call (diff
/// residue) that conflicted with the cpuid-based routing, and the garbled
/// comment next to it.
void gic_setup_spi(uint32_t cpuid, uint32_t intid)
{
    gic_set_prio0(intid);
    // route this SPI to the requested cpu
    gic_set_target(intid, cpuid);
    gic_clear_pending(intid);
    gic_enable_int(intid);
}
@ -282,14 +273,13 @@ int gic_iar_irq(uint32_t iar)
// interrupt acknowledge register:
// ask GIC what interrupt we should serve.
// interrupt acknowledge register:
// ask GIC what interrupt we should serve.
// Fixes the duplicated declarator lines from the old static version and the
// unreachable return that called the removed icc_iar1_el1() wrapper.
uint32_t gic_iar()
{
    return gic_read_irq_ack();
}
// tell GIC we've served this IRQ.
// Signal end-of-interrupt for the given acknowledged INTID.
// Fixes the duplicated EOI write: the stale call to the removed
// w_icc_eoir1_el1() wrapper is dropped, leaving a single EOIR write.
void gic_eoi(uint32_t iar)
{
    gic_write_end_of_irq(iar);
}

View File

@ -142,6 +142,9 @@ void gic_set_cpu_target(uint32_t irqID, unsigned cpuNumber, bool enableIt);
//! 0 being the highest priority.
void gic_set_irq_priority(uint32_t irq_id, uint32_t priority);
void gic_setup_spi(uint32_t cpuid, uint32_t intid);
void gicv3inithart();
//! @brief Send a software generated interrupt to a specific CPU.
//!
//! @param irq_id The interrupt number to send.

View File

@ -0,0 +1,77 @@
/**
* @file gicv3_distributer.c
* @brief gicv3_distributer
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.05.10
*/
/*************************************************
File name: gicv3_distributer.c
Description: gicv3_distributer operation
Others:
History:
Author: AIIT XUOS Lab
Modification:
*************************************************/
#include "string.h"
#include "gicv3_common_opa.h"
#include "gicv3_registers.h"
// Compute the distributor (GICD) MMIO base from the private peripheral
// base plus the distributor offset.
static inline gicd_t* gic_get_gicd(void)
{
uint64_t base = get_arm_private_peripheral_base() + kGICDBaseOffset;
return (gicd_t*)base;
}
// Route (or unroute) an SPI to one cpu by setting/clearing its bit in the
// per-interrupt GICD_ITARGETSR byte.
void gic_set_cpu_target(uint32_t irqID, unsigned cpuNumber, bool enableIt)
{
    gicd_t* dist = gic_get_gicd();
    uint8_t target_bit = 1 << cpuNumber;

    if (!enableIt) {
        dist->ITARGETSRn[irqID] &= ~(target_bit & 0xff);
    } else {
        dist->ITARGETSRn[irqID] |= (target_bit & 0xff);
    }
}
// Set or clear an interrupt's bit in GICD_IGROUPR (one read-modify-write).
// NOTE(review): the bit is SET when isSecure is true, matching the original
// behavior - confirm the intended group-0/group-1 polarity.
void gic_set_irq_security(uint32_t irqID, bool isSecure)
{
    gicd_t* dist = gic_get_gicd();
    uint32_t reg = irq_get_register_offset(irqID);
    uint32_t bit = irq_get_bit_mask(irqID);

    uint32_t group = dist->IGROUPRn[reg];
    group = isSecure ? (group | bit) : (group & ~bit);
    dist->IGROUPRn[reg] = group;
}
// Enable or disable one interrupt via the distributor's set-enable /
// clear-enable banks (a written 1 sets/clears; 0 bits are ignored).
void gic_enable_irq(uint32_t irqID, bool isEnabled)
{
    gicd_t* dist = gic_get_gicd();
    uint32_t reg = irq_get_register_offset(irqID);
    uint32_t bit = irq_get_bit_mask(irqID);

    if (!isEnabled) {
        dist->ICENABLERn[reg] = bit;
    } else {
        dist->ISENABLERn[reg] = bit;
    }
}
// Set an interrupt's priority (0 = highest); only the low byte is used.
void gic_set_irq_priority(uint32_t ID, uint32_t priority)
{
gicd_t* gicd = gic_get_gicd();
gicd->IPRIORITYRn[ID] = priority & 0xff;
}

View File

@ -1,5 +1,4 @@
/*
* include/linux/irqchip/gicv3_registers.h
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
*
@ -8,7 +7,7 @@
* published by the Free Software Foundation.
*/
/**
* @file gicv3_registers.c
* @file gicv3_registers.h
* @brief gicv3 registers
* @version 1.0
* @author AIIT XUOS Lab
@ -28,16 +27,21 @@ Modification:
#ifndef __LINUX_IRQCHIP_ARM_GIC_H
#define __LINUX_IRQCHIP_ARM_GIC_H
// clang-format off
// interrupt controller GICv3
#define GICV3 (DEV_VRTMEM_BASE + 0x08000000L)
#define GICV3_REDIST (DEV_VRTMEM_BASE + 0x080a0000L)
#define D_CTLR 0x0
#define D_TYPER 0x4
#define D_IGROUPR(n) (0x80 + (uint64)(n) * 4)
#define D_ISENABLER(n) (0x100 + (uint64)(n) * 4)
#define D_ICENABLER(n) (0x180 + (uint64)(n) * 4)
#define D_ISPENDR(n) (0x200 + (uint64)(n) * 4)
#define D_ICPENDR(n) (0x280 + (uint64)(n) * 4)
#define D_IPRIORITYR(n) (0x400 + (uint64)(n) * 4)
#define D_ITARGETSR(n) (0x800 + (uint64)(n) * 4)
#define D_ICFGR(n) (0xc00 + (uint64)(n) * 4)
#define D_IGROUPR(n) (0x80 + (uint64_t)(n) * 4)
#define D_ISENABLER(n) (0x100 + (uint64_t)(n) * 4)
#define D_ICENABLER(n) (0x180 + (uint64_t)(n) * 4)
#define D_ISPENDR(n) (0x200 + (uint64_t)(n) * 4)
#define D_ICPENDR(n) (0x280 + (uint64_t)(n) * 4)
#define D_IPRIORITYR(n) (0x400 + (uint64_t)(n) * 4)
#define D_ITARGETSR(n) (0x800 + (uint64_t)(n) * 4)
#define D_ICFGR(n) (0xc00 + (uint64_t)(n) * 4)
#define R_CTLR 0x0
#define R_WAKER 0x14
@ -52,6 +56,8 @@ Modification:
#define R_ICFGR1 (SGI_BASE + 0xc04)
#define R_IGRPMODR0 (SGI_BASE + 0xd00)
// clang-format on
#endif
#include <stdint.h>
@ -109,3 +115,5 @@ enum _gicd_sgir_fields {
kBP_GICD_SGIR_SGIINTID = 0,
kBM_GICD_SGIR_SGIINTID = 0xf
};
typedef volatile struct _gicd_registers gicd_t;

View File

@ -58,15 +58,18 @@ Modification:
.func _spinlock_lock
_spinlock_lock:
ldxr x1, [x0] // check if the spinlock is currently unlocked
sevl
wfe
// wait for an event signal
ldaxrb w1, [x0] // check if the spinlock is currently unlocked
cmp x1, #UNLOCKED
wfe // wait for an event signal
// wfe // wait for an event signal
bne _spinlock_lock
mrs x1, mpidr_el1 // get our CPU ID
and x1, x1, #3
stxr w2, x1, [x0]
stxrb w2, w1, [x0]
cbnz x2, _spinlock_lock // check if the write was successful, if the write failed, start over
dmb ish // Ensure that accesses to shared resource have completed
@ -89,6 +92,7 @@ ldr x2, [x0]
cmp x1, x2
bne 1f //doesn't match,jump to 1
dmb ish
mov x1, #UNLOCKED
@ -107,3 +111,7 @@ ret
mov x0, #1 //doesn't match, so exit with failure
ret
.endfunc
.end

View File

@ -0,0 +1,64 @@
/**
* @file exception_registers.h
* @brief exception registers
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.05.09
*/
// Write VBAR_EL1: base address of the EL1 exception vector table.
static inline void w_vbar_el1(uint64_t x)
{
    asm volatile("msr vbar_el1, %0" : : "r"(x));
}
// Read ESR_EL1: the syndrome describing the cause of the last
// exception taken to EL1.
static inline uint64_t
r_esr_el1()
{
    uint64_t x;
    asm volatile("mrs %0, esr_el1" : "=r"(x));
    return x;
}
// Write ESR_EL1 (used by the trap code to clear the syndrome
// after it has been handled).
static inline void
w_esr_el1(uint64_t x)
{
    asm volatile("msr esr_el1, %0" : : "r"(x));
}
// Read ELR_EL1: the exception return address (the PC to resume at
// after handling the exception).
static inline uint64_t
r_elr_el1()
{
    uint64_t x;
    asm volatile("mrs %0, elr_el1" : "=r"(x));
    return x;
}
// Read FAR_EL1: the faulting virtual address recorded for
// data/instruction aborts.
static inline uint64_t
r_far_el1()
{
    uint64_t x;
    asm volatile("mrs %0, far_el1" : "=r"(x));
    return x;
}
// Read the PSTATE.DAIF exception-mask bits (Debug, SError, IRQ, FIQ).
static inline uint64_t
daif()
{
    uint64_t x;
    asm volatile("mrs %0, daif" : "=r"(x));
    return x;
}
// Unmask exceptions. NOTE: `daifclr, #0xf` clears ALL of PSTATE.D/A/I/F
// (debug, SError, IRQ and FIQ), not only IRQ as the old comment suggested.
static inline void
intr_on()
{
    asm volatile("msr daifclr, #0xf" ::: "memory");
}
// Mask exceptions. NOTE: `daifset, #0xf` sets ALL of PSTATE.D/A/I/F
// (debug, SError, IRQ and FIQ), not only IRQ as the old comment suggested.
static inline void
intr_off()
{
    asm volatile("msr daifset, #0xf" ::: "memory");
}

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file trap.c
* @brief trap interface of hardkernel
* @version 1.0
* @author AIIT XUOS Lab
* @date 2023.05.06
*/
/*************************************************
File name: trap.c
Description: trap interface of hardkernel
Others:
History:
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include <assert.h>
#include <stdio.h>
#include "core.h"
#include "exception_registers.h"
#include "multicores.h"
#include "syscall.h"
#include "task.h"
extern void dabort_handler(struct trapframe* r);
extern void iabort_handler(struct trapframe* r);
/* Handle a synchronous exception taken from EL1 (kernel mode).
 * Dispatches on the ESR_EL1 exception class and never returns normally:
 * if an abort handler comes back, or the class is unrecognized, we panic. */
void kernel_abort_handler(struct trapframe* tf)
{
    uint64_t esr = r_esr_el1();
    /* EC lives in ESR_EL1[31:26]: shift first, THEN mask.
     * The previous `(esr & 0x3F) >> 26` masked the low bits first and
     * therefore always produced 0 (cf. syscall_arch_handler, which
     * correctly does `>> 0x1A & 0x3F`). */
    switch ((esr >> 26) & 0x3F) {
    case 0b100100: // data abort, lower EL
    case 0b100101: // data abort, current EL
        dabort_handler(tf);
        break; // was missing: fell through into the iabort path
    case 0b100000: // instruction abort, lower EL
    case 0b100001: // instruction abort, current EL
        iabort_handler(tf);
        break;
    default:
        panic("Unimplemented Error Occured.\n");
    }
    panic("Return from abort handler.\n");
}
/* IRQ taken while at EL1. The kernel is expected to run with interrupts
 * masked, so reaching this handler indicates a design violation. */
void kernel_intr_handler(struct trapframe* tf)
{
    panic("Intr at kernel mode should never happen by design.\n");
}
extern void context_switch(struct context**, struct context*);
/* Entry for synchronous exceptions taken from EL0 (user mode).
 * EC 0b010101 (SVC from AArch64) is a system call and is dispatched to
 * software_irq_dispatch(); any other exception class kills the task. */
void syscall_arch_handler(struct trapframe* tf)
{
    /* Read ESR_EL1 exactly once, BEFORE clearing it: the old code re-read
     * the register in the error printf after w_esr_el1(0) and therefore
     * always reported ec=0. */
    uint64_t esr = r_esr_el1();
    uint64_t ec = (esr >> 0x1A) & 0x3F;
    w_esr_el1(0);
    if (ec == 0b010101) { // SVC instruction executed in AArch64 state
        software_irq_dispatch(tf);
    } else {
        printf("USYSCALL: unexpected ec %p", esr);
        printf(" elr=%p far=%p\n", r_elr_el1(), r_far_el1());
        // kill error task
        xizi_enter_kernel();
        /* A task must be running for a trap to arrive from EL0; the old
         * `== NULL` assert contradicted the dereferences just below. */
        assert(cur_cpu()->task != NULL);
        sys_exit(cur_cpu()->task);
        context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
        panic("dabort end should never be reashed.\n");
    }
}

View File

@ -29,6 +29,8 @@ Modification:
#include <string.h>
#include "core.h"
#include "cortex_a72.h"
#include "exception_registers.h"
#include "gicv3_common_opa.h"
#include "trap_common.h"
@ -46,8 +48,6 @@ static struct XiziTrapDriver xizi_trap_driver;
void panic(char* s)
{
xizi_trap_driver.cpu_irq_disable();
spinlock_unlock(&whole_kernel_lock);
KPrintf("panic: %s\n", s);
for (;;)
;
@ -60,80 +60,55 @@ extern uint64_t _vector_jumper;
extern uint64_t _vector_start;
extern uint64_t _vector_end;
void init_cpu_mode_stacks(int cpu_id)
{
uint32_t modes[] = { ARM_MODE_EL0_t, ARM_MODE_EL1_t, ARM_MODE_EL2_t, ARM_MODE_EL3_t };
// initialize the stacks for different mode
for (int i = 0; i < sizeof(modes) / sizeof(uint64_t); i++) {
memset(mode_stack_pages[cpu_id][i], 0, MODE_STACK_SIZE);
init_stack(modes[i], (uint64_t)mode_stack_pages[cpu_id][i]);
}
}
void handle_reserved(void)
{
// unimplemented trap handler
LOG("Unimplemented Reserved\n");
panic("");
}
void handle_fiq(void)
{
LOG("Unimplemented FIQ\n");
panic("");
}
// void init_cpu_mode_stacks(int cpu_id)
// {
// uint32_t modes[] = { ARM_MODE_EL0_t, ARM_MODE_EL1_t, ARM_MODE_EL2_t, ARM_MODE_EL3_t };
// // initialize the stacks for different mode
// for (int i = 0; i < sizeof(modes) / sizeof(uint64_t); i++) {
// memset(mode_stack_pages[cpu_id][i], 0, MODE_STACK_SIZE);
// init_stack(modes[i], (uint64_t)mode_stack_pages[cpu_id][i]);
// }
// }
extern void alltraps();
static void _sys_irq_init(int cpu_id)
{
/* load exception vectors */
init_cpu_mode_stacks(cpu_id);
if (cpu_id == 0) {
volatile uint64_t* vector_base = &_vector_start;
// primary core init intr
xizi_trap_driver.switch_hw_irqtbl((uintptr_t*)alltraps);
// Set Interrupt handler start address
vector_base[1] = (uint64_t)trap_undefined_instruction; // Undefined Instruction
vector_base[2] = (uint64_t)user_trap_swi_enter; // Software Interrupt
vector_base[3] = (uint64_t)trap_iabort; // Prefetch Abort
vector_base[4] = (uint64_t)trap_dabort; // Data Abort
vector_base[5] = (uint64_t)handle_reserved; // Reserved
vector_base[6] = (uint64_t)trap_irq_enter; // IRQ
vector_base[7] = (uint64_t)handle_fiq; // FIQ
}
/* active hardware irq responser */
if (cpu_id == 0) {
xizi_trap_driver.switch_hw_irqtbl((uintptr_t*)alltraps);
gic_init();
xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
}
gicv3inithart();
}
static void _cpu_irq_enable(void)
{
arm_set_interrupt_state(true);
// arm_set_interrupt_state(true);
intr_on();
}
static void _cpu_irq_disable(void)
{
arm_set_interrupt_state(false);
intr_off();
}
static void _single_irq_enable(int irq, int cpu, int prio)
{
gic_enable();
gic_setup_spi(cpu, irq);
}
static void _single_irq_disable(int irq, int cpu)
{
return;
}
#define VBAR
static inline uint32_t _switch_hw_irqtbl(uint32_t* new_tbl_base)
static inline uintptr_t* _switch_hw_irqtbl(uintptr_t* new_tbl_base)
{
uint32_t old_tbl_base = 0;
// get old irq table base addr
__asm__ volatile("mrs %0, vbar_el1" : "=r"(old_tbl_base));
w_vbar_el1((uint64_t)new_tbl_base);
// set new irq table base addr
__asm__ volatile("msr vbar_el1, %0" : : "r"(new_tbl_base));
return old_tbl_base;
return NULL;
}
static void _bind_irq_handler(int irq, irq_handler_t handler)
@ -144,22 +119,13 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
static uint32_t _hw_before_irq()
{
uint32_t vectNum = gic_read_irq_ack();
if (vectNum & 0x200) {
gic_write_end_of_irq(vectNum);
return 0;
}
return vectNum;
uint32_t iar = gic_read_irq_ack();
return iar;
}
static uint32_t _hw_cur_int_num(uint32_t int_info)
{
return int_info & 0x1FF;
}
static __attribute__((unused)) uint32_t _hw_cur_int_cpu(uint32_t int_info)
{
return (int_info >> 10) & 0x7;
return int_info & 0x3FF;
}
static void _hw_after_irq(uint32_t int_info)
@ -167,15 +133,6 @@ static void _hw_after_irq(uint32_t int_info)
gic_write_end_of_irq(int_info);
}
static __attribute__((unused)) int _is_interruptable(void)
{
uint32_t val;
asm volatile("mrs %0, spsr_el1" : "=r"(val));
return !(val & DIS_INT);
}
int _cur_cpu_id()
{
return cpu_get_current();
@ -189,7 +146,7 @@ static struct XiziTrapDriver xizi_trap_driver = {
.cpu_irq_disable = _cpu_irq_disable,
.single_irq_enable = _single_irq_enable,
.single_irq_disable = _single_irq_disable,
//.switch_hw_irqtbl = _switch_hw_irqtbl,
.switch_hw_irqtbl = _switch_hw_irqtbl,
.bind_irq_handler = _bind_irq_handler,

View File

@ -35,9 +35,10 @@ Modification:
.global trap_irq_enter
.global trap_return
.global usertrapret
.global init_stack
trap_return:
/* Restore registers. */
// Restore registers
ldp x1, x2, [sp], #16
ldp x3, x0, [sp], #16
msr sp_el0, x1
@ -92,7 +93,7 @@ user_trap_swi_enter:
b trap_return
trap_irq_enter:
/* Build trapframe. */
// Build trapframe.
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
@ -115,15 +116,225 @@ trap_irq_enter:
stp x3, x0, [sp, #-16]!
stp x1, x2, [sp, #-16]!
/* Call trap(struct trapframe*). */
//Call trap(struct trapframe*)
mov x0, sp
bl intr_irq_dispatch
b trap_return
/* Help forkret to call trap_return in an expected way. */
// Help forkret to call trap_return in an expected way
//usertrapret:
// Overlay stack pointer in trap_return
// mov sp, x0
// b trap_return
init_stack:
mrs x2, spsr_el1
bic x2, x2, #SPSR_MODE_MASK
orr x2, x2, x0
msr spsr_el1, x2
mov sp, x1
bic x2, x2, #SPSR_MODE_MASK
orr x2, x2, #ARM_MODE_EL1_t
msr spsr_el1, x2
ret
.section ".text"
.macro savereg
msr daifset, #0xf
// make room to save registers.
sub sp, sp, #272
// save the registers.
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
mrs x9, elr_el1
mrs x10, spsr_el1
add x11, sp, #272
stp x30, x9, [sp, #16 * 15]
stp x10, x11, [sp, #16 * 16]
.endm
.macro restorereg
ldp x30, x9, [sp, #16 * 15]
ldp x10, x11, [sp, #16 * 16]
msr elr_el1, x9
msr spsr_el1, x10
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
ldp x8, x9, [sp, #16 * 4]
ldp x10, x11, [sp, #16 * 5]
ldp x12, x13, [sp, #16 * 6]
ldp x14, x15, [sp, #16 * 7]
ldp x16, x17, [sp, #16 * 8]
ldp x18, x19, [sp, #16 * 9]
ldp x20, x21, [sp, #16 * 10]
ldp x22, x23, [sp, #16 * 11]
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
add sp, sp, #272
.endm
.macro usavereg
msr daifset, #0xf
sub sp, sp, #272
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
mrs x9, elr_el1
mrs x10, spsr_el1
mrs x11, sp_el0
stp x30, x9, [sp, #16 * 15]
stp x10, x11, [sp, #16 * 16]
.endm
.macro urestorereg
ldp x30, x9, [sp, #16 * 15]
ldp x10, x11, [sp, #16 * 16]
msr elr_el1, x9
msr spsr_el1, x10
msr sp_el0, x11
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
ldp x8, x9, [sp, #16 * 4]
ldp x10, x11, [sp, #16 * 5]
ldp x12, x13, [sp, #16 * 6]
ldp x14, x15, [sp, #16 * 7]
ldp x16, x17, [sp, #16 * 8]
ldp x18, x19, [sp, #16 * 9]
ldp x20, x21, [sp, #16 * 10]
ldp x22, x23, [sp, #16 * 11]
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
add sp, sp, #272
.endm
.global alltraps
.balign 0x800
alltraps:
// Current EL with sp0
b .
.balign 0x80
b .
.balign 0x80
b .
.balign 0x80
b .
// Current EL with spx
.balign 0x80
b el1sync
.balign 0x80
b el1irq
.balign 0x80
b .
.balign 0x80
b .
// Lower EL using aarch64
.balign 0x80
b el0sync
.balign 0x80
b el0irq
.balign 0x80
b .
.balign 0x80
b .
// Lower EL using aarch32
.balign 0x80
b .
.balign 0x80
b .
.balign 0x80
b .
.balign 0x80
b .
el1sync:
savereg
mov x0, sp
bl kernel_abort_handler
restorereg
eret
el1irq:
savereg
mov x0, sp
# this should never happen by design
bl kernel_intr_handler
restorereg
eret
el0sync:
usavereg
mov x0, sp
bl syscall_arch_handler
urestorereg
eret
el0irq:
usavereg
mov x0, sp
bl intr_irq_dispatch
trapret:
urestorereg
eret
.global usertrapret
usertrapret:
/* Overlay stack pointer in trap_return. */
mov sp, x0
b trap_return
b trapret

View File

@ -1,67 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file vector.S
* @brief define vector table function
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.4.22
*/
/*************************************************
File name: vector.S
Description: cortex-a9 vector table
Others:
History:
1. Date: 2024.4.22
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "memlayout.h"
#define ventry .align 7; b trap_irq_enter
#define verror(type) .align 7; mov x0, #(type); b irq_error
.globl vectors
.align 11
vectors:
el1_sp0:
verror(0)
verror(1)
verror(2)
verror(3)
el1_spx:
/* Current EL with SPx */
verror(4)
verror(5)
verror(6)
verror(7)
el0_aarch64:
/* Lower EL using AArch64 */
ventry
ventry
verror(10)
verror(11)
el0_aarch32:
/* Lower EL using AArch32 */
verror(12)
verror(13)
verror(14)
verror(15)

View File

@ -59,7 +59,7 @@ __attribute__((optimize("O0"))) void spinlock_init(struct spinlock* lock, char*
}
extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
void _spinlock_unlock(struct spinlock* lock);
extern void _spinlock_unlock(struct spinlock* lock);
__attribute__((optimize("O0"))) void spinlock_lock(struct spinlock* lock)
{

View File

@ -32,12 +32,11 @@ Modification:
#include <stdbool.h>
#include <stdint.h>
#include "actracer.h"
#include "core.h"
#include "irq_numbers.h"
#include "memlayout.h"
#include "actracer.h"
#define NR_IRQS HW_NR_IRQS
#define NR_MODE_STACKS 4
@ -65,7 +64,7 @@ struct XiziTrapDriver {
void (*single_irq_enable)(int irq, int cpu, int prio);
void (*single_irq_disable)(int irq, int cpu);
uint32_t* (*switch_hw_irqtbl)(uint32_t*);
uintptr_t* (*switch_hw_irqtbl)(uintptr_t*);
void (*bind_irq_handler)(int, irq_handler_t);
/* check if no if interruptable */

View File

@ -28,9 +28,9 @@ Modification:
*************************************************/
#include "core.h"
#include "memlayout.h"
#include "mmu.h"
#include "mmio_access.h"
#include "mmu.h"
#include "registers.h"
#include <stdint.h>
#include <string.h>
@ -38,52 +38,117 @@ Modification:
extern uint64_t kernel_data_end[];
extern uint64_t kernel_data_begin[];
#define NR_PDE_ENTRIES 512
#define L1_TYPE_SEC (2 << 0)
#define L1_SECT_DEV ((0B00) << 2) // Device memory
#define L1_SECT_AP0 (1 << 6) // Data Access Permissions
uint64_t boot_pgdir[NR_PDE_ENTRIES] __attribute__((aligned(0x4000))) = { 0 };
#define NR_PDE_ENTRIES (1 << 9)
#define L2_TYPE_TAB 2
#define L2_PTE_VALID 1
#define L3_TYPE_TAB 2
#define L3_PTE_VALID 1
#define L4_TYPE_PAGE (3 << 0)
#define L4_PTE_DEV ((0b00) << 2) // Device memory
#define L4_PTE_AF (1 << 10) // Data Access Permissions
#define IDX_MASK (0b111111111)
#define L3_PDE_INDEX(idx) ((idx << LEVEL3_PDE_SHIFT) & L3_IDX_MASK)
uint64_t boot_l2pgdir[NUM_LEVEL2_PDE] __attribute__((aligned(0x1000))) = { 0 };
// uint64_t boot_lowspace_l2pgdir[NUM_LEVEL2_PDE] __attribute__((aligned(0x4000))) = { 0 };
// uint64_t boot_highspace_l2pgdir[NUM_LEVEL2_PDE] __attribute__((aligned(0x4000))) = { 0 };
uint64_t boot_dev_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x1000))) = { 0 };
// uint64_t boot_identical_dev_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x4000))) = { 0 };
uint64_t boot_virt_dev_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x1000))) = { 0 };
uint64_t boot_kern_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x1000))) = { 0 };
// uint64_t boot_identical_kern_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x4000))) = { 0 };
uint64_t boot_virt_kern_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x1000))) = { 0 };
uint64_t boot_dev_l4pgdirs[NUM_LEVEL3_PDE][NUM_LEVEL4_PTE] __attribute__((aligned(0x1000))) = { 0 };
uint64_t boot_kern_l4pgdirs[NUM_LEVEL3_PDE][NUM_LEVEL4_PTE] __attribute__((aligned(0x1000))) = { 0 };
// uint64_t boot_mem_l4pgdirs[NUM_LEVEL3_PDE][NUM_LEVEL4_PTE] __attribute__((aligned(0x1000))) = { 0 };
static void build_boot_pgdir()
{
uint64_t dev_phy_mem_base = DEV_PHYMEM_BASE;
// dev mem
uint64_t dev_mem_end_idx = (DEV_PHYMEM_BASE + DEV_MEM_SZ) >> LEVEL3_PDE_SHIFT;
for (uint64_t i = DEV_PHYMEM_BASE >> LEVEL3_PDE_SHIFT; i < dev_mem_end_idx; i++) {
boot_pgdir[i] = (i << LEVEL3_PDE_SHIFT) | L1_TYPE_SEC | L1_SECT_DEV | L1_SECT_AP0;
boot_pgdir[MMIO_P2V_WO(i << LEVEL3_PDE_SHIFT) >> LEVEL3_PDE_SHIFT] = (i << LEVEL3_PDE_SHIFT) | L1_TYPE_SEC | L1_SECT_DEV | L1_SECT_AP0;
boot_l2pgdir[(dev_phy_mem_base >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_dev_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
boot_l2pgdir[(MMIO_P2V_WO(dev_phy_mem_base) >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_dev_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
// boot_lowspace_l2pgdir[(dev_phy_mem_base >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_identical_dev_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
// boot_highspace_l2pgdir[(MMIO_P2V_WO(dev_phy_mem_base) >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_identical_dev_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
uint64_t cur_mem_paddr = (uint64_t)DEV_PHYMEM_BASE & ((uint64_t)IDX_MASK << (uint64_t)LEVEL2_PDE_SHIFT);
for (size_t i = 0; i < NUM_LEVEL3_PDE; i++) {
boot_dev_l3pgdir[i] = (uint64_t)boot_dev_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
// boot_identical_dev_l3pgdir[i] = (uint64_t)boot_dev_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
// boot_virt_dev_l3pgdir[i] = (uint64_t)boot_dev_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
for (size_t j = 0; j < NUM_LEVEL4_PTE; j++) {
// boot_dev_l4pgdirs[i][j] = dev_phy_mem_base | L4_TYPE_PAGE | L4_PTE_DEV | L4_PTE_AF;
boot_dev_l4pgdirs[i][j] = cur_mem_paddr | L4_TYPE_PAGE | L4_PTE_DEV | L4_PTE_AF;
// dev_phy_mem_base += PAGE_SIZE;
cur_mem_paddr += PAGE_SIZE;
}
}
// identical mem
uint64_t idn_mem_start_idx = PHY_MEM_BASE >> LEVEL3_PDE_SHIFT;
uint64_t idn_mem_end_idx = PHY_MEM_STOP >> LEVEL3_PDE_SHIFT;
for (uint64_t i = idn_mem_start_idx; i < idn_mem_end_idx; i++) {
boot_pgdir[i] = i << LEVEL3_PDE_SHIFT | L1_TYPE_SEC | L1_SECT_AP0;
// uint64_t phy_mem_base = PHY_MEM_BASE;
boot_l2pgdir[(PHY_MEM_BASE >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_kern_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
boot_l2pgdir[(P2V_WO(PHY_MEM_BASE) >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_kern_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
// boot_lowspace_l2pgdir[(PHY_MEM_BASE >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_identical_kern_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
// boot_highspace_l2pgdir[(KERN_MEM_BASE >> LEVEL2_PDE_SHIFT) & IDX_MASK] = (uint64_t)boot_identical_kern_l3pgdir | L2_TYPE_TAB | L2_PTE_VALID;
cur_mem_paddr = (uint64_t)PHY_MEM_BASE & ((uint64_t)IDX_MASK << (uint64_t)LEVEL2_PDE_SHIFT);
for (size_t i = 0; i < NUM_LEVEL3_PDE; i++) {
boot_kern_l3pgdir[i] = (uint64_t)boot_kern_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
// boot_identical_kern_l3pgdir[i] = (uint64_t)boot_kern_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
// boot_kern_l3pgdir[i] = (uint64_t)boot_kern_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
// boot_virt_kern_l3pgdir[i] = (uint64_t)boot_kern_l4pgdirs[i] | L3_TYPE_TAB | L3_PTE_VALID;
for (size_t j = 0; j < NUM_LEVEL4_PTE; j++) {
boot_kern_l4pgdirs[i][j] = cur_mem_paddr | L4_TYPE_PAGE | L4_PTE_AF;
cur_mem_paddr += PAGE_SIZE;
}
}
// kern mem
uint64_t kern_mem_start_idx = KERN_MEM_BASE >> LEVEL3_PDE_SHIFT;
uint64_t kern_mem_end_idx = (KERN_MEM_BASE + (PHY_MEM_STOP - PHY_MEM_BASE)) >> LEVEL3_PDE_SHIFT;
for (uint64_t i = kern_mem_start_idx; i < kern_mem_end_idx; i++) {
boot_pgdir[i] = V2P(i << LEVEL3_PDE_SHIFT) | L1_TYPE_SEC | L1_SECT_AP0;
}
// dev mem
// uint64_t dev_mem_end_pgd = PGD_INDEX(DEV_PHYMEM_BASE + DEV_MEM_SZ);
// for (uint64_t i = PGD_INDEX(DEV_PHYMEM_BASE); i < dev_mem_end_idx; i++) {
// boot_pgdir[i] = (uint64_t)boot_dev_l3dir[used_boot_dev_l3dir_idx] | L3_TYPE_SEC | L3_SECT_DEV | L3_SECT_AP0;
// boot_pgdir[PGD_INDEX(MMIO_P2V_WO(PGD_INDEX_TO_PA(i)))] = (uint64_t)boot_dev_l3dir[used_boot_dev_l3dir_idx] | L3_TYPE_SEC | L3_SECT_DEV | L3_SECT_AP0;
// used_boot_dev_l3dir_idx++;
// for (int64_t j = 0; j < 0b111111111; j++) {
// boot_dev_l3dir[i][j] = (uint64_t)boot_dev_l4dir[used_boot_dev_l4dir_idx] | ();
// // uint64_t dev_mem_end_pmd = PMD_INDEX(DEV_PHYMEM_BASE + DEV_MEM_SZ);
// // for (uint64_t j = PMD_INDEX(DEV_PHYMEM_BASE); j < dev_mem_end_pmd; j++) {
// // boot_pmd[j] = PGD_INDEX_TO_PA(j) | L4_TYPE_SEC | L4_SECT_DEV | L4_SECT_AP0;
// // boot_pmd[PGD_INDEX(MMIO_P2V_WO(PGD_INDEX_TO_PA(j)))] = PGD_INDEX_TO_PA(j) | L4_TYPE_SEC | L4_SECT_DEV | L4_SECT_AP0;
// // }
// for (uint64_t k = 0; k < 0b111111111; k++) {
// boot_dev_l4dir[j][k] = DEV_PHYMEM_BASE
// }
// }
// }
}
static void load_boot_pgdir()
{
uint64_t val;
// DACR_W(0x55555555); // set domain access control as client
// TTBCR_W(0x0);
// TTBR0_W((uint64_t)boot_pgdir);
TTBR0_W(0x0);
TTBR1_W((uint64_t)boot_pgdir);
// TTBR0_W((uintptr_t)boot_lowspace_l2pgdir);
// TTBR1_W((uintptr_t)boot_highspace_l2pgdir);
TTBR0_W((uintptr_t)boot_l2pgdir);
TTBR1_W(0);
TCR_W(TCR_VALUE);
MAIR_W((MT_DEVICE_nGnRnE << (8 * AI_DEVICE_nGnRnE_IDX)) | (MT_NORMAL_NC << (8 * AI_NORMAL_NC_IDX)));
// Enable paging using read/modify/write
SCTLR_R(val);
val |= (1 << 0); // EL1 and EL0 stage 1 address translation enabled.
val |= (1 << 1); // Alignment check enable
val |= (1 << 2); // Cacheability control, for data caching.
val |= (1 << 12); // Instruction access Cacheability control
val |= (1 << 19); // forced to XN for the EL1&0 translation regime.
SCTLR_W(val);
@ -101,9 +166,9 @@ void bootmain()
load_boot_pgdir();
__asm__ __volatile__("add sp, sp, %0" ::"r"(KERN_MEM_BASE - PHY_MEM_BASE));
if (!_bss_inited) {
memset(&kernel_data_begin, 0x00, (uint64_t)kernel_data_end - (uint64_t)kernel_data_begin);
memset(&kernel_data_begin, 0x00, (size_t)((uint64_t)kernel_data_end - (uint64_t)kernel_data_begin));
uintptr_t kde = (uintptr_t)kernel_data_end;
_bss_inited = true;
}
main();
}

View File

@ -33,15 +33,18 @@ Modification:
#include "memlayout.h"
#include "page_table_entry.h"
#define TCR_IPS (0 << 32)
// #define TCR_SH1_INNER (0b11 << 28)
// #define TCR_ORGN1_IRGN1_WRITEBACK_WRITEALLOC ((0b01 << 26) | (0b01 << 24))
// #define TCR_SH0_INNER (0b11 << 12)
// #define TCR_ORGN0_IRGN0_WRITEBACK_WRITEALLOC ((0b01 << 10) | (0b01 << 8))
#define TCR_IPS (0 << 0)
#define TCR_TG1_4K (0b10 << 30)
#define TCR_SH1_INNER (0b11 << 28)
#define TCR_ORGN1_IRGN1_WRITEBACK_WRITEALLOC ((0b01 << 26) | (0b01 << 24))
#define TCR_TOSZ (0b11001 << 0)
#define TCR_T1SZ (0b11001 << 16)
#define TCR_TG0_4K (0 << 14)
#define TCR_SH0_INNER (0b11 << 12)
#define TCR_ORGN0_IRGN0_WRITEBACK_WRITEALLOC ((0b01 << 10) | (0b01 << 8))
#define TCR_VALUE \
(TCR_IPS | TCR_TG1_4K | TCR_SH1_INNER | TCR_ORGN1_IRGN1_WRITEBACK_WRITEALLOC | TCR_TG0_4K | TCR_SH0_INNER | TCR_ORGN0_IRGN0_WRITEBACK_WRITEALLOC)
(TCR_IPS | TCR_TG1_4K | TCR_TG0_4K | TCR_TOSZ | TCR_T1SZ)
enum AccessPermission {
AccessPermission_NoAccess = 0,
@ -75,18 +78,13 @@ Read and write mmu pagetable register base addr
#define TTBR1_W(val) __asm__ volatile("msr ttbr1_el1, %0" ::"r"(val))
/*
TTBCR is used for choosing TTBR0 and TTBR1 as page table register.
When TTBCR is set to 0, TTBR0 is selected by default.
Translation Control RegisterTCR
*/
// #define TTBCR_R(val) __asm__ volatile("mrs %0, ttbcr_el1" : "=r"(val))
// #define TTBCR_W(val) __asm__ volatile("msr ttbcr_el1, %0" ::"r"(val))
#define TCR_R(val) __asm__ volatile("mrs %0, tcr_el1" : "=r"(val))
#define TCR_W(val) __asm__ volatile("msr tcr_el1, %0" ::"r"(val))
/*
DACR registers are used to control memory privilage.
The domain value is usually 0x01. The memory privilage will be controled by pte AP/APX
*/
// #define DACR_R(val) __asm__ volatile("mrs %0, dacr_el1" : "=r"(val))
// #define DACR_W(val) __asm__ volatile("msr dacr_el1, %0" :: "r"(val))
#define MAIR_R(val) __asm__ volatile("mrs %0, mair_el1" : "=r"(val))
#define MAIR_W(val) __asm__ volatile("msr mair_el1, %0" ::"r"(val))
/*
Flush TLB when loading a new page table.

View File

@ -34,11 +34,10 @@ Modification:
#define ARCH_BIT 64
/* A72 physical memory layout */
#define PHY_MEM_BASE (0x00000000)
#define PHY_USER_FREEMEM_BASE (0x30000000)
#define PHY_USER_FREEMEM_TOP (0x80000000)
#define PHY_MEM_STOP (0x80000000)
#define PHY_MEM_BASE (0x0000000040000000ULL)
#define PHY_USER_FREEMEM_BASE (0x0000000044000000ULL)
#define PHY_USER_FREEMEM_TOP (0x0000000048000000ULL)
#define PHY_MEM_STOP (0x0000000048000000ULL)
/* PTE-PAGE_SIZE */
#define LEVEL4_PTE_SHIFT 12
@ -48,65 +47,42 @@ Modification:
#define LEVEL3_PDE_SHIFT 21
#define LEVEL3_PDE_SIZE (1 << LEVEL3_PDE_SHIFT)
#define LEVEL2_PTE_SHIFT 30
#define LEVEL2_PDE_SHIFT 30
#define LEVEL2_PDE_SIZE (1 << LEVEL2_PDE_SHIFT)
#define LEVEL1_PTE_SHIFT 39
#define NUM_LEVEL3_PDE (1 << (ARCH_BIT - LEVEL3_PDE_SHIFT)) // how many PTE in a PT
#define NUM_LEVEL2_PDE (1 << (LEVEL1_PTE_SHIFT - LEVEL2_PDE_SHIFT))
#define NUM_LEVEL3_PDE (1 << (LEVEL2_PDE_SHIFT - LEVEL3_PDE_SHIFT)) // how many PDE in a PT
#define NUM_LEVEL4_PTE (1 << (LEVEL3_PDE_SHIFT - LEVEL4_PTE_SHIFT)) // how many PTE in a PT
#define NUM_TOPLEVEL_PDE NUM_LEVEL3_PDE
#define NUM_TOPLEVEL_PDE NUM_LEVEL2_PDE
#define PAGE_SIZE LEVEL4_PTE_SIZE
#define MAX_NR_FREE_PAGES ((PHY_MEM_STOP - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)
/* Deivce memory layout */
#define DEV_PHYMEM_BASE (0x0000000000000000)
#define DEV_VRTMEM_BASE (0x0000ffffffffffff)
#define DEV_MEM_SZ (0x10000000)
#define DEV_PHYMEM_BASE (0x0000000000000000ULL)
#define DEV_VRTMEM_BASE (0x0000004000000000ULL)
#define DEV_MEM_SZ (0x0000000010000000ULL)
/* User memory layout */
#define USER_STACK_SIZE PAGE_SIZE
#define USER_MEM_BASE (0x0000000000000000)
#define USER_MEM_BASE (0x0000000000000000ULL)
#define USER_MEM_TOP DEV_VRTMEM_BASE
#define USER_IPC_SPACE_BASE (0x7000000000000000)
#define USER_IPC_SPACE_BASE (0x0000003000000000ULL)
#define USER_IPC_USE_ALLOCATOR_WATERMARK (0x0000003000010000ULL)
#define USER_IPC_SPACE_TOP (USER_MEM_TOP - USER_STACK_SIZE)
/* Kernel memory layout */
#define KERN_MEM_BASE (0xffff000000000000ULL) // First kernel virtual address
#define KERN_MEM_BASE (0x0000006040000000ULL) // First kernel virtual address
#define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE)
#define V2P(a) (((uint64_t)(a)) - KERN_MEM_BASE)
#define P2V(a) ((void *)(((char *)(a)) + KERN_MEM_BASE))
#define V2P(a) (((uint64_t)(a)) - KERN_OFFSET)
#define P2V(a) ((void *)(((char *)(a)) + KERN_OFFSET))
#define V2P_WO(x) ((x) - KERN_MEM_BASE) // same as V2P, but without casts
#define P2V_WO(x) ((x) + KERN_MEM_BASE) // same as P2V, but without casts
// one beyond the highest possible virtual address.
#define MAXVA (KERN_MEM_BASE + (1ULL<<38))
// qemu puts UART registers here in physical memory.
#define UART0 (KERN_MEM_BASE + 0x09000000L)
#define UART0_IRQ 33
// virtio mmio interface
#define VIRTIO0 (KERN_MEM_BASE + 0x0a000000L)
#define VIRTIO0_IRQ 48
#define V2P_WO(x) ((x) - KERN_OFFSET) // same as V2P, but without casts
#define P2V_WO(x) ((x) + KERN_OFFSET) // same as P2V, but without casts
#define TIMER0_IRQ 27
// interrupt controller GICv3
#define GICV3 (KERN_MEM_BASE + 0x08000000L)
#define GICV3_REDIST (KERN_MEM_BASE + 0x080a0000L)
// map kernel stacks beneath the trampoline,
// each surrounded by invalid guard pages.
#define PGSIZE 4096 // bytes per page
#define KSTACK(p) (MAXVA - ((p)+1) * 2*PGSIZE)
// extract the three 9-bit page table indices from a virtual address.
#define PXMASK 0x1FF // 9 bits
#define PXSHIFT(level) (39-(level)*9)
#define PX(level, va) ((((uint64)(va)) >> PXSHIFT(level)) & PXMASK)
// clang-format on

View File

@ -29,8 +29,8 @@ Modification:
#include "mmu.h"
#include "mmu_common.h"
void GetUsrPteAttr(uintptr_t* attr)
{
// void GetUsrPteAttr(uintptr_t* attr)
// {
// static char init = 0;
// static PageTblEntry usr_pte_attr;
// if (init == 0) {
@ -44,6 +44,14 @@ void GetUsrPteAttr(uintptr_t* attr)
// usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
// }
// *attr = usr_pte_attr.entry;
// }
void GetUsrPteAttr(uintptr_t* attr)
{
static char init = 0;
if (init == 0) {
init = 1;
}
}
void GetUsrDevPteAttr(uintptr_t* attr)

View File

@ -0,0 +1,3 @@
SRC_FILES := uart.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,29 @@
#pragma once
#include "memlayout.h"
#include "mmio_access.h"
#define UART0_BASE (0x09000000ULL)
#define UART0_REG(reg) ((volatile uint32_t*)(MMIO_P2V_WO(UART0_BASE + reg)))
// the UART control registers.
// pl011
#define DR 0x00
#define FR 0x18
#define FR_RXFE (1 << 4) // recieve fifo empty
#define FR_TXFF (1 << 5) // transmit fifo full
#define FR_RXFF (1 << 6) // recieve fifo full
#define FR_TXFE (1 << 7) // transmit fifo empty
#define IBRD 0x24
#define FBRD 0x28
#define LCRH 0x2c
#define LCRH_FEN (1 << 4)
#define LCRH_WLEN_8BIT (3 << 5)
#define CR 0x30
#define IMSC 0x38
#define INT_RX_ENABLE (1 << 4)
#define INT_TX_ENABLE (1 << 5)
#define ICR 0x44
#define UART_READ_REG(reg) (*(UART0_REG(reg)))
#define UART_WRITE_REG(reg, v) (*(UART0_REG(reg)) = (v))

View File

@ -0,0 +1,128 @@
//
// low-level driver routines for pl011 UART.
//
#include "uart.h"
#include "actracer.h"
#include "uart_common_ope.h"
// the UART control registers are memory-mapped
// at address UART0. this macro returns the
// address of one of the registers.
// the transmit output buffer.
#define UART_TX_BUF_SIZE 32
static char uart_tx_buf[UART_TX_BUF_SIZE];
uint64_t uart_tx_w; // write next to uart_tx_buf[uart_tx_w % UART_TX_BUF_SIZE]
uint64_t uart_tx_r; // read next from uart_tx_buf[uart_tx_r % UART_TX_BUF_SIZE]
/* One-time pl011 initialization. The register write order matters:
 * the UART must be disabled before LCRH is changed, then re-enabled. */
void uartinit(void)
{
    // disable uart (CR = 0) before reprogramming it
    UART_WRITE_REG(CR, 0);
    // mask all UART interrupt sources while configuring
    UART_WRITE_REG(IMSC, 0);
    // in qemu, it is not necessary to set baudrate (IBRD/FBRD left as-is).
    // enable FIFOs and set word length to 8 bits, no parity.
    UART_WRITE_REG(LCRH, LCRH_FEN | LCRH_WLEN_8BIT);
    // 0x301 = UARTEN | TXE | RXE: enable receive, transmit and the uart itself
    UART_WRITE_REG(CR, 0x301);
    // unmask transmit and receive interrupts
    UART_WRITE_REG(IMSC, INT_RX_ENABLE | INT_TX_ENABLE);
}
// Push bytes from the software TX ring buffer into the pl011 data register
// until either the ring drains or the hardware TX FIFO fills up.
// Called from both the write path (uartputc) and the interrupt path.
void uartstart()
{
    for (;;) {
        if (uart_tx_w == uart_tx_r) {
            // ring buffer empty — nothing left to send
            return;
        }
        if (UART_READ_REG(FR) & FR_TXFF) {
            // hardware TX FIFO full; the UART will interrupt
            // when it is ready to accept another byte
            return;
        }
        // pop one byte and hand it to the hardware
        int byte = uart_tx_buf[uart_tx_r % UART_TX_BUF_SIZE];
        uart_tx_r += 1;
        UART_WRITE_REG(DR, byte);
    }
}
// Enqueue one byte in the TX ring buffer and kick the transmitter.
// Blocks while the buffer is full, so it must not be called from
// interrupt context; it is only suitable for use by write().
void uartputc(uint8_t c)
{
    while (uart_tx_w == uart_tx_r + UART_TX_BUF_SIZE) {
        /* Buffer full. Actively drain it into the hardware FIFO: the old
         * empty busy-wait relied on something else consuming the ring,
         * and with interrupts masked nothing ever would — a deadlock. */
        uartstart();
    }
    uart_tx_buf[uart_tx_w % UART_TX_BUF_SIZE] = c;
    uart_tx_w += 1;
    uartstart();
    return;
}
// read one input character from the UART.
// returns 0xFF if none is waiting. (An earlier comment said -1, but the
// uint8_t return type makes the sentinel 0xFF — note that a genuine 0xFF
// data byte is therefore indistinguishable from "empty".)
static uint8_t uartgetc(void)
{
    if (UART_READ_REG(FR) & FR_RXFE)
        return 0xFF;
    else
        return UART_READ_REG(DR);
}
// PL011 interrupt handler: raised when input has arrived, when the UART
// is ready for more output, or both. Called from trap.c.
void uartintr(void)
{
    // drain the RX FIFO; received bytes are currently discarded.
    for (;;) {
        int ch = uartgetc();
        if (ch == 0xFF) {
            break;
        }
    }
    // push any buffered output now that the TX FIFO may have space.
    uartstart();
    // acknowledge both interrupt sources in the interrupt clear register.
    UART_WRITE_REG(ICR, INT_RX_ENABLE | INT_TX_ENABLE);
}
// IRQ-number lookup for the serial driver ops table.
// Not wired to a real interrupt controller yet: always reports 0.
static uint32_t UartGetIrqnum()
{
    uint32_t irq_number = 0;
    return irq_number;
}
// Serial driver ops table exported to the hardkernel layer via
// hardkernel_uart_init().
static struct XiziSerialDriver hardkernel_serial_driver = {
    .sys_serial_init = uartinit,
    .get_serial_irqnum = UartGetIrqnum,
    .putc = uartputc,
    .getc = uartgetc, // NOTE(review): returns 0xFF (not -1) when RX FIFO is empty
};
// Initialize the UART hardware and hand back the driver ops table.
struct XiziSerialDriver* hardkernel_uart_init(struct TraceTag* hardkernel_tag)
{
    (void)hardkernel_tag; // presumably reserved for tracer registration — currently unused
    hardkernel_serial_driver.sys_serial_init();
    return &hardkernel_serial_driver;
}

View File

@ -1,5 +1,5 @@
SRC_DIR := fs shell lib boards tools app
SRC_DIR :=
# SRC_DIR := fs shell lib boards tools app
include $(KERNEL_ROOT)/compiler.mk

View File

@ -79,33 +79,33 @@ struct proghdr {
};
#elif (ARCH_BIT == 64)
struct elfhdr {
uint magic; // must equal ELF_MAGIC
uchar elf[12];
ushort type;
ushort machine;
uint version;
uint64 entry;
uint64 phoff;
uint64 shoff;
uint flags;
ushort ehsize;
ushort phentsize;
ushort phnum;
ushort shentsize;
ushort shnum;
ushort shstrndx;
uint32_t magic; // must equal ELF_MAGIC
uint8_t elf[12];
uint16_t type;
uint16_t machine;
uint32_t version;
uint64_t entry;
uint64_t phoff;
uint64_t shoff;
uint32_t flags;
uint16_t ehsize;
uint16_t phentsize;
uint16_t phnum;
uint16_t shentsize;
uint16_t shnum;
uint16_t shstrndx;
};
// Program section header
struct proghdr {
uint32 type;
uint32 flags;
uint64 off;
uint64 vaddr;
uint64 paddr;
uint64 filesz;
uint64 memsz;
uint64 align;
uint32_t type;
uint32_t flags;
uint64_t off;
uint64_t vaddr;
uint64_t paddr;
uint64_t filesz;
uint64_t memsz;
uint64_t align;
};
#endif

View File

@ -44,6 +44,7 @@ Modification:
#define LEVEL4_PTE_IDX(v) (((uintptr_t)(v) >> LEVEL4_PTE_SHIFT) & (NUM_LEVEL4_PTE - 1))
#define LEVEL4_PTE_ADDR(v) ALIGNDOWN(v, LEVEL4_PTE_SIZE)
#define LEVEL3_PDE_ADDR(v) ALIGNDOWN(v, LEVEL3_PDE_SIZE)
#define TOPLEVLE_PAGEDIR_SIZE sizeof(uintptr_t) * NUM_TOPLEVEL_PDE
// clang-format on

View File

@ -28,18 +28,18 @@ Modification:
1. first version
*************************************************/
.section .rawdata_fs_img
.globl user_apps
user_apps:
.incbin "../services/app/fs.img"
# .globl user_apps
# user_apps:
# .incbin "../services/app/fs.img"
.section .rawdata_init
.globl initapp
initapp:
.incbin "../services/app/bin/init"
# .section .rawdata_init
# .globl initapp
# initapp:
# .incbin "../services/app/bin/init"
.section .rawdata_memfs
.globl memfs
memfs:
.incbin "../services/app/bin/fs_server"
# .section .rawdata_memfs
# .globl memfs
# memfs:
# .incbin "../services/app/bin/fs_server"
.end

View File

@ -1,3 +1,13 @@
#SRC_FILES:= kalloc.c pagetable.c pagetable_level2.c buddy.c object_allocator.c share_page.c
#include $(KERNEL_ROOT)/compiler.mk
ifneq ($(findstring $(BOARD), ok1028a-c), )
SRC_FILES := kalloc.c pagetable.c pagetable_level3.c buddy.c object_allocator.c share_page.c
endif
ifneq ($(findstring $(BOARD), imx6q-sabrelite zynq7000-zc702), )
SRC_FILES:= kalloc.c pagetable.c pagetable_level2.c buddy.c object_allocator.c share_page.c
endif
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file pagetable_level3.c
 * @brief page walk and level-3 page table management
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.05.06
*/
/*************************************************
File name: pagetable_level3.c
Description: page-table walk and user page-table teardown for the 3-level translation scheme
Others:
History:
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include <stdint.h>
#include "core.h"
#include "memlayout.h"
#include "assert.h"
#include "buddy.h"
#include "kalloc.h"
#include "pagetable.h"
/// @brief Walk the multi-level page table for vaddr and return the slot of
///        its level-4 PTE, allocating missing intermediate tables on demand.
/// @param pgdir top-level (level-2) page directory, as a virtual address
/// @param vaddr virtual address to resolve
/// @param alloc when true, allocate absent level-3/level-4 tables
/// @return pointer to the level-4 PTE slot for vaddr; NULL when a required
///         table is missing and alloc is false, or when kalloc fails
uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
{
    // get page table addr
    assert(pgdir != NULL);
    uintptr_t pde_attr = 0;
    _p_pgtbl_mmu_access->MmuPdeAttr(&pde_attr);

    uintptr_t* l2_pde_ptr = (uintptr_t*)&pgdir[vaddr >> LEVEL2_PDE_SHIFT];
    uintptr_t* l3_pde_vaddr;
    if (*l2_pde_ptr != 0) {
        // strip the attribute bits to recover the level-3 table's physical address
        uintptr_t l3_pde_paddr = (*l2_pde_ptr) & ~pde_attr;
        l3_pde_vaddr = (uintptr_t*)P2V(l3_pde_paddr);
    } else {
        if (!alloc || !(l3_pde_vaddr = (uintptr_t*)kalloc(sizeof(uintptr_t) * NUM_LEVEL3_PDE))) {
            return NULL;
        }
        // BUGFIX: zero all NUM_LEVEL3_PDE entries of the new level-3 table.
        // The previous code cleared NUM_LEVEL4_PTE entries, which is only
        // correct by accident when the two counts happen to coincide.
        memset(l3_pde_vaddr, 0, sizeof(uintptr_t) * NUM_LEVEL3_PDE);
        *l2_pde_ptr = V2P(l3_pde_vaddr) | pde_attr;
    }

    uintptr_t* l3_pde_ptr = (uintptr_t*)&l3_pde_vaddr[(vaddr >> LEVEL3_PDE_SHIFT) & (NUM_LEVEL3_PDE - 1)];
    uintptr_t* l4_pte_vaddr;
    if (*l3_pde_ptr != 0) {
        uintptr_t l4_pte_paddr = (*l3_pde_ptr) & ~pde_attr;
        l4_pte_vaddr = (uintptr_t*)P2V(l4_pte_paddr);
    } else {
        if (!alloc || !(l4_pte_vaddr = (uintptr_t*)kalloc(sizeof(uintptr_t) * NUM_LEVEL4_PTE))) {
            return NULL;
        }
        memset(l4_pte_vaddr, 0, sizeof(uintptr_t) * NUM_LEVEL4_PTE);
        *l3_pde_ptr = V2P(l4_pte_vaddr) | pde_attr;
    }
    return &l4_pte_vaddr[LEVEL4_PTE_IDX(vaddr)];
}
/// @brief Free every user page and intermediate page table reachable from a
///        top-level user page directory, then the directory itself.
/// @param pgdir page directory descriptor to tear down
void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
{
    // bounds of the kernel-managed and user-managed physical pools, used
    // below to route each page to the matching deallocator
    uintptr_t low_bound = kern_virtmem_buddy.mem_start, high_bound = kern_virtmem_buddy.mem_end;
    uintptr_t user_low_bound = user_phy_freemem_buddy.mem_start, user_high_bound = user_phy_freemem_buddy.mem_end;
    // NOTE(review): despite its name, l3_entry_idx indexes the top-level
    // (level-2) directory; end_idx restricts the walk to user space.
    uintptr_t end_idx = USER_MEM_TOP >> LEVEL2_PDE_SHIFT;
    for (uintptr_t l3_entry_idx = 0; l3_entry_idx < end_idx; l3_entry_idx++) {
        // free each level3 page table
        uintptr_t* l3_pde_paddr = (uintptr_t*)LEVEL3_PDE_ADDR(pgdir->pd_addr[l3_entry_idx]);
        if (l3_pde_paddr != NULL) {
            for (uintptr_t l4_entry_idx = 0; l4_entry_idx < NUM_LEVEL3_PDE; l4_entry_idx++) {
                // NOTE(review): l3_pde_paddr is a physical address yet is
                // indexed directly here, while the level-4 table below is
                // read through P2V — confirm physical memory is mapped 1:1,
                // otherwise this access needs P2V as well.
                uintptr_t* l4_pte_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(l3_pde_paddr[l4_entry_idx]);
                if (l4_pte_paddr != NULL) {
                    for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
                        // reconstruct the virtual address this PTE maps
                        uintptr_t vaddr = (l3_entry_idx << LEVEL2_PDE_SHIFT) | (l4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
                        // get page paddr
                        uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(l4_pte_paddr))[page_entry_idx], PAGE_SIZE);
                        if (page_paddr != NULL) {
                            // Ensure the virtual address is not in the IPC address space
                            assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
                            if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
                                kfree(P2V(page_paddr));
                            } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
                                raw_free((char*)page_paddr);
                            }
                        }
                    }
                    // release the level-4 page table itself
                    kfree(P2V(l4_pte_paddr));
                }
            }
            // release the level-3 page table itself
            kfree(P2V(l3_pde_paddr));
        }
    }
    // finally release the top-level directory storage
    kfree((char*)pgdir->pd_addr);
}