ArmV8: support arch MMU, interrupt, and clock

This commit is contained in:
TXuian 2024-05-24 16:06:09 +08:00
parent c1e99c449a
commit 80f80b64f0
9 changed files with 99 additions and 162 deletions

View File

@ -48,7 +48,7 @@ Modification:
static inline void invalidate_dcache(uintptr_t start, uintptr_t end)
{
InvalidateL1Dcache(start, end);
// InvalidateL1Dcache(start, end);
// InvalidateL2Cache(start, end);
}
@ -65,7 +65,7 @@ static inline void invalidate_dcache(uintptr_t start, uintptr_t end)
static inline void invalidate_dcache_all(void)
{
InvalidateL1DcacheAll();
// InvalidateL1DcacheAll();
// InvalidateL2CacheAll();
}
@ -78,7 +78,7 @@ static inline void invalidate_dcache_all(void)
****************************************************************************/
static inline void invalidate_icache(uintptr_t start, uintptr_t end)
{
InvalidateL1Icache(start, end);
// InvalidateL1Icache(start, end);
}
/****************************************************************************
@ -92,7 +92,7 @@ static inline void invalidate_icache(uintptr_t start, uintptr_t end)
static inline void invalidate_icache_all(void)
{
InvalidateL1IcacheAll();
// InvalidateL1IcacheAll();
}
/****************************************************************************
@ -106,7 +106,7 @@ static inline void invalidate_icache_all(void)
static inline void clean_dcache(uintptr_t start, uintptr_t end)
{
CleanL1Dcache(start, end);
// CleanL1Dcache(start, end);
// CleanL2Cache(start, end);
}
@ -121,7 +121,7 @@ static inline void clean_dcache(uintptr_t start, uintptr_t end)
static inline void clean_dcache_all(void)
{
CleanL1DcacheAll();
// CleanL1DcacheAll();
// CleanL2CacheAll();
}
@ -137,7 +137,7 @@ static inline void clean_dcache_all(void)
static inline void flush_dcache(uintptr_t start, uintptr_t end)
{
FlushL1Dcache(start, end);
// FlushL1Dcache(start, end);
// FlushL2Cache(start, end);
}
@ -151,7 +151,7 @@ static inline void flush_dcache(uintptr_t start, uintptr_t end)
static inline void flush_dcache_all(void)
{
FlushL1DcacheAll();
// FlushL1DcacheAll();
// FlushL2CacheAll();
}
@ -165,7 +165,7 @@ static inline void flush_dcache_all(void)
static inline void enable_icache(void)
{
EnableL1Icache();
// EnableL1Icache();
}
/****************************************************************************
@ -178,7 +178,7 @@ static inline void enable_icache(void)
static inline void disable_icache(void)
{
DisableL1Icache();
// DisableL1Icache();
}
/****************************************************************************
@ -191,7 +191,7 @@ static inline void disable_icache(void)
static inline void enable_dcache(void)
{
EnableL1Dcache();
// EnableL1Dcache();
// EnableL2Cache();
}
@ -205,9 +205,9 @@ static inline void enable_dcache(void)
static inline void disable_dcache(void)
{
FlushL1DcacheAll();
// FlushL1DcacheAll();
// pl310_flush_all();
DisableL1Dcache();
// DisableL1Dcache();
// DisableL2Cache();
}

View File

@ -1,8 +1,6 @@
#include "actracer.h"
#include "core.h"
#include "cortex_a72.h"
#include "generic_timer.h"
#include "memlayout.h"
#include "clock_common_op.h"
@ -12,10 +10,6 @@
#define CNTV_CTL_IMASK (1 << 1)
#define CNTV_CTL_ISTATUS (1 << 2)
static void enable_timer(void);
static void disable_timer(void);
static void reload_timer(void);
static void enable_timer()
{
uint64_t c = r_cntv_ctl_el0();
@ -32,24 +26,6 @@ static void disable_timer()
w_cntv_ctl_el0(c);
}
static void arch_timer_interrupt_enable()
{
uint64_t c = r_cntv_ctl_el0();
if (c &= CNTV_CTL_IMASK) {
c |= ~CNTV_CTL_IMASK;
w_cntv_ctl_el0(c);
}
}
static void arch_timer_interrupt_disable()
{
uint64_t c = r_cntv_ctl_el0();
if (!(c &= CNTV_CTL_IMASK)) {
c |= CNTV_CTL_IMASK;
w_cntv_ctl_el0(c);
}
}
static void reload_timer()
{
// interval 100ms
@ -59,26 +35,16 @@ static void reload_timer()
w_cntv_tval_el0(interval_clk);
}
void delay(uint32_t cycles)
{
uint64_t start = r_cntvct_el0();
while ((r_cntvct_el0() - start) < cycles)
__asm__ volatile("yield" ::: "memory");
}
void _sys_clock_init()
{
arch_timer_interrupt_disable();
disable_timer();
reload_timer();
enable_timer();
arch_timer_interrupt_enable();
}
static uint32_t _get_clock_int()
{
return 0;
return 27;
}
static uint64_t _get_tick()

View File

@ -1,42 +1,41 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
// armv8 generic timer
static inline uint64_t
r_cntv_ctl_el0()
static inline uint64_t r_cntv_ctl_el0()
{
uint64_t x;
asm volatile("mrs %0, cntv_ctl_el0" : "=r"(x));
return x;
}
static inline void
w_cntv_ctl_el0(uint64_t x)
static inline void w_cntv_ctl_el0(uint64_t x)
{
asm volatile("msr cntv_ctl_el0, %0" : : "r"(x));
}
static inline uint64_t
r_cntv_tval_el0()
static inline uint64_t r_cntv_tval_el0()
{
uint64_t x;
asm volatile("mrs %0, cntv_tval_el0" : "=r"(x));
return x;
}
static inline void
w_cntv_tval_el0(uint64_t x)
static inline void w_cntv_tval_el0(uint64_t x)
{
asm volatile("msr cntv_tval_el0, %0" : : "r"(x));
}
static inline uint64_t
r_cntvct_el0()
static inline uint64_t r_cntvct_el0()
{
uint64_t x;
asm volatile("mrs %0, cntvct_el0" : "=r"(x));
return x;
}
static inline uint64_t
r_cntfrq_el0()
static inline uint64_t r_cntfrq_el0()
{
uint64_t x;
asm volatile("mrs %0, cntfrq_el0" : "=r"(x));

View File

@ -57,20 +57,20 @@ Modification:
.global _spinlock_lock
.func _spinlock_lock
_spinlock_lock:
mov w2, #1
sevl
wfe
// wait for an event signal
ldaxrb w1, [x0] // check if the spinlock is currently unlocked
cmp x1, #UNLOCKED
wfe
// wfe // wait for an event signal
ldaxrb w1, [x0] // check if the spinlock is currently unlocked
cmp w1, #UNLOCKED
bne _spinlock_lock
mrs x1, mpidr_el1 // get our CPU ID
and x1, x1, #3
stxrb w2, w1, [x0]
cbnz x2, _spinlock_lock // check if the write was successful, if the write failed, start over
cmp x2, #0
bne _spinlock_lock // check if the write was successful, if the write failed, start over
dmb ish // Ensure that accesses to shared resource have completed
@ -88,21 +88,21 @@ _spinlock_unlock:
mrs x1, mpidr_el1 // get our CPU ID
and x1, x1, #3
ldr x2, [x0]
cmp x1, x2
ldr w2, [x0]
cmp w1, w2
bne 1f //doesn't match,jump to 1
dmb ish
mov x1, #UNLOCKED
str x1, [x0]
mov w1, #UNLOCKED
str w1, [x0]
dsb ish //Ensure that no instructions following the barrier execute until
// all memory accesses prior to the barrier have completed.
sev // send event to wake up other cores waiting on spinlock
sevl // send event to wake up other cores waiting on spinlock
mov x0, #0 // return success
ret

View File

@ -73,7 +73,7 @@ void syscall_arch_handler(struct trapframe* tf)
xizi_enter_kernel();
assert(cur_cpu()->task == NULL);
sys_exit(cur_cpu()->task);
context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
context_switch(&cur_cpu()->task->thread_context.context, cur_cpu()->scheduler);
panic("dabort end should never be reashed.\n");
}
}

View File

@ -41,13 +41,13 @@ static struct MmuDriverRightGroup right_group;
void load_pgdir(uintptr_t pgdir_paddr)
{
/* get cache driver */
struct ICacheDone* p_icache_done = AchieveResource(&right_group.icache_driver_tag);
struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
// struct ICacheDone* p_icache_done = AchieveResource(&right_group.icache_driver_tag);
// struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
TTBR0_W((uint64_t)pgdir_paddr);
CLEARTLB(0);
p_icache_done->invalidateall();
p_dcache_done->flushall();
// p_icache_done->invalidateall();
// p_dcache_done->flushall();
}
__attribute__((always_inline)) inline static void _tlb_flush(uintptr_t va)

View File

@ -29,77 +29,48 @@ Modification:
#include "mmu.h"
#include "mmu_common.h"
// void GetUsrPteAttr(uintptr_t* attr)
// {
// static char init = 0;
// static PageTblEntry usr_pte_attr;
// if (init == 0) {
// init = 1;
// clang-format off
#define ARMV8_PTE_ATTR_MASK(attr) (((attr) & 0b111) << 2)
#define ARMV8_PTE_DEVICE ARMV8_PTE_ATTR_MASK(0x0)
#define ARMV8_PTE_NORMAL ARMV8_PTE_ATTR_MASK(0x1)
// usr_pte_attr.entry = 0;
// usr_pte_attr.desc_type = PAGE_4K;
// usr_pte_attr.B = 1;
// usr_pte_attr.C = 1;
// usr_pte_attr.S = 1;
// usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
// }
// *attr = usr_pte_attr.entry;
// }
#define ARMV8_PTE_AP(ap) (((ap) & 0b11) << 6)
#define ARMV8_PTE_AP_U ARMV8_PTE_AP(0x01)
#define ARMV8_PTE_AP_K ARMV8_PTE_AP(0x00)
#define ARMV8_PTE_AP_RO ARMV8_PTE_AP(0b10)
#define ARMV8_PTE_AP_RW ARMV8_PTE_AP(0b00)
#define ARMV8_PTE_AF (0x1 << 10)
#define ARMV8_PTE_PXN (1ULL << 53) // Privileged eXecute Never
#define ARMV8_PTE_UXN (1ULL << 54) // Unprivileged(user) eXecute Never
#define ARMV8_PTE_XN (ARMV8_PTE_PXN | ARMV8_PTE_UXN)
#define ARMV8_PTE_VALID (0b11 << 0)
#define ARMV8_PDE_VALID (0b11 << 0)
// clang-format on
void GetUsrPteAttr(uintptr_t* attr)
{
static char init = 0;
if (init == 0) {
init = 1;
}
*attr = ARMV8_PTE_AP_U | ARMV8_PTE_AP_RW | ARMV8_PTE_AF | ARMV8_PTE_NORMAL | ARMV8_PTE_VALID;
}
void GetUsrDevPteAttr(uintptr_t* attr)
{
// static char init = 0;
// static PageTblEntry usr_pte_attr;
// if (init == 0) {
// init = 1;
// usr_pte_attr.entry = 0;
// usr_pte_attr.desc_type = PAGE_4K;
// usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
// }
// *attr = usr_pte_attr.entry;
*attr = ARMV8_PTE_AP_U | ARMV8_PTE_AP_RW | ARMV8_PTE_AF | ARMV8_PTE_DEVICE | ARMV8_PTE_XN | ARMV8_PTE_VALID;
}
void GetDevPteAttr(uintptr_t* attr)
{
// static char init = 0;
// static PageTblEntry dev_pte_attr;
// if (init == 0) {
// init = 1;
// dev_pte_attr.entry = 0;
// dev_pte_attr.desc_type = PAGE_4K;
// dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
// }
// *attr = dev_pte_attr.entry;
*attr = ARMV8_PTE_AP_K | ARMV8_PTE_AP_RW | ARMV8_PTE_AF | ARMV8_PTE_DEVICE | ARMV8_PTE_XN | ARMV8_PTE_VALID;
}
void GetKernPteAttr(uintptr_t* attr)
{
// static char init = 0;
// static PageTblEntry kern_pte_attr;
// if (init == 0) {
// init = 1;
// kern_pte_attr.entry = 0;
// kern_pte_attr.desc_type = PAGE_4K;
// kern_pte_attr.B = 1;
// kern_pte_attr.C = 1;
// kern_pte_attr.S = 1;
// kern_pte_attr.AP1_0 = AccessPermission_KernelOnly;
// }
// *attr = kern_pte_attr.entry;
*attr = ARMV8_PTE_AP_K | ARMV8_PTE_AP_RW | ARMV8_PTE_AF | ARMV8_PTE_NORMAL | ARMV8_PTE_VALID;
}
void GetPdeAttr(uintptr_t* attr)
{
// *attr = PAGE_DIR_COARSE;
*attr = ARMV8_PDE_VALID;
}

View File

@ -45,6 +45,7 @@ KERNELPATHS += \
-I$(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/ \
-I$(KERNEL_ROOT)/hardkernel/mmu/arm/armv8-a/cortex-a72/$(BOARD) \
-I$(KERNEL_ROOT)/hardkernel/mmu/arm/armv8-a/cortex-a72/include \
-I$(KERNEL_ROOT)/hardkernel/clock/arm/armv8-a/cortex-a72/include \
-I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/ \
-I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/$(BOARD) \
-I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/gicv3 \

View File

@ -44,7 +44,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
uintptr_t pde_attr = 0;
_p_pgtbl_mmu_access->MmuPdeAttr(&pde_attr);
uintptr_t* l2_pde_ptr = (uintptr_t*)&pgdir[vaddr >> LEVEL2_PDE_SHIFT];
uintptr_t* l2_pde_ptr = (uintptr_t*)&pgdir[(vaddr >> LEVEL2_PDE_SHIFT) & (NUM_LEVEL2_PDE - 1)];
uintptr_t* l3_pde_vaddr;
if (*l2_pde_ptr != 0) {
@ -55,7 +55,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
return NULL;
}
memset(l3_pde_vaddr, 0, sizeof(uintptr_t) * NUM_LEVEL4_PTE);
memset(l3_pde_vaddr, 0, sizeof(uintptr_t) * NUM_LEVEL3_PDE);
*l2_pde_ptr = V2P(l3_pde_vaddr) | pde_attr;
}
@ -79,38 +79,38 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
{
uintptr_t low_bound = kern_virtmem_buddy.mem_start, high_bound = kern_virtmem_buddy.mem_end;
uintptr_t user_low_bound = user_phy_freemem_buddy.mem_start, user_high_bound = user_phy_freemem_buddy.mem_end;
uintptr_t end_idx = USER_MEM_TOP >> LEVEL2_PDE_SHIFT;
// uintptr_t low_bound = kern_virtmem_buddy.mem_start, high_bound = kern_virtmem_buddy.mem_end;
// uintptr_t user_low_bound = user_phy_freemem_buddy.mem_start, user_high_bound = user_phy_freemem_buddy.mem_end;
// uintptr_t end_idx = (USER_MEM_TOP >> LEVEL2_PDE_SHIFT) & (NUM_LEVEL2_PDE - 1);
for (uintptr_t l3_entry_idx = 0; l3_entry_idx < end_idx; l3_entry_idx++) {
// free each level3 page table
uintptr_t* l3_pde_paddr = (uintptr_t*)LEVEL3_PDE_ADDR(pgdir->pd_addr[l3_entry_idx]);
if (l3_pde_paddr != NULL) {
for (uintptr_t l4_entry_idx = 0; l4_entry_idx < NUM_LEVEL3_PDE; l4_entry_idx++) {
uintptr_t* l4_pte_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(l3_pde_paddr[l4_entry_idx]);
if (l4_pte_paddr != NULL) {
for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
uintptr_t vaddr = (l3_entry_idx << LEVEL2_PDE_SHIFT) | (l4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
// for (uintptr_t l3_entry_idx = 0; l3_entry_idx < end_idx; l3_entry_idx++) {
// // free each level3 page table
// uintptr_t* l3_pde_paddr = (uintptr_t*)LEVEL3_PDE_ADDR(pgdir->pd_addr[l3_entry_idx]);
// if (l3_pde_paddr != NULL) {
// for (uintptr_t l4_entry_idx = 0; l4_entry_idx < NUM_LEVEL3_PDE; l4_entry_idx++) {
// uintptr_t* l4_pte_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(l3_pde_paddr[l4_entry_idx]);
// if (l4_pte_paddr != NULL) {
// for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
// uintptr_t vaddr = (l3_entry_idx << LEVEL2_PDE_SHIFT) | (l4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
// get page paddr
uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(l4_pte_paddr))[page_entry_idx], PAGE_SIZE);
if (page_paddr != NULL) {
// Ensure the virtual address is not in the IPC address space
assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
// // get page paddr
// uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(l4_pte_paddr))[page_entry_idx], PAGE_SIZE);
// if (page_paddr != NULL) {
// // Ensure the virtual address is not in the IPC address space
// assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
kfree(P2V(page_paddr));
} else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
raw_free((char*)page_paddr);
}
}
}
kfree(P2V(l4_pte_paddr));
}
}
kfree(P2V(l3_pde_paddr));
}
}
kfree((char*)pgdir->pd_addr);
// if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
// kfree(P2V(page_paddr));
// } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
// raw_free((char*)page_paddr);
// }
// }
// }
// kfree(P2V(l4_pte_paddr));
// }
// }
// kfree(P2V(l3_pde_paddr));
// }
// }
// kfree((char*)pgdir->pd_addr);
}