forked from xuos/xiuos

Start support for ok1028a-c.

This commit is contained in:
parent 361ea2b53e
commit a836b7f5c8
@ -30,9 +30,9 @@ Modification:
*************************************************/

/*********cortex-a72 general register************
EL0 | EL1 | EL2 | EL3

x0;
x1;
x2;
x3;
@ -57,19 +57,18 @@ Modification:
x22;
x23;
x24;
x25;
x26;
x27;
x28;
x29;
x30;
-/*********cortex-a72 special register************
+*********cortex-a72 special register************
XZR
PC
SP_EL0 SP_EL1 SP_EL2 SP_EL3
SPSR_EL1 SPSR_EL2 SPSR_EL3
ELR_EL1 ELR_EL2 ELR_EL3
************************************************/


#include "core.h"
@ -33,6 +33,8 @@ Modification:
#define NO_INT 0x80 // disable IRQ.
#define DIS_INT 0xc0 // disable both IRQ and FIQ.

+#define MODE_STACK_SIZE 0x1000

//! @name SPSR fields
//@{
#define SPSR_EL1_N (1 << 31) //!< Negative
@ -211,12 +213,12 @@ __attribute__((__always_inline__)) static inline void arch_set_main_params(struc
/// @param param4
/// @param param5
/// @return
-extern int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4, uintptr_t param5);
+extern int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4);
__attribute__((__always_inline__)) static inline int arch_syscall(struct trapframe* tf, int* syscall_num)
{
    // call syscall
    *syscall_num = tf->x8;
-    return syscall(*syscall_num, tf->x1, tf->x2, tf->x3, tf->x4, tf->x5);
+    return syscall(*syscall_num, tf->x1, tf->x2, tf->x3, tf->x4);
}

/// @brief set return reg to trapframe
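The new arch_syscall() above reads the call number from x8 and the four parameters from x1..x4 of the trapframe. The matching user-side stub is not part of this diff; as a hedged sketch, a caller compatible with that layout could look like the following (do_syscall is an illustrative name, and the assumption that the result comes back in x0 follows from the "set return reg to trapframe" helper declared above):

// Hypothetical caller-side stub matching the trapframe layout used by
// arch_syscall(): syscall number in x8, parameters in x1..x4, result
// assumed to come back in x0. Illustrative only; not part of this commit.
static inline int do_syscall(int sys_num, uintptr_t p1, uintptr_t p2,
    uintptr_t p3, uintptr_t p4)
{
    register uintptr_t r8 __asm__("x8") = (uintptr_t)sys_num;
    register uintptr_t r1 __asm__("x1") = p1;
    register uintptr_t r2 __asm__("x2") = p2;
    register uintptr_t r3 __asm__("x3") = p3;
    register uintptr_t r4 __asm__("x4") = p4;
    register uintptr_t r0 __asm__("x0");
    __asm__ volatile("svc #0"
                     : "=r"(r0)
                     : "r"(r8), "r"(r1), "r"(r2), "r"(r3), "r"(r4)
                     : "memory");
    return (int)r0;
}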
@ -1,7 +1,7 @@
-#include "memlayout.h"
+// #include "memlayout.h"
#include "core.h"
-#include "registers.h"
+// #include "registers.h"
-#include "cortex_a72.h"
+// #include "cortex_a72.h"
// qemu -kernel loads the kernel at 0x40000000
// and causes each CPU to jump there.
// kernel.ld causes the following code to
@ -19,113 +19,113 @@ entry:
|
||||||
adrp x1, bss_start
|
adrp x1, bss_start
|
||||||
ldr w2, =bss_size
|
ldr w2, =bss_size
|
||||||
1:
|
1:
|
||||||
cbz w2, 2f
|
// cbz w2, 2f
|
||||||
str xzr, [x1], #8
|
str xzr, [x1], #8
|
||||||
sub w2, w2, #1
|
sub w2, w2, #1
|
||||||
b 1b
|
b 1b
|
||||||
2:
|
// 2:
|
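For reference, the loop above (labels 1:/2:) zeroes the kernel BSS eight bytes at a time, treating bss_size as a doubleword count; a C sketch of the same logic, under that assumption:

// C equivalent of the BSS-clearing loop in entry.S (sketch only):
// one zero doubleword is stored per iteration, mirroring
// "str xzr, [x1], #8" and "sub w2, w2, #1".
static void clear_bss(unsigned long* bss, unsigned long ndoublewords)
{
    while (ndoublewords-- != 0) {
        *bss++ = 0;
    }
}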
||||||
// set up entry pagetable
|
// // set up entry pagetable
|
||||||
//
|
// //
|
||||||
// Phase 1.
|
// // Phase 1.
|
||||||
// map the kernel code identically.
|
// // map the kernel code identically.
|
||||||
// map [0x40000000,PA(end)) to [0x40000000,PA(end))
|
// // map [0x40000000,PA(end)) to [0x40000000,PA(end))
|
||||||
// memory type is normal
|
// // memory type is normal
|
||||||
//
|
// //
|
||||||
// Phase 2.
|
// // Phase 2.
|
||||||
// map the kernel code.
|
// // map the kernel code.
|
||||||
// map [0xffffff8040000000,VA(end)) to [0x40000000,PA(end))
|
// // map [0xffffff8040000000,VA(end)) to [0x40000000,PA(end))
|
||||||
// memory type is normal.
|
// // memory type is normal.
|
||||||
|
|
||||||
// Phase 1
|
// // Phase 1
|
||||||
// map [0x40000000,PA(end)) to [0x40000000,PA(end))
|
// // map [0x40000000,PA(end)) to [0x40000000,PA(end))
|
||||||
adrp x0, l2entrypgt
|
// adrp x0, l2entrypgt
|
||||||
|
|
||||||
mov x1, #0x00000000
|
// mov x1, #0x00000000
|
||||||
ldr x2, =V2P_WO(end)-1
|
// ldr x2, =V2P_WO(end)-1
|
||||||
|
|
||||||
lsr x3, x1, #PXSHIFT(2)
|
// lsr x3, x1, #PXSHIFT(2)
|
||||||
and x3, x3, #PXMASK // PX(2, x1)
|
// and x3, x3, #PXMASK // PX(2, x1)
|
||||||
lsr x4, x2, #PXSHIFT(2)
|
// lsr x4, x2, #PXSHIFT(2)
|
||||||
and x4, x4, #PXMASK // PX(2, x2)
|
// and x4, x4, #PXMASK // PX(2, x2)
|
||||||
mov x5, #(PTE_AF | PTE_INDX(AI_NORMAL_NC_IDX) | PTE_VALID) // entry attr
|
// mov x5, #(PTE_AF | PTE_INDX(AI_NORMAL_NC_IDX) | PTE_VALID) // entry attr
|
||||||
orr x6, x1, x5 // block entry
|
// orr x6, x1, x5 // block entry
|
||||||
l2epgt_loop:
|
// l2epgt_loop:
|
||||||
str x6, [x0, x3, lsl #3] // l2entrypgt[l2idx] = block entry
|
// str x6, [x0, x3, lsl #3] // l2entrypgt[l2idx] = block entry
|
||||||
add x3, x3, #1 // next index
|
// add x3, x3, #1 // next index
|
||||||
add x6, x6, #0x200000 // next block, block size is 2MB
|
// add x6, x6, #0x200000 // next block, block size is 2MB
|
||||||
cmp x3, x4
|
// cmp x3, x4
|
||||||
b.ls l2epgt_loop // if start va idx <= end va idx
|
// b.ls l2epgt_loop // if start va idx <= end va idx
|
||||||
|
|
||||||
adrp x0, l1entrypgt
|
// adrp x0, l1entrypgt
|
||||||
|
|
||||||
lsr x3, x1, #PXSHIFT(1)
|
// lsr x3, x1, #PXSHIFT(1)
|
||||||
and x3, x3, #PXMASK // start va level1 index
|
// and x3, x3, #PXMASK // start va level1 index
|
||||||
|
|
||||||
mov x4, #(PTE_TABLE | PTE_VALID) // entry attr
|
// mov x4, #(PTE_TABLE | PTE_VALID) // entry attr
|
||||||
adrp x5, l2entrypgt
|
// adrp x5, l2entrypgt
|
||||||
orr x6, x4, x5 // table entry
|
// orr x6, x4, x5 // table entry
|
||||||
|
|
||||||
str x6, [x0, x3, lsl #3] // l1entrypgt[l1idx] = table entry
|
// str x6, [x0, x3, lsl #3] // l1entrypgt[l1idx] = table entry
|
||||||
|
|
||||||
// Phase 2
|
// // Phase 2
|
||||||
// map [0xffffff8040000000,VA(end)) to [0x40000000,PA(end))
|
// // map [0xffffff8040000000,VA(end)) to [0x40000000,PA(end))
|
||||||
adrp x0, l2kpgt
|
// adrp x0, l2kpgt
|
||||||
|
|
||||||
mov x1, #0x00000000 // start pa
|
// mov x1, #0x00000000 // start pa
|
||||||
ldr x2, =V2P_WO(end)-1 // end pa
|
// ldr x2, =V2P_WO(end)-1 // end pa
|
||||||
mov x3, #KERN_MEM_BASE
|
// mov x3, #KERN_MEM_BASE
|
||||||
add x4, x1, x3 // start va
|
// add x4, x1, x3 // start va
|
||||||
add x5, x2, x3 // end va
|
// add x5, x2, x3 // end va
|
||||||
|
|
||||||
lsr x6, x4, #PXSHIFT(2)
|
// lsr x6, x4, #PXSHIFT(2)
|
||||||
and x6, x6, #PXMASK // x6 = PX(2,x4)
|
// and x6, x6, #PXMASK // x6 = PX(2,x4)
|
||||||
lsr x7, x5, #PXSHIFT(2)
|
// lsr x7, x5, #PXSHIFT(2)
|
||||||
and x7, x7, #PXMASK // x7 = PX(2,x5)
|
// and x7, x7, #PXMASK // x7 = PX(2,x5)
|
||||||
mov x8, #(PTE_AF | PTE_INDX(AI_NORMAL_NC_IDX) | PTE_VALID) // entry attr
|
// mov x8, #(PTE_AF | PTE_INDX(AI_NORMAL_NC_IDX) | PTE_VALID) // entry attr
|
||||||
orr x9, x1, x8 // block entry
|
// orr x9, x1, x8 // block entry
|
||||||
l2kpgt_loop:
|
// l2kpgt_loop:
|
||||||
str x9, [x0, x6, lsl #3] // l2entrypgt[l2idx] = block entry
|
// str x9, [x0, x6, lsl #3] // l2entrypgt[l2idx] = block entry
|
||||||
add x6, x6, #1 // next index
|
// add x6, x6, #1 // next index
|
||||||
add x9, x9, #0x200000 // next block, block size is 2MB
|
// add x9, x9, #0x200000 // next block, block size is 2MB
|
||||||
cmp x6, x7
|
// cmp x6, x7
|
||||||
b.ls l2kpgt_loop // if start va idx <= end va idx
|
// b.ls l2kpgt_loop // if start va idx <= end va idx
|
||||||
|
|
||||||
adrp x0, l1kpgt
|
// adrp x0, l1kpgt
|
||||||
|
|
||||||
lsr x5, x4, #PXSHIFT(1)
|
// lsr x5, x4, #PXSHIFT(1)
|
||||||
and x5, x5, #PXMASK // x5 = PX(1,x4)
|
// and x5, x5, #PXMASK // x5 = PX(1,x4)
|
||||||
|
|
||||||
mov x6, #(PTE_TABLE | PTE_VALID) // entry attr
|
// mov x6, #(PTE_TABLE | PTE_VALID) // entry attr
|
||||||
adrp x7, l2kpgt
|
// adrp x7, l2kpgt
|
||||||
orr x8, x6, x7 // table entry
|
// orr x8, x6, x7 // table entry
|
||||||
|
|
||||||
str x8, [x0, x5, lsl #3] // l1kpgt[l1idx] = table entry
|
// str x8, [x0, x5, lsl #3] // l1kpgt[l1idx] = table entry
|
||||||
|
|
||||||
entryothers: // secondary CPU starts here
|
// entryothers: // secondary CPU starts here
|
||||||
// load pagetable
|
// // load pagetable
|
||||||
adrp x0, l1entrypgt
|
// adrp x0, l1entrypgt
|
||||||
adrp x1, l1kpgt
|
// adrp x1, l1kpgt
|
||||||
msr ttbr0_el1, x0
|
// msr ttbr0_el1, x0
|
||||||
msr ttbr1_el1, x1
|
// msr ttbr1_el1, x1
|
||||||
|
|
||||||
// setup tcr
|
// // setup tcr
|
||||||
ldr x0, =(TCR_T0SZ(25)|TCR_T1SZ(25)|TCR_TG0(0)|TCR_TG1(2)|TCR_IPS(0))
|
// ldr x0, =(TCR_T0SZ(25)|TCR_T1SZ(25)|TCR_TG0(0)|TCR_TG1(2)|TCR_IPS(0))
|
||||||
msr tcr_el1, x0
|
// msr tcr_el1, x0
|
||||||
|
|
||||||
// setup mair
|
// // setup mair
|
||||||
ldr x1, =((MT_DEVICE_nGnRnE<<(8*AI_DEVICE_nGnRnE_IDX)) | (MT_NORMAL_NC<<(8*AI_NORMAL_NC_IDX)))
|
// ldr x1, =((MT_DEVICE_nGnRnE<<(8*AI_DEVICE_nGnRnE_IDX)) | (MT_NORMAL_NC<<(8*AI_NORMAL_NC_IDX)))
|
||||||
msr mair_el1, x1
|
// msr mair_el1, x1
|
||||||
|
|
||||||
ISB
|
// ISB
|
||||||
|
|
||||||
ldr x1, =_start // x1 = VA(_start)
|
// ldr x1, =_start // x1 = VA(_start)
|
||||||
|
|
||||||
// enable paging
|
// // enable paging
|
||||||
mrs x0, sctlr_el1
|
// mrs x0, sctlr_el1
|
||||||
orr x0, x0, #1
|
// orr x0, x0, #1
|
||||||
msr sctlr_el1, x0
|
// msr sctlr_el1, x0
|
||||||
|
|
||||||
br x1 // jump to higher address (0xffffff8000000000~)
|
// br x1 // jump to higher address (0xffffff8000000000~)
|
||||||
|
|
||||||
_start:
|
_start:
|
||||||
// set up a stack for C.
|
// set up a stack for C.
|
||||||
|
@ -133,8 +133,8 @@ _start:
// with a 4096-byte stack per CPU.
// sp = stack0 + ((cpuid+1) * 4096)
// cpuid = mpidr_el1 & 0xff
-ldr x0, =stack0
+ldr x0, =stacks_start
-mov x1, #1024*4
+mov x1, #MODE_STACK_SIZE
mrs x2, mpidr_el1
and x2, x2, #0x3
add x2, x2, #1
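The stack setup above now indexes stacks_start by MODE_STACK_SIZE instead of a hard-coded 4096; in C terms the computed stack top is roughly the following (stack_top_for is an illustrative helper, not part of this commit, and the 0x3 mask limits the index to four cores):

// Sketch of the per-CPU stack-top calculation done in _start above.
static inline char* stack_top_for(char* stacks_start, uint64_t mpidr_el1)
{
    uint64_t cpu = mpidr_el1 & 0x3;                     // "and x2, x2, #0x3"
    return stacks_start + (cpu + 1) * MODE_STACK_SIZE;  // sp grows downward
}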
@ -48,8 +48,8 @@ Modification:
#if !defined(__CORTEX_A72_H__)
#define __CORTEX_A72_H__

-#include <stdint.h>
#include <stdbool.h>
+#include <stdint.h>
#include <stdlib.h>

//! @name Instruction macros
@ -58,8 +58,8 @@ Modification:
#define WFI() __asm__ volatile("wfi\n\t")
#define WFE() __asm__ volatile("wfe\n\t")
#define SEV() __asm__ volatile("sev\n\t")
-#define DMB() __asm__ volatile("dmb\n\t")
+#define DMB() __asm__ volatile("dmb ish\n\t")
-#define DSB() __asm__ volatile("dsb\n\t")
+#define DSB() __asm__ volatile("dsb ish\n\t")
#define ISB() __asm__ volatile("isb\n\t")

#define _ARM_MRS(coproc, opcode1, Rt, CRn, CRm, opcode2) \
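The DMB()/DSB() macros above now target the inner-shareable domain ("dmb ish"/"dsb ish"), which is what makes the ordering visible to the other cores rather than only to the local CPU. A minimal usage sketch (shared_buf/shared_ready are illustrative names, not part of this commit):

// Publish data to another core: the barrier orders the data store before
// the flag store for all observers in the inner-shareable domain.
static int shared_buf[16];
static volatile int shared_ready;

static void publish(int v)
{
    shared_buf[0] = v;
    DMB();              // uses the "dmb ish" macro defined above
    shared_ready = 1;
}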
@ -71,12 +71,10 @@ Modification:
|
||||||
#define WriteReg(value, address) (*(volatile unsigned int*)(address) = (value))
|
#define WriteReg(value, address) (*(volatile unsigned int*)(address) = (value))
|
||||||
#define ReadReg(address) (*(volatile unsigned int*)(address))
|
#define ReadReg(address) (*(volatile unsigned int*)(address))
|
||||||
|
|
||||||
|
|
||||||
#if defined(__cplusplus)
|
#if defined(__cplusplus)
|
||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
//! @name Misc
|
//! @name Misc
|
||||||
//@{
|
//@{
|
||||||
//! @brief Enable or disable the IRQ and FIQ state.
|
//! @brief Enable or disable the IRQ and FIQ state.
|
||||||
|
@ -115,24 +113,24 @@ void arm_dcache_disable();
|
||||||
void arm_dcache_invalidate();
|
void arm_dcache_invalidate();
|
||||||
|
|
||||||
//! @brief Invalidate a line of data cache.
|
//! @brief Invalidate a line of data cache.
|
||||||
void arm_dcache_invalidate_line(const void * addr);
|
void arm_dcache_invalidate_line(const void* addr);
|
||||||
|
|
||||||
//! @brief Invalidate a number of lines of data cache.
|
//! @brief Invalidate a number of lines of data cache.
|
||||||
//!
|
//!
|
||||||
//! Number of lines depends on length parameter and size of line.
|
//! Number of lines depends on length parameter and size of line.
|
||||||
//! Size of line for A9 L1 cache is 32B.
|
//! Size of line for A9 L1 cache is 32B.
|
||||||
void arm_dcache_invalidate_mlines(const void * addr, size_t length);
|
void arm_dcache_invalidate_mlines(const void* addr, size_t length);
|
||||||
|
|
||||||
//! @brief Flush (clean) all lines of cache (all sets in all ways).
|
//! @brief Flush (clean) all lines of cache (all sets in all ways).
|
||||||
void arm_dcache_flush();
|
void arm_dcache_flush();
|
||||||
|
|
||||||
//! @brief Flush (clean) one line of cache.
|
//! @brief Flush (clean) one line of cache.
|
||||||
void arm_dcache_flush_line(const void * addr);
|
void arm_dcache_flush_line(const void* addr);
|
||||||
|
|
||||||
// @brief Flush (clean) multiple lines of cache.
|
// @brief Flush (clean) multiple lines of cache.
|
||||||
//!
|
//!
|
||||||
//! Number of lines depends on length parameter and size of line.
|
//! Number of lines depends on length parameter and size of line.
|
||||||
void arm_dcache_flush_mlines(const void * addr, size_t length);
|
void arm_dcache_flush_mlines(const void* addr, size_t length);
|
||||||
//@}
|
//@}
|
||||||
|
|
||||||
//! @name Instruction cache operations
|
//! @name Instruction cache operations
|
||||||
|
@ -156,12 +154,12 @@ void arm_icache_invalidate();
|
||||||
void arm_icache_invalidate_is();
|
void arm_icache_invalidate_is();
|
||||||
|
|
||||||
//! @brief Invalidate a line of the instruction cache.
|
//! @brief Invalidate a line of the instruction cache.
|
||||||
void arm_icache_invalidate_line(const void * addr);
|
void arm_icache_invalidate_line(const void* addr);
|
||||||
|
|
||||||
//! @brief Invalidate a number of lines of instruction cache.
|
//! @brief Invalidate a number of lines of instruction cache.
|
||||||
//!
|
//!
|
||||||
//! Number of lines depends on length parameter and size of line.
|
//! Number of lines depends on length parameter and size of line.
|
||||||
void arm_icache_invalidate_mlines(const void * addr, size_t length);
|
void arm_icache_invalidate_mlines(const void* addr, size_t length);
|
||||||
//@}
|
//@}
|
||||||
|
|
||||||
//! @name TLB operations
|
//! @name TLB operations
|
||||||
|
@ -232,4 +230,5 @@ void scu_secure_invalidate(unsigned int cpu, unsigned int ways);
|
||||||
#if defined(__cplusplus)
|
#if defined(__cplusplus)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
#endif//__CORTEX_A72_H__
|
|
||||||
|
#endif //__CORTEX_A72_H__
|
|
@ -35,6 +35,8 @@
* @author AIIT XUOS Lab
* @date 2024.04.10
*/
+BOOT_STACK_SIZE = 0x4000;

OUTPUT_FORMAT("elf64-littleaarch64")
OUTPUT_ARCH( "aarch64" )
ENTRY( _entry )
@ -1,23 +1,22 @@
#include "core.h"
+#include "cortex_a72.h"
#include "memlayout.h"
-#include "cortexa72.h"

void _entry();
void main();
extern char end[];

// entry.S needs one stack per CPU.
-__attribute__ ((aligned (16))) char stack0[4096 * NR_CPU];
+__attribute__((aligned(16))) char stack0[4096 * NR_CPU];

// entry.S jumps here in supervisor mode (EL1) on stack0.
// in qemu-system-aarch64, the default EL (Exception Level) is 1.
-void
-start()
+void start()
{
    main();
}

-__attribute__((aligned(PGSIZE))) pte_t l1entrypgt[512];
+__attribute__((aligned(PGSIZE))) uint64_t l1entrypgt[512];
-__attribute__((aligned(PGSIZE))) pte_t l2entrypgt[512];
+__attribute__((aligned(PGSIZE))) uint64_t l2entrypgt[512];
-__attribute__((aligned(PGSIZE))) pte_t l1kpgt[512];
+__attribute__((aligned(PGSIZE))) uint64_t l1kpgt[512];
-__attribute__((aligned(PGSIZE))) pte_t l2kpgt[512];
+__attribute__((aligned(PGSIZE))) uint64_t l2kpgt[512];
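A quick size check on the page tables declared above, as a sketch: entry.S fills the level-2 tables with 2 MB block entries, so one 512-entry level-2 table spans 512 * 2 MB = 1 GB, which matches the reach of a single level-1 entry with the 4 KB granule.

// Compile-time sanity check for the 512-entry page tables (sketch only).
_Static_assert(512ULL * 2 * 1024 * 1024 == 1024ULL * 1024 * 1024,
    "a 512-entry L2 table of 2MB blocks covers exactly 1GB");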
@ -1,11 +1,11 @@
|
||||||
/**
|
/**
|
||||||
* @file: l1_cache.c
|
* @file: l1_cache.c
|
||||||
* @brief: the general management of L1 cache
|
* @brief: the general management of L1 cache
|
||||||
* @version: 1.0
|
* @version: 1.0
|
||||||
* @author: AIIT XUOS Lab
|
* @author: AIIT XUOS Lab
|
||||||
* @date: 2024/04/23
|
* @date: 2024/04/23
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*************************************************
|
/*************************************************
|
||||||
File name: l1_cache.c
|
File name: l1_cache.c
|
||||||
|
@ -23,27 +23,28 @@ Modification:

void InvalidateL1Dcache(uintptr_t start, uintptr_t end)
{
-    uintptr_t length = end - start;
+    uint64_t length = end - start;
-    uintptr_t addr = start;
+    uint64_t addr = start;
    uint64_t ccsidr_el1;
    uint64_t line_size;
    uint64_t va;
    // get the cache line size
-    __asm__ __volatile__("mrs %0, ccsidr_el1" : : "=r" (ccsidr_el1));
+    __asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1));
    line_size = 1 << ((ccsidr_el1 & 0x7) + 4);

    // align the address with line
-    const void * end_addr = (const void *)((uint64_t)addr + length);
+    const uintptr_t end_addr = (const uintptr_t)((uint64_t)addr + length);

-    while (addr < end_addr){
+    do {
-        va = (uint64_t)addr & (~(line_size - 1));
+        va = (uint64_t)((uint64_t)addr & (~(line_size - 1)));

        // Invalidate data cache line to PoC (Point of Coherence) by va.
-        __asm__ __volatile__("dc ivac, %0 " : : "r" (va));
+        __asm__ __volatile__("dc ivac, %0 " : : "r"(va));

        // increment address to the next line and decrement the length
-        addr = (void*)((uint64_t)addr + line_size);
+        addr = (uintptr_t)((uint64_t)addr + line_size);
-    }
+    } while (addr < end_addr);

    // All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
    DSB();
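The line-size computation above follows the CCSIDR_EL1 encoding, where LineSize (bits [2:0]) holds log2(bytes per line) minus 4, so bytes per line = 1 << (LineSize + 4). A worked example as a sketch (ccsidr_line_size_bytes is an illustrative helper): a field value of 2 gives 1 << 6 = 64-byte lines, and "va & ~(line_size - 1)" then rounds each address down to a 64-byte boundary.

// Same computation as the function above, isolated for clarity (sketch).
static inline uint64_t ccsidr_line_size_bytes(uint64_t ccsidr_el1)
{
    return 1ULL << ((ccsidr_el1 & 0x7) + 4);   // e.g. LineSize=2 -> 64 bytes
}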
@ -56,48 +57,46 @@ void InvalidateL1DcacheAll(void)
|
||||||
int num_ways; // number of ways
|
int num_ways; // number of ways
|
||||||
uint32_t wayset; // wayset parameter
|
uint32_t wayset; // wayset parameter
|
||||||
|
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : : "=r" (ccsidr_el1));// Read Cache Size ID
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1)); // Read Cache Size ID
|
||||||
|
|
||||||
// Fill number of sets and number of ways from ccsidr_el1 register
|
// Fill number of sets and number of ways from ccsidr_el1 register
|
||||||
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
||||||
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
||||||
|
|
||||||
// Invalidation all lines (all Sets in all ways)
|
// Invalidation all lines (all Sets in all ways)
|
||||||
for (int way = 0 ; way < num_ways; way++){
|
for (int way = 0; way < num_ways; way++) {
|
||||||
for (int set = 0 ;set < num_sets; set++){
|
for (int set = 0; set < num_sets; set++) {
|
||||||
wayset = (way << 30) | (set << 5);
|
wayset = (way << 30) | (set << 5);
|
||||||
__asm__ __volatile__("dc isw, %0" : : "r" (wayset));
|
__asm__ __volatile__("dc isw, %0" : : "r"(wayset));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CleanL1Dcache(uintptr_t start, uintptr_t end)
|
void CleanL1Dcache(uintptr_t start, uintptr_t end)
|
||||||
{
|
{
|
||||||
void* addr = (void*)start;
|
void* addr = (void*)start;
|
||||||
uintptr_t length = end - start;
|
uintptr_t length = end - start;
|
||||||
const void * end_addr = (const void *)((uint64_t)addr + length);
|
const void* end_addr = (const void*)((uint64_t)addr + length);
|
||||||
uint64_t ccsidr_el1;
|
uint64_t ccsidr_el1;
|
||||||
uint64_t line_size;
|
uint64_t line_size;
|
||||||
uint64_t va;
|
uint64_t va;
|
||||||
|
|
||||||
// get the cache line size
|
// get the cache line size
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r" (ccsidr_el1));
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1));
|
||||||
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
||||||
|
|
||||||
do
|
do {
|
||||||
{
|
|
||||||
va = (uint64_t)addr & (~(line_size - 1));
|
va = (uint64_t)addr & (~(line_size - 1));
|
||||||
// Clean data cache line to PoC (Point of Coherence) by va.
|
// Clean data cache line to PoC (Point of Coherence) by va.
|
||||||
__asm__ __volatile__("dc cvac, %0" : : "r" (va));
|
__asm__ __volatile__("dc cvac, %0" : : "r"(va));
|
||||||
|
|
||||||
// increment address to the next line and decrement the length
|
// increment address to the next line and decrement the length
|
||||||
addr = (void*)((uint64_t)addr + line_size);
|
addr = (void*)((uint64_t)addr + line_size);
|
||||||
} while (addr < end_addr);
|
} while (addr < end_addr);
|
||||||
|
|
||||||
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
|
@ -109,46 +108,45 @@ void CleanL1DcacheAll(void)
|
||||||
int num_ways; // number of ways
|
int num_ways; // number of ways
|
||||||
uint32_t wayset; // wayset parameter
|
uint32_t wayset; // wayset parameter
|
||||||
|
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : : "=r" (ccsidr_el1));// Read Cache Size ID
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1)); // Read Cache Size ID
|
||||||
|
|
||||||
// Fill the number of sets and number of ways from the ccsidr_el1 register; these values are stored decremented by 1
|
// Fill the number of sets and number of ways from the ccsidr_el1 register; these values are stored decremented by 1
|
||||||
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
||||||
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
||||||
|
|
||||||
// clean all lines (all Sets in all ways)
|
// clean all lines (all Sets in all ways)
|
||||||
for (int way = 0 ; way < num_ways; way++){
|
for (int way = 0; way < num_ways; way++) {
|
||||||
for (int set = 0 ;set < num_sets; set++){
|
for (int set = 0; set < num_sets; set++) {
|
||||||
wayset = (way << 30) | (set << 5);
|
wayset = (way << 30) | (set << 5);
|
||||||
__asm__ __volatile__("dc csw, %0" : : "r" (wayset));
|
__asm__ __volatile__("dc csw, %0" : : "r"(wayset));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
|
|
||||||
void FlushL1Dcache(uintptr_t start, uintptr_t end)
|
void FlushL1Dcache(uintptr_t start, uintptr_t end)
|
||||||
{
|
{
|
||||||
void* addr = (void*)start;
|
void* addr = (void*)start;
|
||||||
// size_t length=end-start;
|
// size_t length=end-start;
|
||||||
uint64_t va;
|
uint64_t va;
|
||||||
uint64_t ccsidr_el1 = 0, line_size = 0;
|
uint64_t ccsidr_el1 = 0, line_size = 0;
|
||||||
const void * end_addr = (const void *)((uint64_t)end);
|
const void* end_addr = (const void*)((uint64_t)end);
|
||||||
|
|
||||||
// get the cache line size
|
// get the cache line size
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r" (ccsidr_el1));
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1));
|
||||||
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
||||||
|
|
||||||
do
|
do {
|
||||||
{
|
// Clean data cache line to PoC (Point of Coherence) by va.
|
||||||
// Clean data cache line to PoC (Point of Coherence) by va.
|
va = (uint64_t)((uint64_t)addr & (~(line_size - 1))); // addr & va_VIRTUAL_ADDRESS_MASK
|
||||||
va = (uint64_t) ((uint64_t)addr & (~(line_size - 1))); //addr & va_VIRTUAL_ADDRESS_MASK
|
__asm__ __volatile__("dc civac, %0" : : "r"(va));
|
||||||
__asm__ __volatile__("dc civac, %0" : : "r" (va));
|
|
||||||
|
|
||||||
// increment address to the next line and decrement the length
|
// increment address to the next line and decrement the length
|
||||||
addr = (void*)((uint64_t)addr + line_size);
|
addr = (void*)((uint64_t)addr + line_size);
|
||||||
} while (addr < end_addr);
|
} while (addr < end_addr);
|
||||||
|
|
||||||
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
|
@ -160,19 +158,19 @@ void FlushL1DcacheAll(void)
|
||||||
int num_ways; // number of ways
|
int num_ways; // number of ways
|
||||||
uint32_t wayset; // wayset parameter
|
uint32_t wayset; // wayset parameter
|
||||||
|
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : : "=r" (ccsidr_el1));// Read Cache Size ID
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1)); // Read Cache Size ID
|
||||||
|
|
||||||
// Fill the number of sets and number of ways from the ccsidr_el1 register; these values are stored decremented by 1
|
// Fill the number of sets and number of ways from the ccsidr_el1 register; these values are stored decremented by 1
|
||||||
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
num_sets = ((ccsidr_el1 >> 32) & 0x7FFF) + 1;
|
||||||
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
num_ways = ((ccsidr_el1 >> 0) & 0x7FFF) + 1;
|
||||||
|
|
||||||
// clean and invalidate all lines (all Sets in all ways)
|
// clean and invalidate all lines (all Sets in all ways)
|
||||||
for (int way = 0 ; way < num_ways; way++){
|
for (int way = 0; way < num_ways; way++) {
|
||||||
for (int set = 0 ;set < num_sets; set++){
|
for (int set = 0; set < num_sets; set++) {
|
||||||
wayset = (way << 30) | (set << 5);
|
wayset = (way << 30) | (set << 5);
|
||||||
__asm__ __volatile__("dc cisw, %0" : : "r" (wayset));
|
__asm__ __volatile__("dc cisw, %0" : : "r"(wayset));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
// All Cache, Branch predictor and TLB maintenance operations before followed instruction complete
|
||||||
DSB();
|
DSB();
|
||||||
|
@ -187,99 +185,97 @@ void InvalidateL1IcacheAll()
|
||||||
|
|
||||||
void InvalidateL1Icache(uintptr_t start, uintptr_t end)
|
void InvalidateL1Icache(uintptr_t start, uintptr_t end)
|
||||||
{
|
{
|
||||||
uintptr_t length = end - start;
|
// uintptr_t length = end - start;
|
||||||
uintptr_t addr = start;
|
uintptr_t addr = start;
|
||||||
uint64_t ccsidr_el1;
|
uint64_t ccsidr_el1;
|
||||||
uint64_t line_size;
|
uint64_t line_size;
|
||||||
uint64_t va;
|
uint64_t va;
|
||||||
|
const uintptr_t end_addr = (const uintptr_t)((uint64_t)end);
|
||||||
// get the cache line size
|
// get the cache line size
|
||||||
__asm__ __volatile__("mrs %0, ccsidr_el1" : : "=r" (ccsidr_el1));
|
__asm__ __volatile__("mrs %0, ccsidr_el1" : "=r"(ccsidr_el1));
|
||||||
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
line_size = 1 << ((ccsidr_el1 & 0x7) + 4);
|
||||||
|
|
||||||
while (addr < end){
|
do {
|
||||||
va = (uint64_t)addr & (~(line_size - 1));
|
va = (uint64_t)((uint64_t)addr & (~(line_size - 1)));
|
||||||
|
|
||||||
//Invalidate data cache line to PoC (Point of Coherence) by va.
|
// Invalidate data cache line to PoC (Point of Coherence) by va.
|
||||||
__asm__ __volatile__("dc ivau, %0 " : : "r" (va));
|
__asm__ __volatile__("dc ivau, %0 " : : "r"(va));
|
||||||
// increment address to the next line and decrement the length
|
// increment address to the next line and decrement the length
|
||||||
addr = (void*)((uint64_t)addr + line_size);
|
addr = (uintptr_t)((uint64_t)addr + line_size);
|
||||||
}
|
} while (addr < end_addr);
|
||||||
|
|
||||||
// synchronize context on this processor
|
// synchronize context on this processor
|
||||||
ISB();
|
ISB();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void EnableL1Dcache()
|
void EnableL1Dcache()
|
||||||
{
|
{
|
||||||
uint64_t sctlr_el1; // System Control Register
|
uint64_t sctlr_el1; // System Control Register
|
||||||
|
|
||||||
// read sctlr_el1
|
// read sctlr_el1
|
||||||
__asm__ __volatile__("mrs %0, sctlr_el1" : :"=r" (sctlr_el1));
|
__asm__ __volatile__("mrs %0, sctlr_el1" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
if (!(sctlr_el1 & SCTLR_EL1_DCACHE_ENABLE))
|
if (!(sctlr_el1 & SCTLR_EL1_DCACHE_ENABLE)) {
|
||||||
{
|
// set C bit (data caching enable)
|
||||||
// set C bit (data caching enable)
|
|
||||||
sctlr_el1 |= SCTLR_EL1_DCACHE_ENABLE;
|
sctlr_el1 |= SCTLR_EL1_DCACHE_ENABLE;
|
||||||
|
|
||||||
// write modified sctlr_el1
|
// write modified sctlr_el1
|
||||||
__asm__ __volatile__("msr sctlr_el1, %0" : : "r" (sctlr_el1));
|
__asm__ __volatile__("msr sctlr_el1, %0" : : "r"(sctlr_el1));
|
||||||
|
|
||||||
//data synchronization barrier
|
// data synchronization barrier
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void DisableL1Dcache()
|
void DisableL1Dcache()
|
||||||
{
|
{
|
||||||
uint64_t sctlr_el1; // System Control Register
|
uint64_t sctlr_el1; // System Control Register
|
||||||
|
|
||||||
// read sctlr_el1
|
// read sctlr_el1
|
||||||
__asm__ __volatile__("mrs %0, sctlr_el1" : : "=r" (sctlr_el1));
|
__asm__ __volatile__("mrs %0, sctlr_el1" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
// set C bit (data caching enable)
|
// set C bit (data caching enable)
|
||||||
sctlr_el1 &= ~ SCTLR_EL1_DCACHE_ENABLE;
|
sctlr_el1 &= ~SCTLR_EL1_DCACHE_ENABLE;
|
||||||
|
|
||||||
// write modified sctlr_el1
|
// write modified sctlr_el1
|
||||||
__asm__ __volatile__("msr sctlr_el1, %0" : : "r" (sctlr_el1));
|
__asm__ __volatile__("msr sctlr_el1, %0" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
//data synchronization barrier
|
// data synchronization barrier
|
||||||
DSB();
|
DSB();
|
||||||
}
|
}
|
||||||
|
|
||||||
void EnableL1Icache()
|
void EnableL1Icache()
|
||||||
{
|
{
|
||||||
uint64_t sctlr_el1; // System Control Register
|
uint64_t sctlr_el1; // System Control Register
|
||||||
|
|
||||||
// read sctlr_el1
|
// read sctlr_el1
|
||||||
__asm__ __volatile__("mrs %0, sctlr_el1" : : "=r" (sctlr_el1));
|
__asm__ __volatile__("mrs %0, sctlr_el1" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
if (!(sctlr_el1 & SCTLR_EL1_ICACHE_ENABLE))
|
if (!(sctlr_el1 & SCTLR_EL1_ICACHE_ENABLE)) {
|
||||||
{
|
// set I bit (data caching enable)
|
||||||
// set I bit (data caching enable)
|
|
||||||
sctlr_el1 |= SCTLR_EL1_ICACHE_ENABLE;
|
sctlr_el1 |= SCTLR_EL1_ICACHE_ENABLE;
|
||||||
|
|
||||||
// write modified sctlr_el1
|
|
||||||
__asm__ __volatile__("msr sctlr_el1, %0" : : "r" (sctlr_el1));
|
|
||||||
|
|
||||||
//Instruction synchronization barrier
|
// write modified sctlr_el1
|
||||||
ISB();
|
__asm__ __volatile__("msr sctlr_el1, %0" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
|
// Instruction synchronization barrier
|
||||||
|
ISB();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void DisableL1Icache()
|
void DisableL1Icache()
|
||||||
{
|
{
|
||||||
uint64_t sctlr_el1; // System Control Register
|
uint64_t sctlr_el1; // System Control Register
|
||||||
|
|
||||||
// read sctlr_el1
|
// read sctlr_el1
|
||||||
__asm__ __volatile__("mrs %0, sctlr_el1" : : "=r" (sctlr_el1));
|
__asm__ __volatile__("mrs %0, sctlr_el1" : "=r"(sctlr_el1));
|
||||||
|
|
||||||
// set I bit (data caching enable)
|
|
||||||
sctlr_el1 &= ~ SCTLR_EL1_ICACHE_ENABLE;
|
|
||||||
|
|
||||||
// write modified sctlr_el1
|
|
||||||
__asm__ __volatile__("msr sctlr_el1, %0" : : "r" (sctlr_el1));
|
|
||||||
|
|
||||||
//Instruction synchronization barrier
|
// set I bit (data caching enable)
|
||||||
|
sctlr_el1 &= ~SCTLR_EL1_ICACHE_ENABLE;
|
||||||
|
|
||||||
|
// write modified sctlr_el1
|
||||||
|
__asm__ __volatile__("msr sctlr_el1, %0" : : "r"(sctlr_el1));
|
||||||
|
|
||||||
|
// Instruction synchronization barrier
|
||||||
ISB();
|
ISB();
|
||||||
}
|
}
|
|
@ -1,62 +1,61 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2020 AIIT XUOS Lab
|
* Copyright (c) 2020 AIIT XUOS Lab
|
||||||
* XiUOS is licensed under Mulan PSL v2.
|
* XiUOS is licensed under Mulan PSL v2.
|
||||||
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
||||||
* You may obtain a copy of Mulan PSL v2 at:
|
* You may obtain a copy of Mulan PSL v2 at:
|
||||||
* http://license.coscl.org.cn/MulanPSL2
|
* http://license.coscl.org.cn/MulanPSL2
|
||||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||||
* See the Mulan PSL v2 for more details.
|
* See the Mulan PSL v2 for more details.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file: l1_cache.h
|
* @file: l1_cache.h
|
||||||
* @brief: the general management of L1 cache
|
* @brief: the general management of L1 cache
|
||||||
* @version: 1.0
|
* @version: 1.0
|
||||||
* @author: AIIT XUOS Lab
|
* @author: AIIT XUOS Lab
|
||||||
* @date: 2024/4/23
|
* @date: 2024/4/23
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*************************************************
|
/*************************************************
|
||||||
File name: l1_cache.h
|
File name: l1_cache.h
|
||||||
Description: the general management of L1 cache
|
Description: the general management of L1 cache
|
||||||
Others:
|
Others:
|
||||||
History:
|
History:
|
||||||
Author: AIIT XUOS Lab
|
Author: AIIT XUOS Lab
|
||||||
Modification:
|
Modification:
|
||||||
1、define the l1 cache operations
|
1、define the l1 cache operations
|
||||||
*************************************************/
|
*************************************************/
|
||||||
#include "cortex-a72/core.h"
|
#include "core.h"
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* L1 Cache Operation:
|
* L1 Cache Operation:
|
||||||
*
|
*
|
||||||
* IVAC -Invalidate by Virtual Address, to Point of Coherency AArch32Equivalent :DCIMVAC
|
* IVAC -Invalidate by Virtual Address, to Point of Coherency AArch32Equivalent :DCIMVAC
|
||||||
*
|
*
|
||||||
* ISW -Invalidate by Set/Way AArch32Equivalent :DCISW
|
* ISW -Invalidate by Set/Way AArch32Equivalent :DCISW
|
||||||
*
|
*
|
||||||
*CVAC -Clean by Virtual Address to Point of Coherency AArch32Equivalent :DCCMVAC
|
*CVAC -Clean by Virtual Address to Point of Coherency AArch32Equivalent :DCCMVAC
|
||||||
*
|
*
|
||||||
*CSW -Clean by Set/Way AArch32Equivalent :DCCSW
|
*CSW -Clean by Set/Way AArch32Equivalent :DCCSW
|
||||||
*
|
*
|
||||||
*CVAU -Clean by Virtual Address to Point of Unification AArch32Equivalent :DCCMVAU
|
*CVAU -Clean by Virtual Address to Point of Unification AArch32Equivalent :DCCMVAU
|
||||||
*
|
*
|
||||||
*CIVAC -Clean and invalidate data cache line by VA to PoC. AArch32Equivalent :DCCIMVAC
|
*CIVAC -Clean and invalidate data cache line by VA to PoC. AArch32Equivalent :DCCIMVAC
|
||||||
*
|
*
|
||||||
*ISW -Clean and invalidate data cache line by Set/Way. AArch32Equivalent :DCCISW
|
*ISW -Clean and invalidate data cache line by Set/Way. AArch32Equivalent :DCCISW
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#define SCTLR_EL1_ICACHE_ENABLE(1 << 12) //!< Instruction cache enable
|
#define SCTLR_EL1_ICACHE_ENABLE (1 << 12) //!< Instruction cache enable
|
||||||
#define SCTLR_EL1_DCACHE_ENABLE (1 << 2) //!< Data cache enable
|
#define SCTLR_EL1_DCACHE_ENABLE (1 << 2) //!< Data cache enable
|
||||||
|
|
||||||
void InvalidateL1Dcache(uintptr_t start, uintptr_t end);
|
void InvalidateL1Dcache(uintptr_t start, uintptr_t end);
|
||||||
|
|
||||||
void InvalidateL1DcacheAll(void);
|
void InvalidateL1DcacheAll(void);
|
||||||
|
|
||||||
|
|
||||||
void CleanL1Dcache(uintptr_t start, uintptr_t end);
|
void CleanL1Dcache(uintptr_t start, uintptr_t end);
|
||||||
|
|
||||||
void CleanL1DcacheAll(void);
|
void CleanL1DcacheAll(void);
|
||||||
|
@ -65,7 +64,6 @@ void FlushL1Dcache(uintptr_t start, uintptr_t end);
|
||||||
|
|
||||||
void FlushL1DcacheAll(void);
|
void FlushL1DcacheAll(void);
|
||||||
|
|
||||||
|
|
||||||
void InvalidateL1IcacheAll(void);
|
void InvalidateL1IcacheAll(void);
|
||||||
|
|
||||||
void InvalidateL1Icache(uintptr_t start, uintptr_t end);
|
void InvalidateL1Icache(uintptr_t start, uintptr_t end);
|
||||||
|
|
|
@ -1,37 +1,37 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2020 AIIT XUOS Lab
|
* Copyright (c) 2020 AIIT XUOS Lab
|
||||||
* XiUOS is licensed under Mulan PSL v2.
|
* XiUOS is licensed under Mulan PSL v2.
|
||||||
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
||||||
* You may obtain a copy of Mulan PSL v2 at:
|
* You may obtain a copy of Mulan PSL v2 at:
|
||||||
* http://license.coscl.org.cn/MulanPSL2
|
* http://license.coscl.org.cn/MulanPSL2
|
||||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||||
* See the Mulan PSL v2 for more details.
|
* See the Mulan PSL v2 for more details.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file: cache_common_ope.c
|
* @file: cache_common_ope.c
|
||||||
* @brief: the general management of cache
|
* @brief: the general management of cache
|
||||||
* @version: 3.0
|
* @version: 3.0
|
||||||
* @author: AIIT XUOS Lab
|
* @author: AIIT XUOS Lab
|
||||||
* @date: 2023/11/06
|
* @date: 2023/11/06
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*************************************************
|
/*************************************************
|
||||||
File name: cache_common_ope.c
|
File name: cache_common_ope.c
|
||||||
Description: the general management of cache
|
Description: the general management of cache
|
||||||
Others:
|
Others:
|
||||||
History:
|
History:
|
||||||
1. Date: 2023-11-06
|
1. Date: 2023-11-06
|
||||||
Author: AIIT XUOS Lab
|
Author: AIIT XUOS Lab
|
||||||
Modification:
|
Modification:
|
||||||
1、implement xiuos cache operations
|
1、implement xiuos cache operations
|
||||||
*************************************************/
|
*************************************************/
|
||||||
#include "cache_common_ope.h"
|
#include "cache_common_ope.h"
|
||||||
#include "l1_cache.h"
|
#include "l1_cache.h"
|
||||||
#include "l2_cache.h"
|
// #include "l2_cache.h"
|
||||||
|
|
||||||
/****************************************************************************
|
/****************************************************************************
|
||||||
* Public Functions
|
* Public Functions
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
SRC_DIR:= arm/armv7-a/cortex-a9/$(BOARD)
|
SRC_DIR:= arm
|
||||||
|
|
||||||
include $(KERNEL_ROOT)/compiler.mk
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,9 @@
|
||||||
|
ifneq ($(findstring $(BOARD), ok1028a-c), )
|
||||||
|
SRC_DIR := armv8-a
|
||||||
|
endif
|
||||||
|
ifneq ($(findstring $(BOARD), imx6q-sabrelite zynq7000-zc702), )
|
||||||
|
SRC_DIR := armv7-a
|
||||||
|
endif
|
||||||
|
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,6 @@
|
||||||
|
ifneq ($(findstring $(BOARD), imx6q-sabrelite zynq7000-zc702), )
|
||||||
|
SRC_DIR := cortex-a9
|
||||||
|
endif
|
||||||
|
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,3 @@
|
||||||
|
SRC_DIR := $(BOARD)
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,6 @@
|
||||||
|
ifneq ($(findstring $(BOARD), ok1028a-c), )
|
||||||
|
SRC_DIR := cortex-a72
|
||||||
|
endif
|
||||||
|
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,3 @@
|
||||||
|
SRC_DIR := $(BOARD)
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,4 @@
|
||||||
|
SRC_FILES := clock.c
|
||||||
|
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -0,0 +1,9 @@
|
||||||
|
SRC_FILES := vector.S trampoline.S $(BOARD)/trap_common.c error_debug.c hard_spinlock.S
|
||||||
|
|
||||||
|
ifeq ($(BOARD), ok1028a-c)
|
||||||
|
SRC_DIR := gicv3
|
||||||
|
SRC_FILES += $(BOARD)/
|
||||||
|
endif
|
||||||
|
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -42,7 +42,7 @@ Modification:
|
||||||
#include "log.h"
|
#include "log.h"
|
||||||
#include "multicores.h"
|
#include "multicores.h"
|
||||||
#include "spinlock.h"
|
#include "spinlock.h"
|
||||||
#include "syscall.h"
|
// #include "syscall.h"
|
||||||
|
|
||||||
__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
|
__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
|
||||||
{
|
{
|
||||||
|
@ -53,17 +53,17 @@ __attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_s
|
||||||
else if ((fault_status & 0x3f) == 0x5) // Translation fault, level 1
|
else if ((fault_status & 0x3f) == 0x5) // Translation fault, level 1
|
||||||
KPrintf("reason: sect. translation level 1\n");
|
KPrintf("reason: sect. translation level 1\n");
|
||||||
else if ((fault_status & 0x3f) == 0x6) // Translation fault, level 2
|
else if ((fault_status & 0x3f) == 0x6) // Translation fault, level 2
|
||||||
KPrintf("reason: sect. translation level 2\n");
|
KPrintf("reason: sect. translation level 2\n");
|
||||||
else if ((fault_status & 0x3f) == 0x7) // Translation fault, level 3
|
else if ((fault_status & 0x3f) == 0x7) // Translation fault, level 3
|
||||||
KPrintf("reason: sect. translation level 3\n");
|
KPrintf("reason: sect. translation level 3\n");
|
||||||
else if ((fault_status & 0x3f) == 0x3d) //Section Domain fault
|
else if ((fault_status & 0x3f) == 0x3d) // Section Domain fault
|
||||||
KPrintf("reason: sect. domain\n");
|
KPrintf("reason: sect. domain\n");
|
||||||
else if ((fault_status & 0x3f) == 0x13) // Permission level 1
|
else if ((fault_status & 0x3f) == 0x13) // Permission level 1
|
||||||
KPrintf("reason: sect. permission level 1\n");
|
KPrintf("reason: sect. permission level 1\n");
|
||||||
else if ((fault_status & 0x3f) == 0x14) // Permission level 2
|
else if ((fault_status & 0x3f) == 0x14) // Permission level 2
|
||||||
KPrintf("reason: sect. permission level 2\n");
|
KPrintf("reason: sect. permission level 2\n");
|
||||||
else if ((fault_status & 0x3f) == 0x15) // Permission level 3
|
else if ((fault_status & 0x3f) == 0x15) // Permission level 3
|
||||||
KPrintf("reason: sect. permission level 3\n");
|
KPrintf("reason: sect. permission level 3\n");
|
||||||
else if ((fault_status & 0x3f) == 0x8) // External abort
|
else if ((fault_status & 0x3f) == 0x8) // External abort
|
||||||
KPrintf("reason: ext. abort\n");
|
KPrintf("reason: ext. abort\n");
|
||||||
else if ((fault_status & 0x3f) == 0x9) // Access flag fault, level 1
|
else if ((fault_status & 0x3f) == 0x9) // Access flag fault, level 1
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
SRC_FILES := gicv3.c
|
||||||
|
|
||||||
|
include $(KERNEL_ROOT)/compiler.mk
|
|
@ -1,27 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright (c) 2020 AIIT XUOS Lab
|
|
||||||
* XiUOS is licensed under Mulan PSL v2.
|
|
||||||
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
|
||||||
* You may obtain a copy of Mulan PSL v2 at:
|
|
||||||
* http://license.coscl.org.cn/MulanPSL2
|
|
||||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
|
||||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
|
||||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
|
||||||
* See the Mulan PSL v2 for more details.
|
|
||||||
*/
|
|
||||||
/**
|
|
||||||
* @file arch_gicv3.h
|
|
||||||
* @brief gicv3 operation
|
|
||||||
* @version 1.0
|
|
||||||
* @author AIIT XUOS Lab
|
|
||||||
* @date 2024.05.07
|
|
||||||
*/
|
|
||||||
/*************************************************
|
|
||||||
File name: arch_gicv3.h
|
|
||||||
Description: gicv3 operation
|
|
||||||
Others:
|
|
||||||
History:
|
|
||||||
Author: AIIT XUOS Lab
|
|
||||||
Modification:
|
|
||||||
1. Rename file
|
|
||||||
*************************************************/
|
|
|
@ -0,0 +1,295 @@
|
||||||
|
/**
|
||||||
|
* @file gicv3.c
|
||||||
|
* @brief gicv3 operation
|
||||||
|
* @version 1.0
|
||||||
|
* @author AIIT XUOS Lab
|
||||||
|
* @date 2024.05.10
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*************************************************
|
||||||
|
File name: gicv3.c
|
||||||
|
Description: gicv3 operation
|
||||||
|
Others:
|
||||||
|
History:
|
||||||
|
Author: AIIT XUOS Lab
|
||||||
|
Modification:
|
||||||
|
*************************************************/
|
||||||
|
#include "string.h"
|
||||||
|
|
||||||
|
#include "core.h"
|
||||||
|
#include "gicv3_common_opa.h"
|
||||||
|
#include "gicv3_registers.h"
|
||||||
|
|
||||||
|
static void gic_setup_ppi(uint32_t cpuid, uint32_t intid) __attribute__((unused));
|
||||||
|
static void gic_setup_spi(uint32_t intid);
|
||||||
|
|
||||||
|
static struct {
|
||||||
|
char* gicd;
|
||||||
|
char* rdist_addrs[NR_CPU];
|
||||||
|
} gicv3;
|
||||||
|
|
||||||
|
static inline uint32_t
|
||||||
|
icc_igrpen1_el1()
|
||||||
|
{
|
||||||
|
uint32_t x;
|
||||||
|
__asm__ volatile("mrs %0, S3_0_C12_C12_7" : "=r"(x));
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
w_icc_igrpen1_el1(uint32_t x)
|
||||||
|
{
|
||||||
|
__asm__ volatile("msr S3_0_C12_C12_7, %0" : : "r"(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline uint32_t
|
||||||
|
icc_pmr_el1()
|
||||||
|
{
|
||||||
|
uint32_t x;
|
||||||
|
__asm__ volatile("mrs %0, S3_0_C4_C6_0" : "=r"(x));
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
w_icc_pmr_el1(uint32_t x)
|
||||||
|
{
|
||||||
|
__asm__ volatile("msr S3_0_C4_C6_0, %0" : : "r"(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline uint32_t
|
||||||
|
icc_iar1_el1()
|
||||||
|
{
|
||||||
|
uint32_t x;
|
||||||
|
__asm__ volatile("mrs %0, S3_0_C12_C12_0" : "=r"(x));
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
w_icc_eoir1_el1(uint32_t x)
|
||||||
|
{
|
||||||
|
__asm__ volatile("msr S3_0_C12_C12_1, %0" : : "r"(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline uint32_t
|
||||||
|
icc_sre_el1()
|
||||||
|
{
|
||||||
|
uint32_t x;
|
||||||
|
__asm__ volatile("mrs %0, S3_0_C12_C12_5" : "=r"(x));
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
w_icc_sre_el1(uint32_t x)
|
||||||
|
{
|
||||||
|
__asm__ volatile("msr S3_0_C12_C12_5, %0" : : "r"(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline gicc_t* gic_get_gicc(void)
|
||||||
|
{
|
||||||
|
uint32_t base = get_arm_private_peripheral_base() + kGICCBaseOffset;
|
||||||
|
return (gicc_t*)base;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicd_write(uint32_t off, uint32_t val)
|
||||||
|
{
|
||||||
|
*(volatile uint32_t*)(gicv3.gicd + off) = val;
|
||||||
|
}
|
||||||
|
|
||||||
|
static uint32_t
|
||||||
|
gicd_read(uint32_t off)
|
||||||
|
{
|
||||||
|
return *(volatile uint32_t*)(gicv3.gicd + off);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicr_write(uint32_t cpuid, uint32_t off, uint32_t val)
|
||||||
|
{
|
||||||
|
*(volatile uint32_t*)(gicv3.rdist_addrs[cpuid] + off) = val;
|
||||||
|
}
|
||||||
|
|
||||||
|
static uint32_t
|
||||||
|
gicr_read(uint32_t cpuid, uint32_t off)
|
||||||
|
{
|
||||||
|
return *(volatile uint32_t*)(gicv3.rdist_addrs[cpuid] + off);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
giccinit()
|
||||||
|
{
|
||||||
|
w_icc_igrpen1_el1(0);
|
||||||
|
w_icc_pmr_el1(0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicdinit()
|
||||||
|
{
|
||||||
|
gicd_write(D_CTLR, 0);
|
||||||
|
|
||||||
|
uint32_t typer = gicd_read(D_TYPER);
|
||||||
|
uint32_t lines = typer & 0x1f;
|
||||||
|
|
||||||
|
printf("lines %d\n", lines);
|
||||||
|
|
||||||
|
for (int i = 0; i < lines; i++)
|
||||||
|
gicd_write(D_IGROUPR(i), ~0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicrinit(uint32_t cpuid)
|
||||||
|
{
|
||||||
|
gicr_write(cpuid, R_CTLR, 0);
|
||||||
|
|
||||||
|
w_icc_sre_el1(icc_sre_el1() | 1);
|
||||||
|
|
||||||
|
gicr_write(cpuid, R_IGROUPR0, ~0);
|
||||||
|
gicr_write(cpuid, R_IGRPMODR0, 0);
|
||||||
|
|
||||||
|
uint32_t waker = gicr_read(cpuid, R_WAKER);
|
||||||
|
gicr_write(cpuid, R_WAKER, waker & ~(1 << 1));
|
||||||
|
while (gicr_read(cpuid, R_WAKER) & (1 << 2))
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_enable()
|
||||||
|
{
|
||||||
|
gicd_write(D_CTLR, (1 << 1));
|
||||||
|
w_icc_igrpen1_el1(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void gic_init()
|
||||||
|
{
|
||||||
|
gicv3.gicd = (char*)GICV3;
|
||||||
|
for (int i = 0; i < NR_CPU; i++) {
|
||||||
|
gicv3.rdist_addrs[i] = (char*)(GICV3_REDIST + (i) * 0x20000);
|
||||||
|
}
|
||||||
|
|
||||||
|
gicdinit();
|
||||||
|
|
||||||
|
gic_setup_spi(UART0_IRQ);
|
||||||
|
gic_setup_spi(VIRTIO0_IRQ);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline uint64_t
|
||||||
|
cpuid()
|
||||||
|
{
|
||||||
|
uint64_t x;
|
||||||
|
__asm__ volatile("mrs %0, mpidr_el1" : "=r"(x));
|
||||||
|
return x & 0xff;
|
||||||
|
}
|
||||||
|
|
||||||
|
void gicv3inithart()
|
||||||
|
{
|
||||||
|
int cpu = cpuid();
|
||||||
|
|
||||||
|
giccinit();
|
||||||
|
gicrinit(cpu);
|
||||||
|
|
||||||
|
gic_setup_ppi(cpu, TIMER0_IRQ);
|
||||||
|
|
||||||
|
gic_enable();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_enable_int(uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t is = gicd_read(D_ISENABLER(intid / 32));
|
||||||
|
is |= 1 << (intid % 32);
|
||||||
|
gicd_write(D_ISENABLER(intid / 32), is);
|
||||||
|
}
|
||||||
|
|
||||||
|
int gic_int_enabled(uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t is = gicd_read(D_ISENABLER(intid / 32));
|
||||||
|
return is & (1 << (intid % 32));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_clear_pending(uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t ic = gicd_read(D_ICPENDR(intid / 32));
|
||||||
|
ic |= 1 << (intid % 32);
|
||||||
|
gicd_write(D_ICPENDR(intid / 32), ic);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_set_prio0(uint32_t intid)
|
||||||
|
{
|
||||||
|
// set priority to 0
|
||||||
|
uint32_t p = gicd_read(D_IPRIORITYR(intid / 4));
|
||||||
|
p &= ~((uint32_t)0xff << (intid % 4 * 8)); // set prio 0
|
||||||
|
gicd_write(D_IPRIORITYR(intid / 4), p);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_set_target(uint32_t intid, uint32_t cpuid)
|
||||||
|
{
|
||||||
|
uint32_t itargetsr = gicd_read(D_ITARGETSR(intid / 4));
|
||||||
|
itargetsr &= ~((uint32_t)0xff << (intid % 4 * 8));
|
||||||
|
gicd_write(D_ITARGETSR(intid / 4), itargetsr | ((uint32_t)(1 << cpuid) << (intid % 4 * 8)));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicr_enable_int(uint32_t cpuid, uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t is = gicr_read(cpuid, R_ISENABLER0);
|
||||||
|
is |= 1 << (intid % 32);
|
||||||
|
gicr_write(cpuid, R_ISENABLER0, is);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicr_clear_pending(uint32_t cpuid, uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t ic = gicr_read(cpuid, R_ICPENDR0);
|
||||||
|
ic |= 1 << (intid % 32);
|
||||||
|
gicr_write(cpuid, R_ICPENDR0, ic);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gicr_set_prio0(uint32_t cpuid, uint32_t intid)
|
||||||
|
{
|
||||||
|
uint32_t p = gicr_read(cpuid, R_IPRIORITYR(intid / 4));
|
||||||
|
p &= ~((uint32_t)0xff << (intid % 4 * 8)); // set prio 0
|
||||||
|
gicr_write(cpuid, R_IPRIORITYR(intid / 4), p);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_setup_ppi(uint32_t cpuid, uint32_t intid)
|
||||||
|
{
|
||||||
|
gicr_set_prio0(cpuid, intid);
|
||||||
|
gicr_clear_pending(cpuid, intid);
|
||||||
|
gicr_enable_int(cpuid, intid);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
gic_setup_spi(uint32_t intid)
|
||||||
|
{
|
||||||
|
gic_set_prio0(intid);
|
||||||
|
|
||||||
|
// all interrupts are handled by cpu0
|
||||||
|
gic_set_target(intid, 0);
|
||||||
|
|
||||||
|
gic_clear_pending(intid);
|
||||||
|
gic_enable_int(intid);
|
||||||
|
}
|
||||||
|
|
||||||
|
// irq from iar
|
||||||
|
int gic_iar_irq(uint32_t iar)
|
||||||
|
{
|
||||||
|
return iar & 0x3ff;
|
||||||
|
}
|
||||||
|
|
||||||
|
// interrupt acknowledge register:
|
||||||
|
// ask GIC what interrupt we should serve.
|
||||||
|
uint32_t
|
||||||
|
gic_iar()
|
||||||
|
{
|
||||||
|
return icc_iar1_el1();
|
||||||
|
}
|
||||||
|
|
||||||
|
// tell GIC we've served this IRQ.
|
||||||
|
void gic_eoi(uint32_t iar)
|
||||||
|
{
|
||||||
|
w_icc_eoir1_el1(iar);
|
||||||
|
}
|
|
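One detail of the gicv3.c code above worth spelling out: GICD_IPRIORITYR and GICD_ITARGETSR pack one byte per interrupt into 32-bit registers, which is where the intid / 4 register index and (intid % 4) * 8 bit shift come from. For example, INTID 33 lands in register 8 at byte lane 1 (bit offset 8). A small sketch (gicd_byte_shift is an illustrative helper, not part of this commit):

// Byte-lane arithmetic used by gic_set_prio0()/gic_set_target() above.
static inline uint32_t gicd_byte_shift(uint32_t intid)
{
    return (intid % 4) * 8;   // INTID 33 -> lane 1 -> shift 8
}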
@ -0,0 +1,200 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2020 AIIT XUOS Lab
|
||||||
|
* XiUOS is licensed under Mulan PSL v2.
|
||||||
|
* You can use this software according to the terms and conditions of the Mulan PSL v2.
|
||||||
|
* You may obtain a copy of Mulan PSL v2 at:
|
||||||
|
* http://license.coscl.org.cn/MulanPSL2
|
||||||
|
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||||
|
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||||
|
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||||
|
* See the Mulan PSL v2 for more details.
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* @file gicv3_common_opa.h
|
||||||
|
* @brief gicv3 operation
|
||||||
|
* @version 1.0
|
||||||
|
* @author AIIT XUOS Lab
|
||||||
|
* @date 2024.05.07
|
||||||
|
*/
|
||||||
|
/*************************************************
|
||||||
|
File name: gicv3_common_opa.h
|
||||||
|
Description: gicv3 operation
|
||||||
|
Others:
|
||||||
|
History:
|
||||||
|
Author: AIIT XUOS Lab
|
||||||
|
Modification:
|
||||||
|
1. Rename file
|
||||||
|
*************************************************/
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <stdbool.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include <mmio_access.h>
|
||||||
|
|
||||||
|
//! @addtogroup gic
|
||||||
|
//! @{
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Definitions
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
//! @brief Options for sending a software generated interrupt.
|
||||||
|
//!
|
||||||
|
//! These options are used for the @a filter_list parameter of the gic_send_sgi()
|
||||||
|
//! function. They control how to select which CPUs that the interrupt is
|
||||||
|
//! sent to.
|
||||||
|
enum _gicd_sgi_filter {
|
||||||
|
//! Forward the interrupt to the CPU interfaces specified in the @a target_list parameter.
|
||||||
|
kGicSgiFilter_UseTargetList = 0,
|
||||||
|
|
||||||
|
//! Forward the interrupt to all CPU interfaces except that of the processor that requested
|
||||||
|
//! the interrupt.
|
||||||
|
kGicSgiFilter_AllOtherCPUs = 1,
|
||||||
|
|
||||||
|
//! Forward the interrupt only to the CPU interface of the processor that requested the
|
||||||
|
//! interrupt.
|
||||||
|
kGicSgiFilter_OnlyThisCPU = 2
|
||||||
|
};
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// API
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
#if defined(__cplusplus)
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
__attribute__((__always_inline__)) static inline uint32_t get_arm_private_peripheral_base()
|
||||||
|
{
|
||||||
|
return MMIO_P2V(0x00A00000);
|
||||||
|
}
|
||||||
|
|
||||||
|
__attribute__((__always_inline__)) static inline uint32_t irq_get_register_offset(uint32_t irqID)
|
||||||
|
{
|
||||||
|
return irqID / 32;
|
||||||
|
}
|
||||||
|
|
||||||
|
__attribute__((__always_inline__)) static inline uint32_t irq_get_bit_offset(uint32_t irqID)
|
||||||
|
{
|
||||||
|
return irqID & 0x1f;
|
||||||
|
}
|
||||||
|
|
||||||
|
__attribute__((__always_inline__)) static inline uint32_t irq_get_bit_mask(uint32_t irqID)
|
||||||
|
{
|
||||||
|
return 1 << irq_get_bit_offset(irqID);
|
||||||
|
}
|
||||||
|
|
||||||
|
//! @name Initialization
|
||||||
|
//@{
|
||||||
|
//! @brief Init interrupt handling.
|
||||||
|
//!
|
||||||
|
//! This function is intended to be called only by the primary CPU init code, so it will
|
||||||
|
//! only be called once during system bootup.
|
||||||
|
//!
|
||||||
|
//! Also inits the current CPU. You don't need to call gic_init_cpu() separately.
|
||||||
|
//!
|
||||||
|
//! @post The interrupt distributor and the current CPU interface are enabled. All interrupts
|
||||||
|
//! that were pending are cleared, and all interrupts are made secure (group 0).
|
||||||
|
void gic_init(void);
|
||||||
|
|
||||||
|
//! @brief Init the current CPU's GIC interface.
|
||||||
|
//!
|
||||||
|
//! @post Enables the CPU interface and sets the priority mask to 255. Interrupt preemption
|
||||||
|
//! is disabled by setting the Binary Point to a value of 7.
|
||||||
|
void gic_init_cpu(void);
|
||||||
|
//@}
|
||||||
|
|
||||||
|
//! @name GIC Interrupt Distributor Functions
|
||||||
|
//@{
|
||||||
|
//! @brief Enable or disable the GIC Distributor.
|
||||||
|
//!
|
||||||
|
//! Enables or disables the GIC distributor passing both secure (group 0) and non-secure
|
||||||
|
//! (group 1) interrupts to the CPU interfaces.
|
||||||
|
//!
|
||||||
|
//! @param enableIt Pass true to enable or false to disable.
|
||||||
|
void gic_enable();
|
||||||
|
|
||||||
|
//! @brief Set the security mode for an interrupt.
|
||||||
|
//!
|
||||||
|
//! @param irqID The interrupt number.
|
||||||
|
//! @param isSecure Whether the interrupt is taken to secure mode.
|
||||||
|
void gic_set_irq_security(uint32_t irqID, bool isSecure);
|
||||||
|
|
||||||
|
//! @brief Enable or disable an interrupt.
|
||||||
|
//!
|
||||||
|
//! @param irqID The number of the interrupt to control.
|
||||||
|
//! @param isEnabled Pass true to enable or false to disable.
|
||||||
|
void gic_enable_irq(uint32_t irqID, bool isEnabled);
|
||||||
|
|
||||||
|
//! @brief Set whether a CPU will receive a particular interrupt.
|
||||||
|
//!
|
||||||
|
//! @param irqID The interrupt number.
|
||||||
|
//! @param cpuNumber The CPU number. The first CPU core is 0.
|
||||||
|
//! @param enableIt Whether to send the interrupt to the specified CPU. Pass true to enable
|
||||||
|
//! or false to disable.
|
||||||
|
void gic_set_cpu_target(uint32_t irqID, unsigned cpuNumber, bool enableIt);
|
||||||
|
|
||||||
|
//! @brief Set an interrupt's priority.
|
||||||
|
//!
|
||||||
|
//! @param irq_id The interrupt number.
|
||||||
|
//! @param priority The priority for the interrupt. In the range of 0 through 0xff, with
|
||||||
|
//! 0 being the highest priority.
|
||||||
|
void gic_set_irq_priority(uint32_t irq_id, uint32_t priority);
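As a hedged illustration only (not part of the commit), the distributor calls declared above can be combined to route and unmask a single shared peripheral interrupt; the interrupt number 75 and CPU 0 are placeholder values.

// Hedged sketch: configure one SPI and hand it to CPU 0.
static inline void example_enable_one_spi(void)
{
    const uint32_t irq = 75;          // placeholder interrupt number

    gic_set_irq_priority(irq, 0);     // 0 is the highest priority
    gic_set_irq_security(irq, false); // keep it in the non-secure group
    gic_set_cpu_target(irq, 0, true); // forward it to CPU 0
    gic_enable_irq(irq, true);        // unmask at the distributor
}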

//! @brief Send a software generated interrupt to a specific CPU.
//!
//! @param irq_id The interrupt number to send.
//! @param target_list Each bit indicates a CPU to which the interrupt will be forwarded.
//!     Bit 0 is CPU 0, bit 1 is CPU 1, and so on. If the value is 0, then the interrupt
//!     will not be forwarded to any CPUs. This parameter is only used if @a filter_list
//!     is set to #kGicSgiFilter_UseTargetList.
//! @param filter_list One of the enums of the #_gicd_sgi_filter enumeration. The selected
//!     option determines which CPUs the interrupt will be sent to. If the value
//!     is #kGicSgiFilter_UseTargetList, then the @a target_list parameter is used.
void gic_send_sgi(uint32_t irq_id, uint32_t target_list, uint32_t filter_list);
//@}
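A hedged sketch (not part of the commit) of sending a software generated interrupt with the filter options defined earlier in this header; SGI number 1 and the two-CPU target mask are illustrative values only.

// Hedged sketch: raise SGI 1 on selected CPUs.
static inline void example_send_sgi(void)
{
    // target_list is a CPU bitmask and is only consulted for kGicSgiFilter_UseTargetList.
    gic_send_sgi(1, 0x3, kGicSgiFilter_UseTargetList); // CPUs 0 and 1

    // Alternatively, broadcast to every CPU except the caller.
    gic_send_sgi(1, 0, kGicSgiFilter_AllOtherCPUs);
}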

//! @name GIC CPU Interface Functions
//@{
//! @brief Enable or disable the interface to the GIC for the current CPU.
//!
//! @param enableIt Pass true to enable or false to disable.
void gic_cpu_enable(bool enableIt);

//! @brief Set the mask of which interrupt priorities the CPU will receive.
//!
//! @param priority The lowest priority that will be passed to the current CPU. Pass 0xff to
//!     allow all priority interrupts to signal the CPU.
void gic_set_cpu_priority_mask(uint32_t priority);

//! @brief Acknowledge starting of interrupt handling and get the interrupt number.
//!
//! Normally, this function is called at the beginning of the IRQ handler. It tells the GIC
//! that you are starting to handle an interrupt, and returns the number of the interrupt you
//! need to handle. After the interrupt is handled, you should call gic_write_end_of_irq()
//! to signal that the interrupt is completely handled.
//!
//! In some cases, a spurious interrupt might happen. One possibility is if another CPU handles
//! the interrupt. When a spurious interrupt occurs, the end of the interrupt should be indicated
//! but nothing else.
//!
//! @return The number for the highest priority interrupt available for the calling CPU. If
//!     the return value is 1022 or 1023, a spurious interrupt has occurred.
uint32_t gic_read_irq_ack(void);

//! @brief Signal the end of handling an interrupt.
//!
//! @param irq_id The number of the interrupt for which handling has finished.
void gic_write_end_of_irq(uint32_t irq_id);
//@}
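The acknowledge/end-of-interrupt contract documented above, including the spurious IDs 1022 and 1023, can be exercised as in this hedged sketch; handle_irq is a hypothetical dispatch hook, not an API from this header.

// Hedged sketch: ack, filter spurious IDs, then always EOI.
static inline void example_irq_entry(void)
{
    uint32_t irq_id = gic_read_irq_ack();   // start of interrupt handling

    if (irq_id != 1022 && irq_id != 1023) { // 1022/1023 indicate a spurious interrupt
        handle_irq(irq_id);                 // hypothetical handler dispatch
    }

    gic_write_end_of_irq(irq_id);           // completion must be signalled either way
}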

#if defined(__cplusplus)
}
#endif

//! @}

////////////////////////////////////////////////////////////////////////////////
// EOF
////////////////////////////////////////////////////////////////////////////////
@ -0,0 +1,111 @@
/*
 * include/linux/irqchip/gicv3_registers.h
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/**
 * @file gicv3_registers.c
 * @brief gicv3 registers
 * @version 1.0
 * @author AIIT XUOS Lab
 * @date 2024.05.09
 */

/*************************************************
File name: gicv3_registers.c
Description: gicv3 registers
Others:
History:
Author: AIIT XUOS Lab
Modification:
1. Rename the file
*************************************************/

#ifndef __LINUX_IRQCHIP_ARM_GIC_H
#define __LINUX_IRQCHIP_ARM_GIC_H

#define D_CTLR 0x0
#define D_TYPER 0x4
#define D_IGROUPR(n) (0x80 + (uint64)(n) * 4)
#define D_ISENABLER(n) (0x100 + (uint64)(n) * 4)
#define D_ICENABLER(n) (0x180 + (uint64)(n) * 4)
#define D_ISPENDR(n) (0x200 + (uint64)(n) * 4)
#define D_ICPENDR(n) (0x280 + (uint64)(n) * 4)
#define D_IPRIORITYR(n) (0x400 + (uint64)(n) * 4)
#define D_ITARGETSR(n) (0x800 + (uint64)(n) * 4)
#define D_ICFGR(n) (0xc00 + (uint64)(n) * 4)

#define R_CTLR 0x0
#define R_WAKER 0x14

#define SGI_BASE 0x10000
#define R_IGROUPR0 (SGI_BASE + 0x80)
#define R_ISENABLER0 (SGI_BASE + 0x100)
#define R_ICENABLER0 (SGI_BASE + 0x180)
#define R_ICPENDR0 (SGI_BASE + 0x280)
#define R_IPRIORITYR(n) (SGI_BASE + 0x400 + (n) * 4)
#define R_ICFGR0 (SGI_BASE + 0xc00)
#define R_ICFGR1 (SGI_BASE + 0xc04)
#define R_IGRPMODR0 (SGI_BASE + 0xd00)

#endif

#include <stdint.h>

enum _gic_base_offsets {
    kGICDBaseOffset = 0x10000, //!< GIC distributor offset.
    kGICCBaseOffset = 0x100 //!< GIC CPU interface offset.
};

//! @brief GIC distributor registers.
//!
//! Uses the GICv2 register names, but does not include GICv2 registers.
//!
//! The IPRIORITYRn and ITARGETSRn registers are byte accessible, so their types are uint8_t
//! instead of uint32_t to reflect this. These members are indexed directly with the interrupt
//! number.
struct _gicd_registers {
    uint32_t CTLR; //!< Distributor Control Register.
    uint32_t TYPER; //!< Interrupt Controller Type Register.
    uint32_t IIDR; //!< Distributor Implementer Identification Register.
    uint32_t _reserved0[29];
    uint32_t IGROUPRn[8]; //!< Interrupt Group Registers.
    uint32_t _reserved1[24];
    uint32_t ISENABLERn[32]; //!< Interrupt Set-Enable Registers.
    uint32_t ICENABLERn[32]; //!< Interrupt Clear-Enable Registers.
    uint32_t ISPENDRn[32]; //!< Interrupt Set-Pending Registers.
    uint32_t ICPENDRn[32]; //!< Interrupt Clear-Pending Registers.
    uint32_t ICDABRn[32]; //!< Active Bit Registers.
    uint32_t _reserved2[32];
    uint8_t IPRIORITYRn[255 * sizeof(uint32_t)]; //!< Interrupt Priority Registers. (Byte accessible)
    uint32_t _reserved3;
    uint8_t ITARGETSRn[255 * sizeof(uint32_t)]; //!< Interrupt Processor Targets Registers. (Byte accessible)
    uint32_t _reserved4;
    uint32_t ICFGRn[64]; //!< Interrupt Configuration Registers.
    uint32_t _reserved5[128];
    uint32_t SGIR; //!< Software Generated Interrupt Register
};

//! @brief Bitfield constants for the GICD_CTLR register.
enum _gicd_ctlr_fields {
    kBM_GICD_CTLR_EnableGrp1 = (1 << 1),
    kBM_GICD_CTLR_EnableGrp0 = (1 << 0)
};

enum _gicd_sgir_fields {
    kBP_GICD_SGIR_TargetListFilter = 24,
    kBM_GICD_SGIR_TargetListFilter = (0x3 << kBP_GICD_SGIR_TargetListFilter),

    kBP_GICD_SGIR_CPUTargetList = 16,
    kBM_GICD_SGIR_CPUTargetList = (0xff << kBP_GICD_SGIR_CPUTargetList),

    kBP_GICD_SGIR_NSATT = 15,
    kBM_GICD_SGIR_NSATT = (1 << kBP_GICD_SGIR_NSATT),

    kBP_GICD_SGIR_SGIINTID = 0,
    kBM_GICD_SGIR_SGIINTID = 0xf
};
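Because IPRIORITYRn and ITARGETSRn are declared above as byte arrays indexed by interrupt number, a single interrupt's priority or target can be written without a read-modify-write of the surrounding word. A hedged sketch (not part of the commit); the gic_dist pointer and interrupt number are illustrative.

// Hedged sketch: byte-wise priority/target access through the register struct.
static inline void example_byte_access(volatile struct _gicd_registers* gic_dist)
{
    const uint32_t irq = 75;           // placeholder interrupt number

    gic_dist->IPRIORITYRn[irq] = 0x80; // lower value means higher priority
    gic_dist->ITARGETSRn[irq] = 0x01;  // one bit per CPU interface; bit 0 is CPU 0
}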

@ -47,7 +47,7 @@ Author: AIIT XUOS Lab
Modification:
*************************************************/

.code 64
.arch armv8-a
.section ".text","ax"

.global cpu_get_current

@ -61,15 +61,15 @@ _spinlock_lock:
ldxr x1, [x0] // check if the spinlock is currently unlocked
cmp x1, #UNLOCKED

wfene // wait for an event signal
wfe // wait for an event signal
bne _spinlock_lock

mrs x1, mpidr_el1 // get our CPU ID
and x1, x1, #3
stxr x2, x1, [x0]
stxr w2, x1, [x0]
cbnz x2, _spinlock_lock // check if the write was successful, if the write failed, start over

dmb // Ensure that accesses to shared resource have completed
dmb ish // Ensure that accesses to shared resource have completed

mov x0, #0
ret

@ -89,12 +89,12 @@ ldr x2, [x0]
cmp x1, x2
bne 1f //doesn't match,jump to 1

dmb
dmb ish

mov x1, #UNLOCKED
str x1, [x0]

dsb //Ensure that no instructions following the barrier execute until
dsb ish //Ensure that no instructions following the barrier execute until
// all memory accesses prior to the barrier have completed.
@ -86,9 +86,9 @@ enum _ls_interrupts {
    LS_INT_GPIO3 = 69, //!< GPIO3

    LS_INT_FLETIMER1 = 76, //!< ORed all Flextimer 1 interrupt signals
    LS_INT_FLETIMER1 = 77, //!< ORed all Flextimer 2 interrupt signals
    LS_INT_FLETIMER2 = 77, //!< ORed all Flextimer 2 interrupt signals
    LS_INT_FLETIMER1 = 78, //!< ORed all Flextimer 3 interrupt signals
    LS_INT_FLETIMER3 = 78, //!< ORed all Flextimer 3 interrupt signals
    LS_INT_FLETIMER1 = 79, //!< ORed all Flextimer 4 interrupt signals
    LS_INT_FLETIMER4 = 79, //!< ORed all Flextimer 4 interrupt signals

    LS_INT_I2C5_6 = 106, //!< I2C5 and I2C6 ORed
    LS_INT_I2C7_8 = 107, //!< I2C7 and I2C8 ORed
@ -29,7 +29,7 @@ Modification:
#include <string.h>

#include "core.h"
#include "gicv2_common_opa.h"
#include "gicv3_common_opa.h"
#include "trap_common.h"

#include "log.h"

@ -101,7 +101,7 @@ static void _sys_irq_init(int cpu_id)
    }
    /* active hardware irq responder */
    gic_init();
    xizi_trap_driver.switch_hw_irqtbl((uint64_t*)&_vector_jumper);
    xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
}

static void _cpu_irq_enable(void)

@ -116,27 +116,22 @@ static void _cpu_irq_disable(void)

static void _single_irq_enable(int irq, int cpu, int prio)
{
    gic_set_irq_priority(irq, prio);
    gic_enable();
    gic_set_irq_security(irq, false); // set IRQ as non-secure
    gic_set_cpu_target(irq, cpu, true);
    gic_enable_irq(irq, true);
}

static void _single_irq_disable(int irq, int cpu)
{
    gic_enable_irq(irq, false);
    gic_set_cpu_target(irq, cpu, false);
}

#define VBAR
static inline uint64_t _switch_hw_irqtbl(uint64_t* new_tbl_base)
static inline uint32_t _switch_hw_irqtbl(uint32_t* new_tbl_base)
{
    uint64_t old_tbl_base = 0;
    uint32_t old_tbl_base = 0;
    // get old irq table base addr
    asm volatile("mrs %0, vbar_el1" : "=r" (old_tbl_base));
    __asm__ volatile("mrs %0, vbar_el1" : "=r"(old_tbl_base));

    // set new irq table base addr
    asm volatile("msr vbar_el1, %0" : : "r" (new_tbl_base));
    __asm__ volatile("msr vbar_el1, %0" : : "r"(new_tbl_base));

    return old_tbl_base;
}

@ -146,29 +141,6 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
    xizi_trap_driver.sw_irqtbl[irq].handler = handler;
}

static bool _send_sgi(uint32_t irq, uint32_t bitmask, enum SgiFilterType type)
{
    if (bitmask > (1 << NR_CPU) - 1) {
        return false;
    }

    enum _gicd_sgi_filter sgi_filter;
    switch (type) {
    case SgiFilter_TargetList:
        sgi_filter = kGicSgiFilter_UseTargetList;
        break;
    case SgiFilter_AllOtherCPUs:
        sgi_filter = kGicSgiFilter_AllOtherCPUs;
        break;
    default:
        sgi_filter = kGicSgiFilter_OnlyThisCPU;
        break;
    }
    gic_send_sgi(irq, bitmask, sgi_filter);

    return true;
}

static uint32_t _hw_before_irq()
{

@ -185,7 +157,7 @@ static uint32_t _hw_cur_int_num(uint32_t int_info)
    return int_info & 0x1FF;
}

static uint32_t _hw_cur_int_cpu(uint32_t int_info)
static __attribute__((unused)) uint32_t _hw_cur_int_cpu(uint32_t int_info)
{
    return (int_info >> 10) & 0x7;
}

@ -195,7 +167,7 @@ static void _hw_after_irq(uint32_t int_info)
    gic_write_end_of_irq(int_info);
}

static int _is_interruptable(void)
static __attribute__((unused)) int _is_interruptable(void)
{
    uint32_t val;

@ -217,15 +189,12 @@ static struct XiziTrapDriver xizi_trap_driver = {
    .cpu_irq_disable = _cpu_irq_disable,
    .single_irq_enable = _single_irq_enable,
    .single_irq_disable = _single_irq_disable,
    .switch_hw_irqtbl = _switch_hw_irqtbl,
    //.switch_hw_irqtbl = _switch_hw_irqtbl,

    .bind_irq_handler = _bind_irq_handler,
    .send_sgi = _send_sgi,

    .is_interruptable = _is_interruptable,
    .hw_before_irq = _hw_before_irq,
    .hw_cur_int_num = _hw_cur_int_num,
    .hw_cur_int_cpu = _hw_cur_int_cpu,
    .hw_after_irq = _hw_after_irq,
};
@ -80,11 +80,11 @@ user_trap_swi_enter:
stp x3, x4, [sp, #-16]!
stp x1, x2, [sp, #-16]!

mrs x2, spsr_el1
// mrs x2, spsr_el1
str x2, [sp, #-8]
//str x2, [sp, #-8]
str x30, [sp, #-8]
//str x30, [sp, #-8]
stp sp, elr_el1, [sp, #-16]!
//stp sp, elr_el1, [sp, #-16]!
str sp, [sp, #-8]
//str sp, [sp, #-8]

// Call syscall handler
mov x0, sp
@ -1,4 +1,10 @@
ifneq ($(findstring $(BOARD), ok1028a-c), )
SRC_DIR := armv8-a
endif
ifneq ($(findstring $(BOARD), imx6q-sabrelite zynq7000-zc702), )
SRC_DIR := armv7-a
endif

include $(KERNEL_ROOT)/compiler.mk
@ -40,9 +40,9 @@ extern uint64_t kernel_data_begin[];

#define NR_PDE_ENTRIES 512
#define L1_TYPE_SEC (2 << 0)
#define L1_SECT_DEV ((0B00) << 2) // Device memory
#define L1_SECT_AP0 (1 << 6) // Data Access Permissions
uint64_t boot_ptable[NR_PTE_ENTRIES] __attribute__((aligned(0x4000))) = { 0 };
uint64_t boot_pgdir[NR_PDE_ENTRIES] __attribute__((aligned(0x4000))) = { 0 };

static void build_boot_pgdir()
{

@ -68,24 +68,23 @@ static void build_boot_pgdir()
    }
}

static void load_boot_pgdir()
{
    uint64_t val;

    // DACR_W(0x55555555); // set domain access control as client
    TTBCR_W(0x0);
    // TTBCR_W(0x0);
    TTBR0_W((uint64_t)boot_pgdir);
    // TTBR0_W((uint64_t)boot_pgdir);
    TTBR0_W(0x0);
    TTBR1_W((uint64_t)boot_pgdir);
    // Enable paging using read/modify/write
    SCTLR_R(val);
    val |= (1 << 0); // EL1 and EL0 stage 1 address translation enabled.
    val |= (1 << 1); // Alignment check enable
    val |= (1 << 2); // Cacheability control, for data caching.
    val |= (1 << 12); // Instruction access Cacheability control
    val |= (1 << 19); // forced to XN for the EL1&0 translation regime.

    SCTLR_W(val);

    // flush all TLB
@ -33,7 +33,6 @@ Modification:
#include "memlayout.h"
#include "page_table_entry.h"

#define TCR_IPS (0 << 32)
#define TCR_TG1_4K (0b10 << 30)
#define TCR_SH1_INNER (0b11 << 28)

@ -41,16 +40,14 @@ Modification:
#define TCR_TG0_4K (0 << 14)
#define TCR_SH0_INNER (0b11 << 12)
#define TCR_ORGN0_IRGN0_WRITEBACK_WRITEALLOC ((0b01 << 10) | (0b01 << 8))
#define TCR_VALUE \
    (TCR_IPS | TCR_TG1_4K | TCR_SH1_INNER | TCR_ORGN1_IRGN1_WRITEBACK_WRITEALLOC | TCR_TG0_4K | TCR_SH0_INNER | TCR_ORGN0_IRGN0_WRITEBACK_WRITEALLOC)

enum AccessPermission {
    AccessPermission_NoAccess = 0,
    AccessPermission_KernelOnly = 1, // EL1
    AccessPermission_Reserved = 2,
    AccessPermission_KernelUser = 3, // EL1&EL0
};

void GetDevPteAttr(uintptr_t* attr);

@ -59,32 +56,37 @@ void GetUsrDevPteAttr(uintptr_t* attr);
void GetKernPteAttr(uintptr_t* attr);
void GetPdeAttr(uintptr_t* attr);

/*
Enable MMU, cache, write buffer, etc.
*/
#define SCTLR_R(val) __asm__ volatile("mrs %0, sctlr_el1" : "=r"(val))
#define SCTLR_W(val) __asm__ volatile("msr sctlr_el1, %0" ::"r"(val))

/*
Read and write mmu pagetable register base addr
*/
#define TTBR0_R(val) __asm__ volatile("mrs %0, ttbr0_el1" : "=r"(val))
#define TTBR0_W(val) __asm__ volatile("msr ttbr0_el1, %0" ::"r"(val))

/*
Read and write mmu pagetable register base addr
*/
#define TTBR1_R(val) __asm__ volatile("mrs %0, ttbr1_el1" : "=r"(val))
#define TTBR1_W(val) __asm__ volatile("msr ttbr1_el1, %0" ::"r"(val))

/*
TTBCR is used for choosing TTBR0 and TTBR1 as page table register.
When TTBCR is set to 0, TTBR0 is selected by default.
*/
#define TTBCR_R(val) __asm__ volatile("mrs %0, ttbcr_el1" : "=r"(val))
// #define TTBCR_R(val) __asm__ volatile("mrs %0, ttbcr_el1" : "=r"(val))
#define TTBCR_W(val) __asm__ volatile("msr ttbcr_el1, %0" :: "r"(val))
// #define TTBCR_W(val) __asm__ volatile("msr ttbcr_el1, %0" ::"r"(val))

/*
DACR registers are used to control memory privilege.
The domain value is usually 0x01. The memory privilege will be controlled by pte AP/APX
*/
// #define DACR_R(val) __asm__ volatile("mrs %0, dacr_el1" : "=r"(val))
// #define DACR_W(val) __asm__ volatile("msr dacr_el1, %0" :: "r"(val))

/*
Flush TLB when loading a new page table.

@ -97,15 +99,7 @@ When nG is set in the pte attribute, the process is assigned an ASID, which is s
When the process switches, a TLB flush is no longer required.
*/
#define CONTEXTIDR_R(val) __asm__ volatile("mrs %0, contextidr_el1" : "=r"(val))
#define CONTEXTIDR_W(val) __asm__ volatile("msr contextidr_el1, %0" ::"r"(val))
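The ASID note above can be made concrete with a small, hedged sketch (not part of the commit): on a context switch the kernel publishes the incoming process's ASID through CONTEXTIDR_EL1 instead of flushing the TLB; the 8-bit mask and the assumption that user mappings are marked nG are illustrative only.

// Hedged sketch: retag translations instead of flushing on a process switch.
static inline void example_switch_asid(uint64_t asid)
{
    CONTEXTIDR_W(asid & 0xff); // publish the ASID; nG user mappings stay tagged,
                               // so stale entries from other processes never hit
}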

/* virtual and physical addr translate */
#define V2P(a) ((uint64_t)((uint64_t)(a)-KERN_OFFSET))
#define P2V(a) ((void*)((void*)(a) + KERN_OFFSET))

#define V2P_WO(x) ((x)-KERN_OFFSET) // same as V2P, but without casts
#define P2V_WO(x) ((x) + KERN_OFFSET) // same as V2P, but without casts

#ifndef __ASSEMBLER__
#include <stdint.h>
@ -38,24 +38,6 @@ Modification:
// extern struct MmuCommonDone mmu_common_done;
static struct MmuDriverRightGroup right_group;

void load_pgdir_critical(uintptr_t pgdir_paddr, struct TraceTag* intr_driver_tag)
{
    /* get cache driver */
    struct ICacheDone* p_icache_done = AchieveResource(&right_group.icache_driver_tag);
    struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);

    /* get intr driver */
    struct XiziTrapDriver* p_intr_driver = AchieveResource(intr_driver_tag);

    p_intr_driver->cpu_irq_disable();
    TTBR0_W((uint64_t)pgdir_paddr);
    CLEARTLB(0);
    p_icache_done->invalidateall();
    p_dcache_done->flushall();
    p_intr_driver->cpu_irq_enable();
}

void load_pgdir(uintptr_t pgdir_paddr)
{
    /* get cache driver */

@ -70,7 +52,7 @@ void load_pgdir(uintptr_t pgdir_paddr)

__attribute__((always_inline)) inline static void _tlb_flush(uintptr_t va)
{
    __asm__ volatile("tlbi vae1is, %0" ::"r"(va));
}

static void tlb_flush_range(uintptr_t vstart, int len)

@ -94,7 +76,6 @@ static struct MmuCommonDone mmu_common_done = {
    .MmuUsrDevPteAttr = GetUsrDevPteAttr,
    .MmuKernPteAttr = GetKernPteAttr,

    .LoadPgdirCrit = load_pgdir_critical,
    .LoadPgdir = load_pgdir,
    .TlbFlushAll = tlb_flush_all,
    .TlbFlush = tlb_flush_range,
@ -72,10 +72,10 @@ Modification:
#define USER_IPC_SPACE_TOP (USER_MEM_TOP - USER_STACK_SIZE)

/* Kernel memory layout */
#define KERN_MEM_BASE (0xffff000000000000) // First kernel virtual address
#define KERN_MEM_BASE (0xffff000000000000ULL) // First kernel virtual address
#define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE)

#define V2P(a) (((uint64)(a)) - KERN_MEM_BASE)
#define V2P(a) (((uint64_t)(a)) - KERN_MEM_BASE)
#define P2V(a) ((void *)(((char *)(a)) + KERN_MEM_BASE))

#define V2P_WO(x) ((x) - KERN_MEM_BASE) // same as V2P, but without casts
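A quick worked example (not part of the commit) of the direct-map arithmetic these macros encode, using the KERN_MEM_BASE value above; the physical address is an arbitrary illustration.

// Hedged sketch: both directions are pure offset arithmetic against KERN_MEM_BASE.
static inline void example_v2p_p2v(void)
{
    void* kva = P2V(0x40080000); // -> 0xffff000040080000 in the kernel window
    uint64_t pa = V2P(kva);      // -> back to 0x40080000
    (void)pa;
}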

@ -31,67 +31,67 @@ Modification:

void GetUsrPteAttr(uintptr_t* attr)
{
    static char init = 0;
    // static char init = 0;
    static PageTblEntry usr_pte_attr;
    // static PageTblEntry usr_pte_attr;
    if (init == 0) {
    // if (init == 0) {
        init = 1;
        // init = 1;

        usr_pte_attr.entry = 0;
        // usr_pte_attr.entry = 0;
        usr_pte_attr.desc_type = PAGE_4K;
        // usr_pte_attr.desc_type = PAGE_4K;
        usr_pte_attr.B = 1;
        // usr_pte_attr.B = 1;
        usr_pte_attr.C = 1;
        // usr_pte_attr.C = 1;
        usr_pte_attr.S = 1;
        // usr_pte_attr.S = 1;
        usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
        // usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
    }
    // }
    *attr = usr_pte_attr.entry;
    // *attr = usr_pte_attr.entry;
}

void GetUsrDevPteAttr(uintptr_t* attr)
{
    static char init = 0;
    // static char init = 0;
    static PageTblEntry usr_pte_attr;
    // static PageTblEntry usr_pte_attr;
    if (init == 0) {
    // if (init == 0) {
        init = 1;
        // init = 1;

        usr_pte_attr.entry = 0;
        // usr_pte_attr.entry = 0;
        usr_pte_attr.desc_type = PAGE_4K;
        // usr_pte_attr.desc_type = PAGE_4K;
        usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
        // usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
    }
    // }
    *attr = usr_pte_attr.entry;
    // *attr = usr_pte_attr.entry;
}

void GetDevPteAttr(uintptr_t* attr)
{
    static char init = 0;
    // static char init = 0;
    static PageTblEntry dev_pte_attr;
    // static PageTblEntry dev_pte_attr;
    if (init == 0) {
    // if (init == 0) {
        init = 1;
        // init = 1;

        dev_pte_attr.entry = 0;
        // dev_pte_attr.entry = 0;
        dev_pte_attr.desc_type = PAGE_4K;
        // dev_pte_attr.desc_type = PAGE_4K;
        dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
        // dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
    }
    // }
    *attr = dev_pte_attr.entry;
    // *attr = dev_pte_attr.entry;
}

void GetKernPteAttr(uintptr_t* attr)
{
    static char init = 0;
    // static char init = 0;
    static PageTblEntry kern_pte_attr;
    // static PageTblEntry kern_pte_attr;
    if (init == 0) {
    // if (init == 0) {
        init = 1;
        // init = 1;

        kern_pte_attr.entry = 0;
        // kern_pte_attr.entry = 0;
        kern_pte_attr.desc_type = PAGE_4K;
        // kern_pte_attr.desc_type = PAGE_4K;
        kern_pte_attr.B = 1;
        // kern_pte_attr.B = 1;
        kern_pte_attr.C = 1;
        // kern_pte_attr.C = 1;
        kern_pte_attr.S = 1;
        // kern_pte_attr.S = 1;
        kern_pte_attr.AP1_0 = AccessPermission_KernelOnly;
        // kern_pte_attr.AP1_0 = AccessPermission_KernelOnly;
    }
    // }
    *attr = kern_pte_attr.entry;
    // *attr = kern_pte_attr.entry;
}

void GetPdeAttr(uintptr_t* attr)
{
    *attr = PAGE_DIR_COARSE;
    // *attr = PAGE_DIR_COARSE;
}
@ -19,8 +19,7 @@ struct MmuDriverRightGroup {
    struct TraceTag intr_driver_tag;
};

struct MmuCommonDone {
    void (*MmuDevPteAttr)(uintptr_t* attr);
    void (*MmuPdeAttr)(uintptr_t* attr);
    void (*MmuUsrPteAttr)(uintptr_t* attr);
@ -1,4 +1,10 @@
# The following three platforms support compatible instructions.
ifneq ($(findstring $(BOARD), ok1028a-c), )
SRC_DIR := armv8-a
endif
ifneq ($(findstring $(BOARD), imx6q-sabrelite zynq7000-zc702), )
SRC_DIR := armv7-a
endif

include $(KERNEL_ROOT)/compiler.mk
@ -0,0 +1,4 @@
# The following three platforms support compatible instructions.
SRC_DIR := cortex-a72

include $(KERNEL_ROOT)/compiler.mk
@ -0,0 +1,3 @@
SRC_DIR := uart_io_for_$(BOARD)

include $(KERNEL_ROOT)/compiler.mk
@ -40,12 +40,17 @@ endif

ifeq ($(BOARD), ok1028a-c)
KERNELPATHS += \
    -I$(KERNEL_ROOT)/hardkernel/clock/arm/armv8-a/cortex-a72/$(BOARD)/include \
    -I$(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/preboot_for_$(BOARD)/include \
    -I$(KERNEL_ROOT)/hardkernel/arch/arm/armv8-a/cortex-a72/ \
    -I$(KERNEL_ROOT)/hardkernel/mmu/arm/armv8-a/cortex-a72/$(BOARD) \
    -I$(KERNEL_ROOT)/hardkernel/mmu/arm/armv8-a/cortex-a72/include \
    -I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/ \
    -I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/$(BOARD) \
    -I$(KERNEL_ROOT)/hardkernel/intr/arm/armv8-a/cortex-a72/gicv3 \
    -I$(KERNEL_ROOT)/hardkernel/uart/arm/armv8-a/cortex-a72/uart_io_for_$(BOARD)/include \
    -I$(KERNEL_ROOT)/hardkernel/uart/arm/armv8-a/cortex-a72/ \
    -I$(KERNEL_ROOT)/hardkernel/cache/L1/arm/cortex-a72/
endif

KERNELPATHS += \