This commit is contained in:
songyanguang 2024-12-13 19:42:48 +08:00
parent 72e3175707
commit fe26bb4e5a
9 changed files with 334 additions and 45 deletions

View File

@ -0,0 +1,175 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
* which was based on arch/arm/include/io.h
*
* Copyright (C) 1996-2000 Russell King
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2014 Regents of the University of California
*/
#ifndef _ASM_RISCV_MMIO_H
#define _ASM_RISCV_MMIO_H
//#include <linux/types.h>
//#include <asm/mmiowb.h>
#include "asm/const.h"
#include <stdint.h>
/* Minimal stand-ins for the Linux kernel types this header was ported from. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
/* Little-endian "wire" types. This port is little-endian only, so they
 * are plain integers and the conversions below are identity operations.
 * __le16 and __le64 were missing in the original even though the
 * readw/writew/readq/writeq helpers below use them. */
typedef uint16_t __le16;
typedef uint32_t __le32;
typedef uint64_t __le64;
#ifndef __iomem
#define __iomem
#endif
#ifndef asm
#define asm __asm__
#endif
#define __force
/* Identity byte-order conversions (little-endian CPU, little-endian
 * devices). cpu_to_le16/cpu_to_le64 were previously undefined, which
 * broke any use of writew_cpu()/writeq_cpu(). */
#define cpu_to_le16(x) (x)
#define cpu_to_le32(x) (x)
#define cpu_to_le64(x) (x)
#define le16_to_cpu(x) (x)
#define le32_to_cpu(x) (x)
#define le64_to_cpu(x) (x)
/* Generic IO read/write. These perform native-endian accesses. */
/*
 * Native-endian raw MMIO writes. Each helper emits exactly one store
 * instruction; no ordering is implied here (the writeX() wrappers below
 * add the fences). The #define-to-itself lines follow the Linux
 * convention that lets generic headers detect an arch override exists.
 *
 * NOTE(review): no "memory" clobber on the asm, matching the Linux
 * arch/riscv original this is based on; ordering relies entirely on the
 * __io_*() fence macros further down.
 */
#define __raw_writeb __raw_writeb
/* Store one byte to MMIO address @addr. */
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}
#define __raw_writew __raw_writew
/* Store a 16-bit half-word to MMIO address @addr. */
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}
#define __raw_writel __raw_writel
/* Store a 32-bit word to MMIO address @addr. */
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}
#ifdef CONFIG_64BIT
#define __raw_writeq __raw_writeq
/* Store a 64-bit double-word to MMIO address @addr (RV64 only). */
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif
/*
 * Native-endian raw MMIO reads. Each helper emits exactly one load
 * instruction and implies no ordering (see the fenced readX() wrappers
 * below). lb/lh sign-extend into the register, but the u8/u16 return
 * type truncates back to the loaded width, so the value is correct.
 */
#define __raw_readb __raw_readb
/* Load one byte from MMIO address @addr. */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}
#define __raw_readw __raw_readw
/* Load a 16-bit half-word from MMIO address @addr. */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}
#define __raw_readl __raw_readl
/* Load a 32-bit word from MMIO address @addr. */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}
#ifdef CONFIG_64BIT
#define __raw_readq __raw_readq
/* Load a 64-bit double-word from MMIO address @addr (RV64 only). */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 val;
asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}
#endif
/*
* Unordered I/O memory access primitives. These are even more relaxed than
* the relaxed versions, as they don't even order accesses between successive
* operations to the I/O regions.
*/
/* *_cpu accessors: raw access plus (identity) endian conversion, still
 * with no ordering guarantees whatsoever.
 *
 * NOTE(review): __le16/__le64 and cpu_to_le16/cpu_to_le64 are
 * referenced below but are not defined earlier in this header — any use
 * of the w/q variants fails to compile until those are added. */
#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
#ifdef CONFIG_64BIT
#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
#endif
/*
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses. These are defined to order the indicated access (either a read or
* write) with all other I/O memory accesses. Since the platform specification
* defines that all I/O regions are strongly ordered on channel 2, no explicit
* fences are required to enforce this ordering.
*/
/* FIXME: These are now the same as asm-generic */
/* Relaxed-accessor barrier hooks; all no-ops here because (per the
 * comment above) the platform's I/O regions are strongly ordered. */
#define __io_rbr() do {} while (0)
#define __io_rar() do {} while (0)
#define __io_rbw() do {} while (0)
#define __io_raw() do {} while (0)
/* readX_relaxed/writeX_relaxed: ordered only against other MMIO
 * accesses, never against normal memory. */
#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
#ifdef CONFIG_64BIT
#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
#endif
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
* Normal memory access. The memory barriers here are necessary as RISC-V
* doesn't define any ordering between the memory space and the I/O space.
*/
/* Fences for the fully ordered accessors. */
#define __io_br() do {} while (0)
/* After a read: "fence i,r" orders the device input before any
 * following normal-memory reads. */
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
/* Before a write: "fence w,o" orders prior normal-memory writes before
 * the device output. */
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
/* Linux would record a pending mmiowb here; this port has no
 * spinlock-aware mmiowb machinery, so it is a no-op. */
//#define __io_aw() mmiowb_set_pending()
#define __io_aw() do {} while (0)
/* readX/writeX: MMIO access ordered against normal memory. */
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
#ifdef CONFIG_64BIT
#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
#endif
#endif /* _ASM_RISCV_MMIO_H */

View File

@ -1,4 +1,4 @@
SRC_FILES := entry.S trampoline.S $(BOARD)/trap_common.c $(BOARD)/trap.c error_debug.c hard_spinlock.S
SRC_FILES := entry.S trampoline.S $(BOARD)/trap_common.c $(BOARD)/trap.c $(BOARD)/plic.c error_debug.c hard_spinlock.S
ifeq ($(BOARD), jh7110)
SRC_DIR := gicv3

View File

@ -0,0 +1,73 @@
/*
* This driver implements a version of the RISC-V PLIC with the actual layout
* specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
*
* https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
*
*/
#include "asm/csr.h"
#include "printf.h"
#include "plic.h"
#include "asm/mmio.h"
#include "ptrace.h"
extern unsigned long boot_cpu_hartid;
#define MAX_CPUS 4
#define MAX_PLIC_IRQS 136
#define CPU_TO_HART(cpu) ((2 * cpu) + 2)
/*
 * Set the PLIC priority for interrupt source @hwirq.
 * Priority 0 blocks delivery of the source; higher values win
 * arbitration (see the PLIC manual referenced at the top of this file).
 */
void plic_set_priority(int hwirq, int pro)
{
    /* Hold the register address in a pointer-sized integer: the old
     * `unsigned int` truncated 64-bit addresses and was then passed to
     * writel(), which expects a pointer, via an implicit int-to-pointer
     * conversion. */
    uintptr_t reg = PLIC_PRIORITY(hwirq);
    writel(pro, (volatile void *)reg);
}
/*
 * Enable (@enable != 0) or mask (@enable == 0) source @hwirq for @cpu
 * by read-modify-writing the per-hart enable word containing its bit.
 * Not atomic: callers must serialize concurrent updates to the same
 * enable word themselves.
 */
void plic_enable_irq(int cpu, int hwirq, int enable)
{
    /* 1u: a signed `1 << 31` is undefined behavior when
     * hwirq % 32 == 31. */
    unsigned int hwirq_mask = 1u << (hwirq % 32);
    int hart = CPU_TO_HART(cpu);
    /* Pointer-sized address instead of the original `unsigned int`,
     * which truncated on 64-bit and was implicitly converted to the
     * pointer readl()/writel() expect. */
    uintptr_t reg = PLIC_MENABLE(hart) + 4 * (hwirq / 32);
    if (enable) {
        writel(readl((volatile void *)reg) | hwirq_mask, (volatile void *)reg);
    }
    else {
        writel(readl((volatile void *)reg) & ~hwirq_mask, (volatile void *)reg);
    }
}
//TODO: to debug
/*
 * One-time PLIC bring-up, run on the boot CPU.
 * For every hart context: open the priority-threshold gate (threshold
 * 0 accepts any priority > 0), then mask every source and give it a
 * default priority of 1. Finally set the external-interrupt enable bit
 * in this hart's interrupt-enable CSR.
 *
 * NOTE(review): the inner loop runs hwirq = 1..MAX_PLIC_IRQS inclusive
 * (source ids are 1-based); confirm the device really exposes source
 * id 136.
 */
int plic_init(void)
{
    int i;
    int hwirq;
    printk("plic_init boot_cpu_hartid=%lu\n", boot_cpu_hartid);
    for (i = 0; i < MAX_CPUS; i++) {
        /* Cast through uintptr_t: writel() takes a pointer, the
         * original passed the raw integer address. */
        writel(0, (volatile void *)(uintptr_t)PLIC_MTHRESHOLD(CPU_TO_HART(i)));
        for (hwirq = 1; hwirq <= MAX_PLIC_IRQS; hwirq++) {
            plic_enable_irq(i, hwirq, 0);
            plic_set_priority(hwirq, 1);
        }
    }
    csr_set(CSR_IE, IE_EIE);
    return 0;
}
/*
 * External-interrupt entry point, called from the trap path.
 * Currently a stub: it masks external interrupts, should then claim
 * and dispatch pending sources via the claim/complete register, and
 * re-enables external interrupts.
 *
 * NOTE(review): hwirq and claim_reg are set up but never used — the
 * claim/dispatch/complete loop is still TODO, so no source is ever
 * acknowledged here.
 * NOTE(review): hart is hard-coded to CPU_TO_HART(0) and claim_reg is
 * an `unsigned int` holding an address; when the TODO is implemented
 * this should use the current CPU and a pointer-sized type.
 */
void plic_handle_irq(struct pt_regs *regs)
{
int hwirq;
int hart = CPU_TO_HART(0);
unsigned int claim_reg = PLIC_MCLAIM(hart);
csr_clear(CSR_IE, IE_EIE);
//TODO
csr_set(CSR_IE, IE_EIE);
}

View File

@ -0,0 +1,20 @@
#ifndef _RISCV_PLIC_H
#define _RISCV_PLIC_H
#include "memlayout.h"
#include "ptrace.h"
/* PLIC register map. All macros expand to physical addresses as plain
 * integers; offsets follow the SiFive U5-series PLIC layout (see the
 * reference cited in plic.c). */
#define PLIC_BASE PLIC_PHYMEM_BASE
/* Per-source priority: one 32-bit word per source id (ids are 1-based). */
#define PLIC_PRIORITY(hwirq) (PLIC_BASE + (hwirq) * 4)
/* Pending bitmap: one bit per source, 32 sources per word. */
#define PLIC_PENDING(hwirq) (PLIC_BASE + 0x1000 + ((hwirq) / 32) * 4)
/* Enable-bitmap base for a hart context; caller adds 4 * (hwirq / 32). */
#define PLIC_MENABLE(hart) (PLIC_BASE + 0x2000 + (hart) * 0x80)
/* Per-context priority threshold register. */
#define PLIC_MTHRESHOLD(hart) (PLIC_BASE + 0x200000 + (hart) * 0x1000)
/* Per-context claim/complete register. */
#define PLIC_MCLAIM(hart) (PLIC_BASE + 0x200004 + (hart) * 0x1000)
int plic_init(void);
void plic_enable_irq(int cpu, int hwirq, int enable);
void plic_handle_irq(struct pt_regs *regs);
#endif /* _RISCV_PLIC_H */

View File

@ -40,6 +40,7 @@ Modification:
#include "asm/csr.h"
#include "ptrace.h"
#include "plic.h"
extern void dabort_handler(struct trapframe* r);
@ -126,6 +127,16 @@ void syscall_arch_handler(struct trapframe* tf)
extern void do_exception_vector(void);
/*
 * Install the exception vector and start with every S-mode interrupt
 * source disabled (sie = 0); individual sources are enabled later by
 * the interrupt-controller init path (e.g. plic_init() sets IE_EIE).
 */
void trap_init(void)
{
csr_write(stvec, do_exception_vector);
csr_write(sie, 0);
}
static void do_trap_error(struct pt_regs *regs, const char *str)
{
printk("Oops: %s\n", str);
@ -179,26 +190,6 @@ static const struct fault_info fault_inf[] = {
};
/*
void delegate_traps(void)
{
unsigned long interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
unsigned long exceptions = (1UL << CAUSE_MISALIGNED_FETCH) |
(1UL << CAUSE_FETCH_PAGE_FAULT) |
(1UL << CAUSE_BREAKPOINT) |
(1UL << CAUSE_LOAD_PAGE_FAULT) |
(1UL << CAUSE_STORE_PAGE_FAULT) |
(1UL << CAUSE_USER_ECALL) |
(1UL << CAUSE_LOAD_ACCESS_FAULT) |
(1UL << CAUSE_STORE_ACCESS_FAULT);
csr_write(mideleg, interrupts);
csr_write(medeleg, exceptions);
}
*/
struct fault_info * ec_to_fault_info(unsigned long scause)
{
struct fault_info *inf;
@ -209,12 +200,22 @@ struct fault_info * ec_to_fault_info(unsigned long scause)
inf = &fault_inf[scause];
return inf;
}
extern void do_exception_vector(void);
void trap_init(void)
void handle_irq(struct pt_regs *regs, unsigned long scause)
{
csr_write(stvec, do_exception_vector);
//printk("stvec=0x%lx, do_exception_vector=0x%lx\n", csr_read(stvec), (unsigned long)do_exception_vector);
csr_write(sie, 0);
switch (scause & ~CAUSE_IRQ_FLAG) {
case IRQ_S_TIMER:
//handle_timer_irq();
break;
case IRQ_S_EXT:
plic_handle_irq(regs);
break;
case IRQ_S_SOFT:
// TODO
break;
default:
panic("unexpected interrupt cause\n");
}
}
void do_exception(struct pt_regs *regs, unsigned long scause)
@ -224,7 +225,7 @@ void do_exception(struct pt_regs *regs, unsigned long scause)
printk("%s, scause: 0x%lx\n", __func__, scause);
if (scause & CAUSE_IRQ_FLAG) {
// TODO: 处理中断
handle_irq(regs, scause);
}
else {
inf = ec_to_fault_info(scause);
@ -233,3 +234,4 @@ void do_exception(struct pt_regs *regs, unsigned long scause)
}
}
}

View File

@ -37,6 +37,8 @@ Modification:
#include "log.h"
#include "multicores.h"
#include "plic.h"
static struct XiziTrapDriver xizi_trap_driver;
void panic(char* s)
@ -46,18 +48,26 @@ void panic(char* s)
;
}
extern void alltraps();
//extern void alltraps();
extern void trap_init(void);
static void _sys_irq_init(int cpu_id)
{
// primary core init intr
xizi_trap_driver.switch_hw_irqtbl((uintptr_t*)alltraps);
// xizi_trap_driver.switch_hw_irqtbl((uintptr_t*)alltraps);
if (cpu_id == 0) {
gic_init();
plic_init();
}
gicv3inithart(cpu_id);
}
/* Per-CPU trap/IRQ bring-up: only core 0 installs the exception vector
 * (trap_init), then every core runs the IRQ-controller init path. */
static void _sys_trap_init(int cpu_id)
{
if (cpu_id == 0) {
trap_init();
}
_sys_irq_init(cpu_id);
}
static void _cpu_irq_enable(void)
{
intr_on();
@ -70,12 +80,12 @@ static void _cpu_irq_disable(void)
static void _single_irq_enable(int irq, int cpu, int prio)
{
gic_setup_ppi((uint32_t)cpu, (uint32_t)irq);
plic_enable_irq(cpu, irq, 1);
}
static void _single_irq_disable(int irq, int cpu)
{
return;
plic_enable_irq(cpu, irq, 0);
}
static inline uintptr_t* _switch_hw_irqtbl(uintptr_t* new_tbl_base)
@ -113,7 +123,7 @@ int _cur_cpu_id()
}
static struct XiziTrapDriver xizi_trap_driver = {
.sys_irq_init = _sys_irq_init,
.sys_irq_init = _sys_trap_init,
.cur_cpu_id = _cur_cpu_id,
.cpu_irq_enable = _cpu_irq_enable,

View File

@ -1,16 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Regents of the University of California
*/
#ifndef _ASM_RISCV_PTRACE_H
#define _ASM_RISCV_PTRACE_H
//#include <uapi/asm/ptrace.h>
//#include <asm/csr.h>
//#include <linux/compiler.h>
struct pt_regs {
unsigned long epc;
unsigned long ra;

View File

@ -78,6 +78,11 @@ Modification:
#define KERN_MEM_BASE ((0 - 0x0000002000000000ULL) + PHY_MEM_BASE) // First kernel virtual address
#define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE)
/* PLIC (platform-level interrupt controller) memory layout */
/* PLIC registers: 4 MiB window at physical 0x0C00_0000. */
#define PLIC_PHYMEM_BASE (0x0C000000ULL)
#define PLIC_MEM_SIZE (0x00400000ULL)
/* Kernel linear-map translation: virtual <-> physical by fixed offset. */
#define V2P(a) (((uint64_t)(a)) - KERN_OFFSET)
#define P2V(a) ((void *)(((char *)(a)) + KERN_OFFSET))

View File

@ -56,6 +56,7 @@ static pmd_t early_pmd[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE
static pmd_t early_uart_pmd[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t early_pmd_free[((PHY_USER_FREEMEM_BASE - PHY_MEM_BASE) >> PGDIR_SHIFT) + 1][PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t early_pmd_inear_map[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t early_plic_pmd[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
@ -125,7 +126,6 @@ static void __init create_kernel_page_table_early(pgd_t *pgdir, bool early)
}
}
static void __init create_kernel_pgd_mapping_free_early(pgd_t *pgdp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
@ -195,6 +195,17 @@ static void __init create_kernel_page_table_linear_map_early(pgd_t *pgdir, bool
}
}
/*
 * Identity-map the PLIC MMIO window (VA == PA, one PMD entry per
 * PMD_SIZE step) into the early page table so the interrupt controller
 * is reachable while the boot-time mappings are still in use.
 *
 * NOTE(review): the `early` parameter is accepted for symmetry with the
 * other create_*_early() helpers but is unused here.
 * NOTE(review): the PGD entry is re-installed on every PMD_SIZE step;
 * harmless since it always points at early_plic_pmd, but one call per
 * PGDIR_SIZE range would suffice — compare create_kernel_page_table_early().
 */
static void __init create_plic_page_table_early(pgd_t *pgdir, bool early)
{
uintptr_t va;
for (va = PLIC_PHYMEM_BASE; va < PLIC_PHYMEM_BASE + PLIC_MEM_SIZE; va += PMD_SIZE) {
create_pgd_mapping_early(pgdir, va, (uintptr_t)early_plic_pmd, PGDIR_SIZE, PAGE_TABLE);
create_pmd_mapping_early(early_plic_pmd, va, va, PMD_SIZE, PAGE_KERNEL);
}
}
/*
* setup_vm_early() is called from boot.S with MMU-off.
*
@ -238,5 +249,8 @@ void __init setup_vm_early(void)
/* Setup kernel linear map PGD and PMD */
create_kernel_page_table_linear_map_early(early_pg_dir, true);
/* Setup PLIC PGD and PMD */
create_plic_page_table_early(early_pg_dir, true);
}