Modify mmu

songyanguang 2024-12-03 14:41:41 +08:00
parent 87c5f1549f
commit a9f8fba6dd
19 changed files with 1325 additions and 142 deletions

View File

@ -14,20 +14,13 @@
 #define __ASM_STR(x) #x
 #endif
-#if __riscv_xlen == 64
-#define __REG_SEL(a, b) __ASM_STR(a)
-#elif __riscv_xlen == 32
-#define __REG_SEL(a, b) __ASM_STR(b)
-#else
-#error "Unexpected __riscv_xlen"
-#endif
-#define REG_L __REG_SEL(ld, lw)
-#define REG_S __REG_SEL(sd, sw)
-#define REG_SC __REG_SEL(sc.d, sc.w)
-#define REG_ASM __REG_SEL(.dword, .word)
-#define SZREG __REG_SEL(8, 4)
-#define LGREG __REG_SEL(3, 2)
+#define REG_L ld
+#define REG_S sd
+#define REG_SC sc.d
+#define REG_ASM .dword
+#define SZREG 8
+#define LGREG 3
 #define RISCV_PTR .dword
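
The hunk above drops the `__riscv_xlen` dispatch and hardcodes the RV64 choices. A minimal host-side sketch of the old `__REG_SEL` mechanism, with `__riscv_xlen` assumed rather than compiler-provided, confirms the hardcoded values are exactly the 64-bit expansions:

    #include <stdio.h>

    #define __ASM_STR(x) #x
    #define __riscv_xlen 64 /* assumption: normally predefined by the RISC-V toolchain */

    #if __riscv_xlen == 64
    #define __REG_SEL(a, b) __ASM_STR(a)
    #else
    #define __REG_SEL(a, b) __ASM_STR(b)
    #endif

    int main(void)
    {
        /* Prints: REG_L = ld, REG_S = sd, SZREG = 8 */
        printf("REG_L = %s, REG_S = %s, SZREG = %s\n",
               __REG_SEL(ld, lw), __REG_SEL(sd, sw), __REG_SEL(8, 4));
        return 0;
    }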

View File

@ -25,6 +25,8 @@
 #define _UL(x) (_AC(x, UL))
 #define _ULL(x) (_AC(x, ULL))
+#define UL(x) (_UL(x))
+#define ULL(x) (_ULL(x))
 #define _BITUL(x) (_UL(1) << (x))
 #define _BITULL(x) (_ULL(1) << (x))

View File

@ -6,10 +6,10 @@
 #ifndef _ASM_RISCV_CSR_H
 #define _ASM_RISCV_CSR_H
+#include "autoconf.h"
 #include <asm/asm.h>
 #include <asm/const.h>
-#define CONFIG_64BIT
 /* Status register flags */
 #define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */

View File

@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
* Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
*/
#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H
#include "autoconf.h"
#include <asm/const.h>
#include <asm/pfn.h>
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef uint64_t phys_addr_t;
#else
typedef uint32_t phys_addr_t;
#endif
#define PAGE_SHIFT (12)
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#ifdef CONFIG_64BIT
#define HUGE_MAX_HSTATE 2
#else
#define HUGE_MAX_HSTATE 1
#endif
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/*
* PAGE_OFFSET -- the first address of the first page of memory.
* When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary).
*/
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#define KERN_VIRT_SIZE (-PAGE_OFFSET)
#ifndef __ASSEMBLY__
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
memcpy((vto), (vfrom), PAGE_SIZE)
/*
* Use struct definitions to apply C type checking
*/
/* Page Global Directory entry */
typedef struct {
unsigned long pgd;
} pgd_t;
/* Page Table entry */
typedef struct {
unsigned long pte;
} pte_t;
typedef struct {
unsigned long pgprot;
} pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif
#ifdef CONFIG_MMU
extern unsigned long riscv_pfn_base;
#define ARCH_PFN_OFFSET (riscv_pfn_base)
#else
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
struct kernel_mapping {
unsigned long virt_addr;
uintptr_t phys_addr;
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
uintptr_t xiprom;
uintptr_t xiprom_sz;
#endif
};
extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
#define is_linear_mapping(x) \
((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = y; \
(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
(void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
(void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
})
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = y; \
(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x) __va_to_pa_nodebug(x)
#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */
#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
#endif
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
unsigned long _addr = (unsigned long)vaddr; \
(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#endif /* _ASM_RISCV_PAGE_H */
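
The translation macros above split pa<->va conversion between the linear map (offset `va_pa_offset`) and the kernel image map (`va_kernel_pa_offset`). A minimal sketch of the linear-map case, with the kernel load address assumed from memlayout.h elsewhere in this commit:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0xffffffe000000000ULL /* CONFIG_PAGE_OFFSET from autoconf.h */

    int main(void)
    {
        uint64_t phys_base = 0x40200000;                 /* assumed kernel load address */
        uint64_t va_pa_offset = PAGE_OFFSET - phys_base; /* as set in setup_vm_early() */

        /* linear_mapping_pa_to_va()/_va_to_pa() reduce to one add/subtract. */
        uint64_t pa = 0x40201000;
        uint64_t va = pa + va_pa_offset;
        printf("pa %#" PRIx64 " -> va %#" PRIx64 "\n", pa, va); /* va 0xffffffe000001000 */
        printf("va %#" PRIx64 " -> pa %#" PRIx64 "\n", va, va - va_pa_offset);
        return 0;
    }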

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PFN_H_
#define _LINUX_PFN_H_
#ifndef __ASSEMBLER__
#include <stdint.h>
/*
* pfn_t: encapsulates a page-frame number that is optionally backed
* by memmap (struct page). Whether a pfn_t has a 'struct page'
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
uint64_t val;
} pfn_t;
#endif
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#endif
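
The PFN helpers are pure shift arithmetic against the 4 KiB page: PFN_DOWN truncates, PFN_UP rounds up, PFN_PHYS goes back to a physical address. A small sketch with an example address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        phys_addr_t pa = 0x40201234;
        printf("PFN_DOWN = %#" PRIx64 "\n", (uint64_t)PFN_DOWN(pa)); /* 0x40201 */
        printf("PFN_UP   = %#" PRIx64 "\n", (uint64_t)PFN_UP(pa));   /* 0x40202 */
        printf("back     = %#" PRIx64 "\n", PFN_PHYS(PFN_DOWN(pa))); /* 0x40201000 */
        return 0;
    }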

View File

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Regents of the University of California
*/
#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H
#include <asm/const.h>
#include <asm/pgtable-bits.h>
#define PGDIR_SHIFT 30
/* Size of region mapped by a page global directory */
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define PMD_SHIFT 21
/* Size of region mapped by a page middle directory */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
/* Page Middle Directory entry */
typedef struct {
unsigned long pmd;
} pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}
static inline unsigned long _pmd_pfn(pmd_t pmd)
{
return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
}
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif /* _ASM_RISCV_PGTABLE_64_H */
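
`pfn_pmd()` packs the page frame number at bit 10 of the entry (the PFN field position shown in the pgtable-bits.h PTE-format comment), with the permission bits below it. A sketch using flag bit positions from the RISC-V privileged spec; the symbolic names stand in for pgtable-bits.h definitions not shown in this diff:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PFN_SHIFT 10
    #define _PAGE_PRESENT (1UL << 0)
    #define _PAGE_READ    (1UL << 1)
    #define _PAGE_WRITE   (1UL << 2)

    static uint64_t pfn_pmd(uint64_t pfn, uint64_t prot)
    {
        return (pfn << _PAGE_PFN_SHIFT) | prot;
    }

    int main(void)
    {
        /* Map physical 0x40200000 (pfn 0x40200) read/write. */
        uint64_t pmd = pfn_pmd(0x40200000UL >> 12,
                               _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE);
        printf("pmd = %#" PRIx64 "\n", pmd);                    /* 0x10080007 */
        printf("pfn = %#" PRIx64 "\n", pmd >> _PAGE_PFN_SHIFT); /* 0x40200 */
        return 0;
    }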

View File

@ -6,6 +6,10 @@
 #ifndef _ASM_RISCV_PGTABLE_BITS_H
 #define _ASM_RISCV_PGTABLE_BITS_H
+#include <asm/const.h>
+#define BIT(nr) (UL(1) << (nr))
 /*
  * PTE format:
  * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0

View File

@ -0,0 +1,598 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Regents of the University of California
*/
#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H
#include "autoconf.h"
#include <asm/pgtable-bits.h>
#include <asm/sizes.h>
#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
#else
#define ADDRESS_SPACE_END (UL(-1))
#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR PAGE_OFFSET
#endif
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
#endif
/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
#endif
/*
* Roughly size the vmemmap space to be large enough to fit enough
* struct pages to map half the virtual address space. Then
* position vmemmap directly below the VMALLOC region.
*/
#define STRUCT_PAGE_MAX_SHIFT 0 //syg
#define VMEMMAP_SHIFT \
(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END (VMALLOC_START - 1)
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
/*
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
#define vmemmap ((struct page *)VMEMMAP_START)
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
#define FIXADDR_SIZE PGDIR_SIZE
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#endif
#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_8M
#else
#define XIP_OFFSET 0
#endif
#ifndef __ASSEMBLY__
/* Page Upper Directory not used in RISC-V */
//#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
uintptr_t __a = (uintptr_t)(addr); \
(__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? \
__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
__a; \
})
#else
#define XIP_FIXUP(addr) (addr)
#endif /* CONFIG_XIP_KERNEL */
#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
_PAGE_EXEC | _PAGE_WRITE)
#define PAGE_COPY PAGE_READ
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
#define _PAGE_KERNEL (_PAGE_READ \
| _PAGE_WRITE \
| _PAGE_PRESENT \
| _PAGE_ACCESSED \
| _PAGE_DIRTY \
| _PAGE_GLOBAL)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
| _PAGE_EXEC)
#define PAGE_TABLE __pgprot(_PAGE_TABLE)
/*
* The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
* change the properties of memory regions.
*/
#define _PAGE_IOREMAP _PAGE_KERNEL
extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000 PAGE_NONE
#define __P001 PAGE_READ
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_EXEC
#define __P101 PAGE_READ_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_READ_EXEC
/* MAP_SHARED permissions: xwr */
#define __S000 PAGE_NONE
#define __S001 PAGE_READ
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_EXEC
#define __S101 PAGE_READ_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
/*
 * Checking for _PAGE_LEAF is needed too because:
 * when splitting a THP, split_huge_page() will temporarily clear
 * the present bit; in this situation, pmd_present() and
 * pmd_trans_huge() still need to return true.
 */
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
static inline int pmd_none(pmd_t pmd)
{
return (pmd_val(pmd) == 0);
}
static inline int pmd_bad(pmd_t pmd)
{
return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}
#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
}
static inline void pmd_clear(pmd_t *pmdp)
{
set_pmd(pmdp, __pmd(0));
}
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}
static inline unsigned long _pgd_pfn(pgd_t pgd)
{
return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}
#if 0
static inline struct page *pmd_page(pmd_t pmd)
{
return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}
#endif
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
}
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pte_none(pte_t pte)
{
return (pte_val(pte) == 0);
}
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & _PAGE_WRITE;
}
static inline int pte_exec(pte_t pte)
{
return pte_val(pte) & _PAGE_EXEC;
}
static inline int pte_huge(pte_t pte)
{
return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
static inline int pte_special(pte_t pte)
{
return pte_val(pte) & _PAGE_SPECIAL;
}
/* static inline pte_t pte_rdprotect(pte_t pte) */
static inline pte_t pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}
/* static inline pte_t pte_mkread(pte_t pte) */
static inline pte_t pte_mkwrite(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_WRITE);
}
/* static inline pte_t pte_mkexec(pte_t pte) */
static inline pte_t pte_mkdirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_DIRTY);
}
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_ACCESSED);
}
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}
static inline int pmd_protnone(pmd_t pmd)
{
return pte_protnone(pmd_pte(pmd));
}
#endif
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
* cache flush; it is necessary even after writing invalid entries.
* Relying on flush_tlb_fix_spurious_fault would suffice, but
* the extra traps reduce performance. So, eagerly SFENCE.VMA.
*/
#if 0 //syg
local_flush_tlb_page(address);
#endif
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pte_t *ptep = (pte_t *)pmdp;
update_mmu_cache(vma, address, ptep);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
return pte_val(pte_a) == pte_val(pte_b);
}
/*
* Certain architectures need to do special things when PTEs within
* a page table are directly modified. Thus, the following hook is
* made available.
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
}
void flush_icache_pte(pte_t pte);
static inline void set_pte_at(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
flush_icache_pte(pteval);
set_pte(ptep, pteval);
}
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
set_pte_at(mm, addr, ptep, __pte(0));
}
/*
* THP functions
*/
static inline pmd_t pte_pmd(pte_t pte)
{
return __pmd(pte_val(pte));
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}
#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return pte_write(pmd_pte(pmd));
}
static inline int pmd_dirty(pmd_t pmd)
{
return pte_dirty(pmd_pte(pmd));
}
static inline int pmd_young(pmd_t pmd)
{
return pte_young(pmd_pte(pmd));
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
return pte_pmd(pte_mkold(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}
/*
* Encode and decode a swap entry
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
* bit 1: _PAGE_PROT_NONE (zero)
* bits 2 to 6: swap type
* bits 7 to XLEN-1: swap offset
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
* In the RV64 Linux scheme, we give the user half of the virtual-address space
* and give the kernel the other (upper) half.
*/
#ifdef CONFIG_64BIT
#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif
/*
* Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
* Note that PGDIR_SIZE must evenly divide TASK_SIZE.
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif
#else /* CONFIG_MMU */
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define VMALLOC_END TASK_SIZE
#endif /* !CONFIG_MMU */
#define kern_addr_valid(addr) (1) /* FIXME */
extern char _start[];
extern char _end[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
void paging_init(void);
void misc_mem_init(void);
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */
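
A small sketch of the swap-entry packing documented in the header above; keeping bits 0 and 1 clear guarantees a swap PTE can never be mistaken for a present or PROT_NONE mapping:

    #include <stdio.h>

    #define __SWP_TYPE_SHIFT 2
    #define __SWP_TYPE_BITS 5
    #define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
    #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

    int main(void)
    {
        unsigned long type = 3, offset = 0x1234;
        unsigned long val = (type << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);

        printf("entry  = %#lx\n", val);
        printf("type   = %lu\n", (val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK); /* 3 */
        printf("offset = %#lx\n", val >> __SWP_OFFSET_SHIFT);                  /* 0x1234 */
        return 0;
    }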

View File

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/sizes.h
*/
#ifndef __LINUX_SIZES_H__
#define __LINUX_SIZES_H__
#include <asm/const.h>
#define SZ_1 0x00000001
#define SZ_2 0x00000002
#define SZ_4 0x00000004
#define SZ_8 0x00000008
#define SZ_16 0x00000010
#define SZ_32 0x00000020
#define SZ_64 0x00000040
#define SZ_128 0x00000080
#define SZ_256 0x00000100
#define SZ_512 0x00000200
#define SZ_1K 0x00000400
#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
#define SZ_32K 0x00008000
#define SZ_64K 0x00010000
#define SZ_128K 0x00020000
#define SZ_256K 0x00040000
#define SZ_512K 0x00080000
#define SZ_1M 0x00100000
#define SZ_2M 0x00200000
#define SZ_4M 0x00400000
#define SZ_8M 0x00800000
#define SZ_16M 0x01000000
#define SZ_32M 0x02000000
#define SZ_64M 0x04000000
#define SZ_128M 0x08000000
#define SZ_256M 0x10000000
#define SZ_512M 0x20000000
#define SZ_1G 0x40000000
#define SZ_2G 0x80000000
#define SZ_4G _AC(0x100000000, ULL)
#define SZ_8G _AC(0x200000000, ULL)
#define SZ_16G _AC(0x400000000, ULL)
#define SZ_32G _AC(0x800000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */

View File

@ -14,75 +14,97 @@
#include "core.h" #include "core.h"
#include "memlayout.h" #include "memlayout.h"
#define HCR_VALUE (1 << 31)
#define SPSR_EL2_VALUE (7 << 6) | (5 << 0)
#define SCTLR_EL1_VALUE (0x30D00800)
.section ".text", "ax" .section ".text", "ax"
.globl _boot_start .globl _boot_start
.globl primary_cpu_init
_boot_start: _boot_start:
call _debug_uart_init_early
la a0, debug_string_start
call _debug_uart_printascii
j _start_kernel
_start_kernel:
/* Mask all interrupts */ /* Mask all interrupts */
csrw CSR_IE, zero csrw CSR_IE, zero
csrw CSR_IP, zero csrw CSR_IP, zero
j primary_cpu_init
/*
switch_mode:
csrr t0, sstatus
srli t0, t0, 8
andi t0, t0, 1
beqz t0, switch_to_s_mode
j continue_execution
switch_to_s_mode:
li t2, 0x100
csrw sstatus, t2
j continue_execution
continue_execution:
j primary_cpu_init
*/
primary_cpu_init:
la t0, boot_start_addr
la t1, boot_end_addr
li t2, 0
clear_bss_sec:
bge t0, t1, clear_bss_sec_done
sb t2, 0(t0)
addi t0, t0, 4
j clear_bss_sec
clear_bss_sec_done:
/* Clear BSS for flat non-ELF images */ /* Clear BSS for flat non-ELF images */
la a3, __bss_start la a3, __bss_start
la a4, __bss_end la a4, __bss_stop
ble a4, a3, clear_bss_done ble a4, a3, clear_bss_done
clear_bss: clear_bss:
sd zero, (a3) sd zero, (a3)
add a3, a3, RISCV_SZPTR add a3, a3, RISCV_SZPTR
blt a3, a4, clear_bss blt a3, a4, clear_bss
clear_bss_done: clear_bss_done:
j bootmain /* Initialize page tables and relocate to virtual addresses */
call setup_vm_early
/* la a0, early_pg_dir
.global enable_mmu_relocate call relocate_enable_mmu
enable_mmu_relocate: la sp, stacks_top
la a2, boot_l2pgdir
srl a2, a2, PAGE_SHIFT call _debug_uart_init
/* Start the kernel */
tail bootmain
relocate_enable_mmu:
/* Relocate return address */
la a1, kernel_map
ld a1, KERNEL_MAP_VIRT_ADDR(a1)
la a2, _start
sub a1, a1, a2
add ra, ra, a1
/* Point stvec to virtual address of intruction after satp write */
la a2, 1f
add a2, a2, a1
csrw CSR_TVEC, a2
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
li a1, SATP_MODE li a1, SATP_MODE
or a2, a2, a1 or a2, a2, a1
/*
* Load trampoline page directory, which will cause us to trap to
* stvec if VA != PA, or simply fall through if VA == PA. We need a
* full fence here because setup_vm() just wrote these PTEs and we need
* to ensure the new translations are in use.
*/
la a0, trampoline_pg_dir
srl a0, a0, PAGE_SHIFT
or a0, a0, a1
sfence.vma sfence.vma
csrw CSR_SATP, a0
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
csrw CSR_TVEC, a0
/*
* Switch to kernel page tables. A full fence is necessary in order to
* avoid using the trampoline translations, which are only correct for
* the first superpage. Fetching the fence is guarnteed to work
* because that first superpage is translated the same way.
*/
csrw CSR_SATP, a2 csrw CSR_SATP, a2
sfence.vma
ret ret
*/
.Lsecondary_park:
/* We lack SMP support or have too many harts, so park this hart */
wfi
j .Lsecondary_park
debug_string_start: .ascii "XiZi boot start\n\0"
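
`relocate_enable_mmu` composes both of its `satp` values the same way: PPN of the root page table in the low bits, mode field at the top. A host-side sketch of that composition, assuming Sv39 (MODE = 8, which is what an Sv39 `SATP_MODE` in csr.h encodes):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SATP_MODE_SV39 (8ULL << 60)

    int main(void)
    {
        uint64_t root_pt_pa = 0x40400000; /* assumed root page-table address */
        uint64_t satp = (root_pt_pa >> PAGE_SHIFT) | SATP_MODE_SV39;
        printf("satp = %#" PRIx64 "\n", satp); /* 0x8000000000040400 */
        return 0;
    }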

View File

@ -8,7 +8,7 @@ export KBUILD_AFLAGS :=
 export CHECKFLAGS += -D__riscv -D__riscv_xlen=64
 export DEVICE :=
-export CFLAGS := $(KBUILD_CFLAGS) $(KBUILD_AFLAGS) $(CHECKFLAGS) -std=c11
+export CFLAGS := $(KBUILD_CFLAGS) $(KBUILD_AFLAGS) $(CHECKFLAGS) -std=c11 -mcmodel=medany
 # .vmlinux.cmd:1:cmd_vmlinux := sh scripts/link-vmlinux.sh "riscv64-linux-gnu-ld" " -melf64lriscv" " --build-id=sha1";
 export LFLAGS := -T $(KERNEL_ROOT)/hardkernel/arch/riscv/rv64gc/preboot_for_jh7110/jh7110.lds
 export CXXFLAGS :=

View File

@ -0,0 +1,15 @@
#ifndef _AUTOCONF_H
#define _AUTOCONF_H
#define CONFIG_64BIT 1
#define CONFIG_MMU 1
#define CONFIG_PHYS_ADDR_T_64BIT 1
#define CONFIG_PAGE_OFFSET 0xffffffe000000000
#define CONFIG_VA_BITS 39
#define THREAD_SIZE (512 << 2)
#define KERNEL_MAP_VIRT_ADDR 0 /* offsetof(struct kernel_mapping, virt_addr) */
#define TASK_TI_CPU 32 /* offsetof(struct task_struct, thread_info.cpu) */
#endif /* _AUTOCONF_H */
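
A quick sanity-check sketch for these constants: `CONFIG_PAGE_OFFSET` places the linear map in the top 128 GiB of the address space (`KERN_VIRT_SIZE = -PAGE_OFFSET` in page.h recovers that size), while half of a 39-bit space is the 256 GiB `TASK_SIZE` computed in pgtable.h:

    #include <stdio.h>

    #define CONFIG_PAGE_OFFSET 0xffffffe000000000UL
    #define CONFIG_VA_BITS 39

    int main(void)
    {
        unsigned long kern_virt_size = 0UL - CONFIG_PAGE_OFFSET; /* KERN_VIRT_SIZE */
        printf("KERN_VIRT_SIZE    = %lu GiB\n", kern_virt_size >> 30);          /* 128 */
        printf("half the VA space = %lu GiB\n",
               (1UL << (CONFIG_VA_BITS - 1)) >> 30);                            /* 256 */
        return 0;
    }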

View File

@ -35,23 +35,28 @@
  * @author AIIT XUOS Lab
  * @date 2024.10.10
  */
-OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv", "elf64-littleriscv")
 OUTPUT_ARCH(riscv)
+OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv", "elf64-littleriscv")
 /* ENTRY(_start) */
 ENTRY( _boot_start )
-BOOT_STACK_SIZE = 0x4000;
 MEMORY {
 phy_ddr3 (rwx) : ORIGIN = 0x40200000, LENGTH = 1024M
 vir_ddr3 (rwx) : ORIGIN = 0x0000000040800000, LENGTH = 1024M
 }
+BOOT_STACK_SIZE = 0x4000;
 SECTIONS
 {
-.start_sec : {
 . = ORIGIN(phy_ddr3);
-/* initialization start checkpoint. */
-_boot_start = .;
+_start = .;
+.start_sec : {
 _start_image_addr = .;
 boot.o(.text)
@ -72,27 +77,32 @@ SECTIONS
 bootmmu.o(.bss .bss.* COMMON)
 ns16550.o(.bss .bss.* COMMON)
-/* stack for booting code. */
 . = ALIGN(0x1000);
 PROVIDE(stacks_start = .);
 . += BOOT_STACK_SIZE;
 PROVIDE(stacks_end = .);
 PROVIDE(stacks_top = .);
-/* initialization end checkpoint. */
 PROVIDE(boot_end_addr = .);
-} > phy_ddr3
+}
-/* AT: phy_ddr3 + .start_sec size */
-.text : AT(0x40800000) {
+.text : {
 . = ALIGN(0x1000);
 *(.text .text.* .gnu.linkonce.t.*)
-} > vir_ddr3
+}
 . = ALIGN(0x1000);
 .data : {
 *(.data .data.*)
+__start_init_task = .;
+init_thread_union = .;
+init_stack = .;
+KEEP(*(.data..init_task))
+KEEP(*(.data..init_thread_info))
+. = __start_init_task + ((1 << (12)) << (2));
+__end_init_task = .;
 . = ALIGN(0x1000);
 PROVIDE(_binary_fs_img_start = .);
 *(.rawdata_fs_img*)
@ -107,10 +117,10 @@ SECTIONS
 PROVIDE(_binary_default_fs_end = .);
 PROVIDE(__init_array_start = .);
 PROVIDE(__init_array_end = .);
-} > vir_ddr3
+}
 . = ALIGN(0x1000);
-_image_size = . - ORIGIN(phy_ddr3);
+_image_size = . - _start;
 . = ALIGN(0x1000);
 .bss : {
@ -120,7 +130,8 @@ SECTIONS
 . = ALIGN(0x1000);
 PROVIDE(__bss_end = .);
 PROVIDE(kernel_data_end = .);
-} > vir_ddr3
+__bss_stop = .;
+}
 . = ALIGN((1 << 21));
 .sdata : {
@ -130,5 +141,4 @@ SECTIONS
 _edata = .;
 _end = .;
 }

View File

@ -1,3 +1,4 @@
-SRC_FILES := bootmmu.c mmu.c pagetable_attr.c
+SRC_FILES := bootmmu.c mmu.c pagetable_attr.c mmu_init.c
 include $(KERNEL_ROOT)/compiler.mk

View File

@ -39,8 +39,6 @@ Modification:
 #include <stdint.h>
 #include <string.h>
-extern uint64_t kernel_data_end[];
-extern uint64_t kernel_data_begin[];
 // clang-format off
 #define L2_PTE_VALID (1 << 0)
@ -73,6 +71,7 @@ uint64_t boot_kern_l3pgdir[NUM_LEVEL3_PDE] __attribute__((aligned(0x1000))) = {
 uint64_t boot_dev_l4pgdirs[NUM_LEVEL3_PDE][NUM_LEVEL4_PTE] __attribute__((aligned(0x1000))) = { 0 };
 uint64_t boot_kern_l4pgdirs[NUM_LEVEL3_PDE][NUM_LEVEL4_PTE] __attribute__((aligned(0x1000))) = { 0 };
 static void build_boot_pgdir()
 {
 static bool built = false;
@ -87,12 +86,8 @@ static void build_boot_pgdir()
 cur_mem_paddr = ALIGNDOWN(dev_phy_mem_base, LEVEL2_PDE_SIZE);
 for (size_t i = 0; i < NUM_LEVEL3_PDE; i++) {
-boot_dev_l3pgdir[i] = (((uint64_t)boot_dev_l4pgdirs[i] >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | _PAGE_TABLE;
-for (size_t j = 0; j < NUM_LEVEL4_PTE; j++) {
-boot_dev_l4pgdirs[i][j] = ((cur_mem_paddr >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | PAGE_KERNEL;
-cur_mem_paddr += PAGE_SIZE;
-}
+boot_dev_l3pgdir[i] = (((uint64_t)cur_mem_paddr >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | PAGE_KERNEL;
+cur_mem_paddr += LEVEL3_PDE_SIZE;
 }
 // identical mem
@ -101,18 +96,15 @@ static void build_boot_pgdir()
 cur_mem_paddr = ALIGNDOWN(kern_phy_mem_base, PAGE_SIZE);
 for (size_t i = 0; i < NUM_LEVEL3_PDE; i++) {
-boot_kern_l3pgdir[i] = (((uint64_t)boot_kern_l4pgdirs[i] >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | _PAGE_TABLE;
-for (size_t j = 0; j < NUM_LEVEL4_PTE; j++) {
-boot_kern_l4pgdirs[i][j] = ((cur_mem_paddr >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | PAGE_KERNEL;
-cur_mem_paddr += PAGE_SIZE;
-}
+boot_kern_l3pgdir[i] = (((uint64_t)cur_mem_paddr >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | PAGE_KERNEL;
+cur_mem_paddr += LEVEL3_PDE_SIZE;
 }
 built = true;
 }
 }
 static inline void local_flush_tlb_all(void)
 {
 __asm__ __volatile__ ("sfence.vma" : : : "memory");
@ -123,28 +115,19 @@ static void load_boot_pgdir()
 unsigned long satp_val = 0;
 satp_val = (unsigned long)(((uintptr_t)boot_l2pgdir >> PAGE_SHIFT) | SATP_MODE);
-#if 1 //to debug
 csr_write(CSR_SATP, satp_val);
-#endif
 }
 extern void main(void);
-static bool _bss_inited = false;
-void bootmain()
+void bootmain(void)
 {
-_debug_uart_phymem_init();
 _debug_uart_printascii("bootmain start.\n");
 build_boot_pgdir();
 load_boot_pgdir();
 _debug_uart_printascii("boot pgdir success\n");
-__asm__ __volatile__("addi sp, sp, %0" ::"i"(KERN_OFFSET));
-if (!_bss_inited) {
-memset(&kernel_data_begin, 0x00, (size_t)((uint64_t)kernel_data_end - (uint64_t)kernel_data_begin));
-_bss_inited = true;
-}
 main();
 }
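
The build_boot_pgdir() change above replaces per-4 KiB level-4 tables with 2 MiB leaf entries at level 3: on RISC-V an entry with any of R/W/X set is a leaf, while a valid-only entry points at the next level. A sketch of the two encodings; flag bit positions follow the privileged spec, and `PAGE_KERNEL` here is the assumed V|R|W|G|A|D combination from pgtable.h:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define _PAGE_PFN_SHIFT 10
    #define _PAGE_TABLE 0x01UL        /* V only: pointer to next-level table */
    #define PAGE_KERNEL 0xE7UL        /* V|R|W|G|A|D: kernel leaf mapping */
    #define LEVEL3_PDE_SIZE (1UL << 21) /* 2 MiB per level-3 entry */

    static uint64_t make_pde(uint64_t pa, uint64_t flags)
    {
        return ((pa >> PAGE_SHIFT) << _PAGE_PFN_SHIFT) | flags;
    }

    int main(void)
    {
        uint64_t pa = 0x40200000;
        /* Old scheme: l3 entry points at an l4 page table. */
        printf("table pde = %#" PRIx64 "\n", make_pde(pa, _PAGE_TABLE));  /* 0x10080001 */
        /* New scheme: l3 entry maps the 2 MiB region directly. */
        printf("leaf  pde = %#" PRIx64 " (covers %#lx bytes)\n",
               make_pde(pa, PAGE_KERNEL), LEVEL3_PDE_SIZE);               /* 0x100800e7 */
        return 0;
    }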

View File

@ -116,3 +116,4 @@ When the process switches, the flush TLB is no longer required anymore.
 __attribute__((always_inline)) static inline uint64_t v2p(void* a) { return ((uint64_t)(a)) - KERN_MEM_BASE; }
 __attribute__((always_inline)) static inline void* p2v(uint64_t a) { return (void*)((a) + KERN_MEM_BASE); }
 #endif

View File

@ -0,0 +1,76 @@
#ifndef _XIUOS_PGTABLE_H
#define _XIUOS_PGTABLE_H
#include <asm/pfn.h>
#include <asm/pgtable.h>
/*
* On almost all architectures and configurations, 0 can be used as the
* upper ceiling to free_pgtables(): on many architectures it has the same
* effect as using TASK_SIZE. However, there is one configuration which
* must impose a more careful limit, to avoid freeing kernel pgtables.
*/
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING 0UL
#endif
/*
* This defines the first usable user address. Platforms
* can override its value with custom FIRST_USER_ADDRESS
* defined in their respective <asm/pgtable.h>.
*/
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS 0UL
#endif
/*
* This defines the generic helper for accessing PMD page
* table page. Although platforms can still override this
* via their respective <asm/pgtable.h>.
*/
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
/*
* A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
*
* The pXx_index() functions return the index of the entry in the page
* table page which would control the given virtual address
*
* As these functions may be used by the same code for different levels of
* the page table folding, they are always available, regardless of
* CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
* because in such cases PTRS_PER_PxD equals 1.
*/
static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif
#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif
#endif /* _XIUOS_PGTABLE_H */
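
For Sv39 the three `pXd_index()` helpers above just extract successive 9-bit fields of the virtual address, at bits 30, 21 and 12. A worked sketch for an assumed kernel address:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT 21
    #define PGDIR_SHIFT 30
    #define PTRS 512 /* PTRS_PER_PGD/PMD/PTE with 4 KiB pages and 8-byte entries */

    int main(void)
    {
        unsigned long va = 0xffffffe040201000UL; /* assumed example address */
        printf("pgd_index = %lu\n", (va >> PGDIR_SHIFT) & (PTRS - 1)); /* 385 */
        printf("pmd_index = %lu\n", (va >> PMD_SHIFT) & (PTRS - 1));   /* 1 */
        printf("pte_index = %lu\n", (va >> PAGE_SHIFT) & (PTRS - 1));  /* 1 */
        return 0;
    }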

View File

@ -36,8 +36,8 @@ Modification:
 /* physical memory layout */
 #define PHY_MEM_BASE (0x0000000040200000ULL)
 #define PHY_USER_FREEMEM_BASE (0x0000000080000000ULL)
-#define PHY_USER_FREEMEM_TOP (0x00000000F0000000ULL)
-#define PHY_MEM_STOP (0x00000000F0000000ULL)
+#define PHY_USER_FREEMEM_TOP (0x0000000200000000ULL)
+#define PHY_MEM_STOP (0x0000000200000000ULL)
 /* PTE-PAGE_SIZE */
 #define LEVEL4_PTE_SHIFT 12
@ -63,19 +63,19 @@ Modification:
 /* Device memory layout */
 #define DEV_PHYMEM_BASE (0x0000000010000000ULL)
-#define DEV_VRTMEM_BASE (0x0000001010000000ULL)
+#define DEV_VRTMEM_BASE ((0 - 0x0000004000000000ULL) + DEV_PHYMEM_BASE)
 #define DEV_MEM_SIZE (0x0000000030040000ULL)
 /* User memory layout */
 #define USER_STACK_SIZE PAGE_SIZE
-#define USER_MEM_BASE (0x0000002000000000ULL)
-#define USER_MEM_TOP (0x0000008000000000ULL)
+#define USER_MEM_BASE (0x0000000000000000ULL)
+#define USER_MEM_TOP (0x0000004000000000ULL)
 #define USER_IPC_SPACE_BASE (0x0000003000000000ULL)
 #define USER_IPC_USE_ALLOCATOR_WATERMARK (0x0000003000010000ULL)
 #define USER_IPC_SPACE_TOP (USER_IPC_SPACE_BASE + 0x10000000ULL)
 /* Kernel memory layout */
-#define KERN_MEM_BASE (0x0000000000000000ULL + PHY_MEM_BASE) // First kernel virtual address
+#define KERN_MEM_BASE ((0 - 0x0000002000000000ULL) + PHY_MEM_BASE) // First kernel virtual address
 #define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE)
 #define V2P(a) (((uint64_t)(a)) - KERN_OFFSET)
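
The new layout places the kernel and device windows by sign-extended subtraction from zero, so the revised constants can be checked with plain unsigned arithmetic:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PHY_MEM_BASE    0x0000000040200000ULL
    #define DEV_PHYMEM_BASE 0x0000000010000000ULL
    #define KERN_MEM_BASE   ((0 - 0x0000002000000000ULL) + PHY_MEM_BASE)
    #define DEV_VRTMEM_BASE ((0 - 0x0000004000000000ULL) + DEV_PHYMEM_BASE)
    #define KERN_OFFSET     (KERN_MEM_BASE - PHY_MEM_BASE)
    #define V2P(a) (((uint64_t)(a)) - KERN_OFFSET)

    int main(void)
    {
        printf("KERN_MEM_BASE   = %#" PRIx64 "\n", (uint64_t)KERN_MEM_BASE);   /* 0xffffffe040200000 */
        printf("DEV_VRTMEM_BASE = %#" PRIx64 "\n", (uint64_t)DEV_VRTMEM_BASE); /* 0xffffffc010000000 */
        printf("V2P(base)       = %#" PRIx64 "\n", V2P(KERN_MEM_BASE));        /* 0x40200000 */
        return 0;
    }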

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file
* @brief
* @version 1.0
* @author AIIT XUOS Lab
* @date 2024.12.02
*/
/*************************************************
File name: mmu_init.c
Description:
Others:
History:
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include "asm/page.h"
#include "pgtable.h"
#include "memlayout.h"
#include "ns16550.h"
#define __ro_after_init __attribute__((section(".data..ro_after_init")))
#define __page_aligned_data __attribute__((section(".data..page_aligned"))) __attribute__((aligned(PAGE_SIZE)))
#define __page_aligned_bss __attribute__((section(".bss..page_aligned"))) __attribute__((aligned(PAGE_SIZE)))
#define __initdata __attribute__((section(".init.data")))
#define __init __attribute__((section(".init.text")))
#define __aligned(x) __attribute__((__aligned__(x)))
#define __maybe_unused __attribute__((__unused__))
struct kernel_mapping kernel_map __ro_after_init;
extern char _start[];
unsigned long riscv_pfn_base __ro_after_init;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t early_uart_pmd[PTRS_PER_PMD] __initdata __attribute__((aligned(PAGE_SIZE)));
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
/* Before MMU is enabled */
return (pmd_t *)((uintptr_t)pa);
}
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
return (uintptr_t)early_pmd;
}
static void __init create_pmd_mapping_early(pmd_t *pmdp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pte_t *ptep;
phys_addr_t pte_phys;
uintptr_t pmd_idx = pmd_index(va);
if (sz == PMD_SIZE) {
if (pmd_none(pmdp[pmd_idx]))
pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
return;
}
}
void __init create_pgd_mapping_early(pgd_t *pgdp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pmd_t *nextp;
phys_addr_t next_phys;
uintptr_t pgd_idx = pgd_index(va);
if (sz == PGDIR_SIZE) {
if (pgd_val(pgdp[pgd_idx]) == 0)
pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
return;
}
if (pgd_val(pgdp[pgd_idx]) == 0) {
next_phys = alloc_pmd_early(va);
pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = get_pmd_virt_early(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
nextp = get_pmd_virt_early(next_phys);
}
create_pmd_mapping_early(nextp, va, pa, sz, prot);
}
static void __init create_kernel_page_table_early(pgd_t *pgdir, bool early)
{
uintptr_t va, end_va;
int i = 0;
end_va = kernel_map.virt_addr + kernel_map.size;
for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) {
create_pgd_mapping_early(pgdir, va,
kernel_map.phys_addr + (va - kernel_map.virt_addr),
PMD_SIZE,
PAGE_KERNEL_EXEC);
i++;
}
}
/*
* setup_vm_early() is called from boot.S with MMU-off.
*
* The following requirements should be honoured for setup_vm_early() to
* work correctly:
* 1) It should use PC-relative addressing for accessing kernel symbols.
* To achieve this we always use GCC cmodel=medany.
* 2) The compiler instrumentation for FTRACE will not work for setup_vm()
* so disable compiler instrumentation when FTRACE is enabled.
*/
void __init setup_vm_early(void)
{
_debug_uart_printascii("setup_vm_early start\n");
kernel_map.virt_addr = KERN_MEM_BASE;
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
/* Setup trampoline PGD and PMD */
create_pgd_mapping_early(trampoline_pg_dir, kernel_map.virt_addr, (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
create_pmd_mapping_early(trampoline_pmd, kernel_map.virt_addr, kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
/*
* Setup early PGD covering entire kernel which will allow
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
create_kernel_page_table_early(early_pg_dir, true);
/* Setup uart PGD and PMD */
create_pgd_mapping_early(early_pg_dir, DEV_VRTMEM_BASE, (uintptr_t)early_uart_pmd, PGDIR_SIZE, PAGE_TABLE);
create_pmd_mapping_early(early_uart_pmd, DEV_VRTMEM_BASE, DEV_PHYMEM_BASE, PMD_SIZE, PAGE_KERNEL);
}