free memory based on tracer

tuyuyang 2024-07-27 22:37:39 +08:00
parent d78d5bb36a
commit 19d467463b
16 changed files with 103 additions and 78 deletions

View File

@@ -147,7 +147,7 @@ void* AchieveResource(TraceTag* tag)
 bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
 {
-    assert(new_tag != NULL && owner != NULL);
+    assert(owner != NULL);
     if (owner->meta == NULL) {
         ERROR("Tracer: Empty owner\n");
         return false;
@@ -168,7 +168,9 @@ bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta
     doubleListAddOnHead(&new_node->list_node, &owner->meta->children_guard);
     new_node->parent = owner->meta;
+    if (new_tag != NULL) {
         new_tag->meta = new_node;
+    }
     return true;
 }
@@ -187,12 +189,42 @@ bool DeleteResource(TraceTag* target, TraceTag* owner)
     if (target->meta->name != NULL) {
         slab_free(&sys_tracer.node_name_allocator, target->meta->name);
     }
     // delete all children
-    /// @attention currently donot allow multilevel resource deletion
     if (target->meta->type == TRACER_OWNER) {
-        assert(IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard));
+        while (!IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard)) {
+            TraceTag tmp_node = {
+                .meta = DOUBLE_LIST_ENTRY(target->meta->children_guard.next, TracerNode, list_node),
+            };
+            DeleteResource(&tmp_node, target);
+        }
     }
     slab_free(&sys_tracer.node_allocator, target->meta);
     target->meta = NULL;
     return true;
 }
+
+void debug_list_tracetree_inner(TracerNode* cur_node)
+{
+    DEBUG("[%s] ", cur_node->name);
+    TracerNode* tmp = NULL;
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
+    {
+        if (tmp->name != NULL) {
+            DEBUG("%s ", tmp->name);
+        } else {
+            DEBUG("ANON ");
+        }
+    }
+    DEBUG("\n");
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
+    {
+        debug_list_tracetree_inner(tmp);
+    }
+}
+
+void debug_list_tracetree()
+{
+    TracerNode* ref_root = RequireRootTag()->meta;
+    debug_list_tracetree_inner(ref_root);
+}

View File

@@ -32,7 +32,6 @@ Modification:
 #include "list.h"
 #include "memlayout.h"
 #include "spinlock.h"
-#include "pagetable.h"
 #include <stdbool.h>
 #include <stdint.h>

View File

@@ -34,6 +34,7 @@ Modification:
 bool module_phymem_init();
 char* kalloc(size_t size);
 bool kfree(char* vaddr);
+bool raw_kfree(char* paddr);
 char* raw_alloc(size_t size);
 bool raw_free(char* paddr);

View File

@@ -29,10 +29,14 @@ Modification:
 *************************************************/
 #pragma once
+#include "actracer.h"
 #include "bitmap64.h"
 #include "buddy.h"
 #include "list.h"
+#include "share_page.h"
+
+struct TopLevelPageDirectory {
+    uintptr_t* pd_addr;
+};
 struct ThreadStackPointer {
     int argc;
@@ -42,6 +46,8 @@ struct ThreadStackPointer {
 };
 struct MemSpace {
+    /* trace node */
+    TraceTag tag;
     /* task memory resources */
     struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
     uintptr_t heap_base; // mem size of proc used(allocated by kernel)
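Reviewer note: the new tag field turns each MemSpace into an owner node in the tracer tree, so every physical page handed to the memspace can be registered as an anonymous child (see the pagetable.c and sys_mmap hunks below) and reclaimed later by free_memspace without walking page tables. The resulting hierarchy implied by this commit looks roughly like the sketch below; only "KTaskManager" is a literal name from the diff, the rest are descriptive placeholders.

softkernel tag
  "KTaskManager"      (TRACER_OWNER, xizi_task_manager.tag)
    <memspace>        (TRACER_OWNER, pmemspace->tag, unnamed, one per struct MemSpace)
      ANON page       (TRACER_MEM_FROM_BUDDY_AC_RESOURCE, physical address of a page given to the memspace)
      ANON page       (TRACER_MEM_FROM_BUDDY_AC_RESOURCE, ...)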

View File

@@ -33,11 +33,12 @@ Modification:
 #include <string.h>
 #include "memlayout.h"
-#include "actracer.h"
 #include "mmu.h"
 #include "mmu_common.h"
+#include "actracer.h"
+#include "memspace.h"
 // clang-format off
 #define ALIGNUP(size, align) (((uintptr_t)(size) + (uintptr_t)(align) - 1) & ~((uintptr_t)(align) - 1))
 #define ALIGNDOWN(size, align) ((uintptr_t)(size) & ~((uintptr_t)(align) - 1))
@@ -49,10 +50,6 @@ Modification:
 #define TOPLEVLE_PAGEDIR_SIZE sizeof(uintptr_t) * NUM_TOPLEVEL_PDE
 // clang-format on
-struct TopLevelPageDirectory {
-    uintptr_t* pd_addr;
-};
 struct PagerRightGroup {
     struct TraceTag mmu_driver_tag;
 };
@@ -60,10 +57,10 @@ struct PagerRightGroup {
 struct XiziPageManager {
     bool (*new_pgdir)(struct TopLevelPageDirectory* pgdir);
     void (*free_user_pgdir)(struct TopLevelPageDirectory* pgdir);
-    bool (*map_pages)(uintptr_t* pd_addr, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev);
+    bool (*map_pages)(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev);
     bool (*unmap_pages)(uintptr_t* pd_addr, uintptr_t vaddr, int len);
-    uintptr_t (*resize_user_pgdir)(struct TopLevelPageDirectory* pgdir, uintptr_t old_size, uintptr_t new_size);
+    uintptr_t (*resize_user_pgdir)(struct MemSpace* pmemspace, uintptr_t old_size, uintptr_t new_size);
     uintptr_t (*address_translate)(struct TopLevelPageDirectory* pgdir, uintptr_t vaddr);
     uintptr_t (*cross_vspace_data_copy)(struct TopLevelPageDirectory* pgdir, uintptr_t cross_dest, uintptr_t src, uintptr_t len);
 };
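Reviewer note: map_pages and resize_user_pgdir now take the MemSpace rather than a raw pd_addr, so the pager can reach pmemspace->tag when it allocates pages. A minimal call-site sketch of the migration, mirroring the share_page.c hunks later in this commit (variable names are illustrative, error handling is abridged):

/* before: callers passed the raw top-level page directory
 *   xizi_pager.map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, PAGE_SIZE, false);
 * after: callers pass the MemSpace itself                                          */
if (!xizi_pager.map_pages(pmemspace, vaddr, paddr, PAGE_SIZE, false)) {
    /* unmap_pages is unchanged and still works on pd_addr */
    xizi_pager.unmap_pages(pmemspace->pgdir.pd_addr, vaddr, PAGE_SIZE);
}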

View File

@@ -109,6 +109,7 @@ struct SchedulerRightGroup {
 };
 struct XiziTaskManager {
+    TraceTag tag;
     /* thead schedule lists */
     struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
     struct double_list_node task_running_list_head;
@@ -149,5 +150,4 @@ extern uint32_t ready_task_priority;
 extern struct Thread* next_task_emergency;
 extern struct XiziTaskManager xizi_task_manager;
-int spawn_embedded_task(char* img_start, char* name, char** argv);
-bool module_task_manager_init(void);
+bool module_task_manager_init(TraceTag* softkernel_tag);

View File

@@ -48,7 +48,7 @@ bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softker
     AchieveResourceTag(&intr_driver_tag, _hardkernel_tag, "intr-ac-resource");
     load_kern_pgdir(&mmu_driver_tag, &intr_driver_tag); // enter kernel virtmem space
-    module_task_manager_init(); // init task
+    module_task_manager_init(_softkernel_tag); // init task
     struct SharePageRightGroup sp_rights;
     AchieveResourceTag(&sp_rights.dcache_driver_tag, _hardkernel_tag, "dcache-ac-resource");

View File

@@ -44,8 +44,10 @@ extern int sys_spawn(char* img_start, char* name, char** argv);
 static struct TraceTag hardkernel_tag, softkernel_tag;
 static volatile int core_para_init = 0;
-static void sync_cores() {
-    while (core_para_init != ((1 << NR_CPU) - 1)) ;
+static void main_sync_cores()
+{
+    while (core_para_init != ((1 << NR_CPU) - 1))
+        ;
     return;
 }
@@ -87,7 +89,8 @@ int main(void)
     for (int i = 1; i < NR_CPU; i++) {
         // start secondary cpus
-        while ((core_para_init & (1 << (i - 1))) == 0);
+        while ((core_para_init & (1 << (i - 1))) == 0)
+            ;
         cpu_start_secondary(i);
     }
@@ -106,7 +109,7 @@ int main(void)
     // sync memory
     __sync_synchronize();
-    sync_cores();
+    main_sync_cores();
     start_smp_cache_broadcast(cpu_id);
     // enter kernel seriously
     xizi_enter_kernel();

View File

@@ -69,6 +69,11 @@ bool kfree(char* vaddr)
     return KBuddyFree(&kern_virtmem_buddy, V2P_WO(vaddr));
 }
+
+bool raw_kfree(char* paddr)
+{
+    return KBuddyFree(&kern_virtmem_buddy, paddr);
+}
 char* raw_alloc(size_t size)
 {
     char* mem_alloc = KBuddyAlloc(&user_phy_freemem_buddy, size);
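Reviewer note: raw_kfree is the physical-address counterpart of kfree. kfree takes a kernel virtual address and converts it with V2P_WO before returning it to kern_virtmem_buddy; raw_kfree hands a physical address straight back to that same buddy; raw_alloc/raw_free (unchanged) operate on user_phy_freemem_buddy. A small sketch of the intended pairing, assuming the usual kernel context (PAGE_SIZE, V2P):

char* va = kalloc(PAGE_SIZE);       /* kernel buddy, returns a kernel virtual address */
if (va != NULL) {
    kfree(va);                      /* free by virtual address (V2P_WO inside)        */
    /* raw_kfree(V2P(va));             equivalent: free the same page by physical address */
}

char* pa = raw_alloc(PAGE_SIZE);    /* user physical-memory buddy, returns a physical address */
if (pa != NULL) {
    raw_free(pa);                   /* must go back to user_phy_freemem_buddy, not raw_kfree */
}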

View File

@@ -122,7 +122,7 @@ static bool _unmap_pages(uintptr_t* pgdir, uintptr_t vaddr, int len)
 /// @param len
 /// @param is_dev
 /// @return
-static bool _map_user_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
+static bool _map_user_pages(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
 {
     if (len < 0) {
         return false;
@@ -140,13 +140,13 @@ static bool _map_user_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr,
         _p_pgtbl_mmu_access->MmuUsrDevPteAttr(&mem_attr);
     }
-    return _map_pages(pgdir, vaddr, paddr, (intptr_t)len, mem_attr);
+    return _map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, (intptr_t)len, mem_attr);
 }
 /// assume that a user pagedir is allocated from [0, size)
 /// if new_size > old_size, allocate more space,
 /// if old_size > new_size, free extra space, to avoid unnecessary alloc/free.
-static uintptr_t _resize_user_pgdir(struct TopLevelPageDirectory* pgdir, uintptr_t old_size, uintptr_t new_size)
+static uintptr_t _resize_user_pgdir(struct MemSpace* pmemspace, uintptr_t old_size, uintptr_t new_size)
 {
     if (UNLIKELY(new_size > USER_MEM_TOP)) {
         ERROR("user size out of range.\n");
@@ -167,9 +167,10 @@ static uintptr_t _resize_user_pgdir(struct TopLevelPageDirectory* pgdir, uintptr
         }
         memset(new_page, 0, PAGE_SIZE);
-        if (!xizi_pager.map_pages(pgdir->pd_addr, cur_size, V2P(new_page), PAGE_SIZE, false)) {
+        if (!xizi_pager.map_pages(pmemspace, cur_size, V2P(new_page), PAGE_SIZE, false)) {
             return cur_size;
         }
+        CreateResourceTag(NULL, &pmemspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, V2P(new_page));
         cur_size += PAGE_SIZE;
     }
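Reviewer note: the heap-growth path now records every page it maps under the memspace's tracer node; this is the bookkeeping that free_memspace later walks. A condensed sketch of the allocate-map-register loop follows; the allocation call and its NULL check are reconstructed from context not shown in the hunk (the hunk only shows the memset, the mapping, and the new CreateResourceTag call), so treat them as assumptions.

/* Condensed sketch of _resize_user_pgdir's growth loop after this change. */
while (cur_size < new_size) {
    char* new_page = kalloc(PAGE_SIZE);   /* assumed allocation, per the elided context */
    if (new_page == NULL) {
        return cur_size;                  /* report how far the resize got */
    }
    memset(new_page, 0, PAGE_SIZE);
    if (!xizi_pager.map_pages(pmemspace, cur_size, V2P(new_page), PAGE_SIZE, false)) {
        return cur_size;
    }
    /* record the physical page under the memspace's tracer node so
       free_memspace() can find and release it later */
    CreateResourceTag(NULL, &pmemspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, V2P(new_page));
    cur_size += PAGE_SIZE;
}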

View File

@@ -73,23 +73,6 @@ void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
         // free each level4 page table
         uintptr_t* pgtbl_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(pgdir->pd_addr[level4_entry_idx]);
         if (pgtbl_paddr != NULL) {
-            // free each page
-            for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
-                uintptr_t vaddr = (level4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
-                // get page paddr
-                uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(pgtbl_paddr))[page_entry_idx], PAGE_SIZE);
-                if (page_paddr != NULL) {
-                    // IPC vaddr should not be addressed here.
-                    assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
-                    if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
-                        kfree(P2V(page_paddr));
-                    } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
-                        raw_free((char*)page_paddr);
-                    }
-                }
-            }
             kfree(P2V(pgtbl_paddr));
         }
     }

View File

@@ -96,22 +96,6 @@ void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
             uintptr_t* l4_table_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(l3_table_vaddr[l3_entry_idx]);
             if (l4_table_paddr != NULL) {
                 uintptr_t* l4_table_vaddr = P2V(l4_table_paddr);
-                for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
-                    uintptr_t vaddr = (l2_entry_idx << LEVEL2_PDE_SHIFT) | (l3_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
-                    // get page paddr
-                    uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN((l4_table_vaddr)[page_entry_idx], PAGE_SIZE);
-                    if (page_paddr != NULL) {
-                        // Ensure the virtual address is not in the IPC address space
-                        assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
-                        if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
-                            kfree(P2V(page_paddr));
-                        } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
-                            raw_free((char*)page_paddr);
-                        }
-                    }
-                }
                 kfree(P2V(l4_table_paddr));
             }
         }

View File

@@ -133,12 +133,12 @@ static uintptr_t map_task_share_page(struct Thread* task, const uintptr_t paddr,
     }
     // map first area
-    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
+    if (!xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
         return (uintptr_t)NULL;
     }
     // map second area
-    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
+    if (!xizi_pager.map_pages(task->memspace, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
         xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
         return (uintptr_t)NULL;
     }
@@ -161,9 +161,9 @@ uintptr_t task_map_pages(struct Thread* task, const uintptr_t vaddr, const uintp
     bool ret = false;
     if (is_dev) {
-        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
+        ret = xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, true);
     } else {
-        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
+        ret = xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, false);
     }
     if (!ret) {

View File

@@ -44,6 +44,11 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
     int true_len = ALIGNUP(len, PAGE_SIZE);
     if (*paddr != (uintptr_t)NULL) {
+        if (*paddr >= PHY_MEM_BASE && *paddr < PHY_MEM_STOP && cur_task->tid > 1) {
+            ERROR("mapping invalid memory: 0x%p\n", *paddr);
+            return -1;
+        }
         if (xizi_share_page_manager.task_map_pages(cur_task, *vaddr, *paddr, true_len / PAGE_SIZE, is_dev) == (uintptr_t)NULL) {
             return -1;
         }
@@ -52,6 +57,7 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
     uintptr_t load_vaddr = *vaddr;
     while (load_len < true_len) {
         char* new_paddr = raw_alloc(PAGE_SIZE);
+        CreateResourceTag(NULL, &cur_task->memspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, new_paddr);
         if (new_paddr == NULL) {
             return -1;
         }
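Reviewer note: the anonymous-mapping path now registers each raw_alloc'd page under the calling task's memspace tag, so those pages are reclaimed when the memspace is freed. A hedged sketch of the loop follows; unlike the hunk above, the sketch registers the page only after the NULL check, so a failed allocation is never recorded in the tracer tree, and the mapping call and loop-variable updates are reconstructed from context not shown here.

/* Sketch of sys_mmap's anonymous-mapping loop after this change. */
while (load_len < true_len) {
    char* new_paddr = raw_alloc(PAGE_SIZE);
    if (new_paddr == NULL) {
        return -1;
    }
    CreateResourceTag(NULL, &cur_task->memspace->tag, NULL,
        TRACER_MEM_FROM_BUDDY_AC_RESOURCE, new_paddr);
    /* map the page at load_vaddr via task_map_pages (call elided, see share_page.c above) */
    load_len += PAGE_SIZE;
    load_vaddr += PAGE_SIZE;
}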

View File

@@ -32,11 +32,11 @@ Modification:
 #include <stddef.h>
 #include <stdint.h>
+#include "assert.h"
 #include "bitmap64.h"
 #include "execelf.h"
 #include "kalloc.h"
 #include "memspace.h"
-#include "pagetable.h"
 #include "task.h"
 #define MAX_SUPPORT_PARAMS 32
@@ -56,6 +56,7 @@ struct MemSpace* alloc_memspace()
     pmemspace->mem_size = 0;
     pmemspace->pgdir.pd_addr = 0;
     pmemspace->thread_to_notify = NULL;
+    CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, NULL, TRACER_OWNER, (void*)pmemspace);
     return pmemspace;
 }
@@ -68,12 +69,24 @@ void free_memspace(struct MemSpace* pmemspace)
         xizi_pager.free_user_pgdir(&pmemspace->pgdir);
     }
+    TracerNode* tmp_node = NULL;
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp_node, &pmemspace->tag.meta->children_guard, list_node)
+    {
+        assert((uintptr_t)tmp_node->p_resource >= PHY_MEM_BASE && (uintptr_t)tmp_node->p_resource < PHY_MEM_STOP);
+        if ((uintptr_t)tmp_node->p_resource < PHY_USER_FREEMEM_BASE) {
+            kfree(P2V(tmp_node->p_resource));
+        } else {
+            raw_free(tmp_node->p_resource);
+        }
+    }
     /* free ipc virt address allocator */
     if (pmemspace->massive_ipc_allocator != NULL) {
         KBuddyDestory(pmemspace->massive_ipc_allocator);
         slab_free(&xizi_task_manager.task_buddy_allocator, (void*)pmemspace->massive_ipc_allocator);
     }
+    DeleteResource(&pmemspace->tag, &xizi_task_manager.tag);
     slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
 }
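Reviewer note: this is the heart of the commit. Instead of walking every level-4 page table to find and free user pages (the loops removed from both _free_user_pgdir variants above), free_memspace now walks the children of the memspace's tracer node and releases each recorded physical page, picking the allocator by physical address range; DeleteResource then tears down the tracer subtree itself. A sketch of the per-page decision, using the same bounds macros as the hunk (the helper name is illustrative):

/* Sketch of the per-page release decision made in the walk above. */
static void release_traced_page(void* p_resource)
{
    uintptr_t paddr = (uintptr_t)p_resource;
    assert(paddr >= PHY_MEM_BASE && paddr < PHY_MEM_STOP);
    if (paddr < PHY_USER_FREEMEM_BASE) {
        kfree(P2V(p_resource));      /* page came from the kernel buddy (kalloc)        */
    } else {
        raw_free((char*)p_resource); /* page came from the user phymem buddy (raw_alloc) */
    }
}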
@@ -101,14 +114,12 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
     /* allocate a pgdir */
     /* only supports first inited memspace */
     assert(pmemspace->pgdir.pd_addr == NULL);
-    struct TopLevelPageDirectory pgdir;
-    pgdir.pd_addr = NULL;
-    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
+    if (UNLIKELY(!xizi_pager.new_pgdir(&pmemspace->pgdir))) {
         ERROR("Create new pgdir failed.\n");
         goto error_exec;
     }
     /* copy kernel pagetable so that interrupt and syscall wont corrupt */
-    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
+    memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
     // read elf file by (header, section)
     uintptr_t load_size = 0;
@@ -126,7 +137,7 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
             // read section
             // 1. alloc space
-            if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
+            if ((load_size = xizi_pager.resize_user_pgdir(pmemspace, load_size, ph.vaddr + ph.memsz))
                 != ph.vaddr + ph.memsz) {
                 ERROR("Add uspace size failed.\n");
                 goto error_exec;
@@ -136,9 +147,9 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
                 LOG("Unsupported elf file, try use flag -N to compile.\n");
             }
             for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
-                uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
+                uintptr_t page_paddr = xizi_pager.address_translate(&pmemspace->pgdir, ph.vaddr + addr_offset);
                 if (page_paddr == 0) {
-                    ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pgdir.pd_addr);
+                    ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pmemspace->pgdir.pd_addr);
                     goto error_exec;
                 }
                 uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
@@ -148,15 +159,14 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
     /// elf file content now in memory
     // memspace will use this page dir
-    pmemspace->pgdir = pgdir;
     pmemspace->heap_base = ALIGNUP(load_size, PAGE_SIZE);
     pmemspace->mem_size = pmemspace->heap_base;
     return (uintptr_t*)elf.entry;
 error_exec:
-    if (pgdir.pd_addr != NULL) {
-        xizi_pager.free_user_pgdir(&pgdir);
+    if (pmemspace->pgdir.pd_addr != NULL) {
+        xizi_pager.free_user_pgdir(&pmemspace->pgdir);
     }
     ERROR("Error loading memspace.\n");
     return NULL;
@@ -209,7 +219,7 @@ struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** arg
     }
     /* map memory to user stack space in memspace*/
-    if (!xizi_pager.map_pages(pmemspace->pgdir.pd_addr, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), V2P(stack_bottom), USER_STACK_SIZE, false)) {
+    if (!xizi_pager.map_pages(pmemspace, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), V2P(stack_bottom), USER_STACK_SIZE, false)) {
         /* this could only fail due to inner page directory's allocation failure */
         ERROR("User stack map failed\n");
         handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);

View File

@@ -303,11 +303,8 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         /* if there's not a runnable task, wait for one */
         if (next_task == NULL) {
             xizi_leave_kernel();
-
-            /* leave kernel for other cores, so they may create a runnable task */
-
             xizi_enter_kernel();
             continue;
         }
@@ -392,8 +389,9 @@ struct XiziTaskManager xizi_task_manager = {
     .set_cur_task_priority = _set_cur_task_priority
 };
-bool module_task_manager_init(void)
+bool module_task_manager_init(TraceTag* softkernel_tag)
 {
+    CreateResourceTag(&xizi_task_manager.tag, softkernel_tag, "KTaskManager", TRACER_OWNER, &xizi_task_manager);
     xizi_task_manager.init();
     return true;
 }
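Reviewer note: this hunk closes the ownership chain. softkernel_init passes _softkernel_tag down, module_task_manager_init hangs "KTaskManager" under it, alloc_memspace hangs each memspace under "KTaskManager", and every traced page hangs under its memspace. A condensed sketch of that wiring as set up by this commit (function name and surrounding calls are abridged):

/* Condensed from softkernel_init / module_task_manager_init in this commit. */
bool softkernel_init_sketch(struct TraceTag* _softkernel_tag)
{
    /* "KTaskManager" becomes an owner node under the softkernel tag ... */
    module_task_manager_init(_softkernel_tag);

    /* ... so every later alloc_memspace() attaches its MemSpace tag:
       CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, NULL,
                         TRACER_OWNER, (void*)pmemspace);
       and free_memspace() detaches it again with
       DeleteResource(&pmemspace->tag, &xizi_task_manager.tag);           */
    return true;
}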