forked from xuos/xiuos

free memory based on tracer

parent d78d5bb36a, commit 19d467463b
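The pattern behind this commit: instead of discovering a memspace's pages by walking its page tables at teardown, every physical page handed to a memspace is registered as a child of the memspace's trace tag at allocation time, and free_memspace sweeps those children. A minimal sketch of that lifecycle, assuming the tracer API exactly as it appears in the hunks below (the demo_ function name is illustrative, and this compiles only inside the kernel tree):

    /* Sketch: allocate-tag-sweep lifecycle of a memspace's physical pages. */
    void demo_memspace_page_lifecycle(struct MemSpace* pmemspace)
    {
        /* 1. the memspace itself owns a TRACER_OWNER node (see alloc_memspace below) */
        CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, NULL, TRACER_OWNER, (void*)pmemspace);

        /* 2. each page given to the memspace becomes an anonymous child node
              carrying the page's physical address as p_resource */
        char* new_paddr = raw_alloc(PAGE_SIZE);
        if (new_paddr != NULL) {
            CreateResourceTag(NULL, &pmemspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, new_paddr);
        }

        /* 3. teardown walks the children and returns each page to its buddy
              allocator, then deletes the owner node (see free_memspace below) */
        TracerNode* tmp_node = NULL;
        DOUBLE_LIST_FOR_EACH_ENTRY(tmp_node, &pmemspace->tag.meta->children_guard, list_node)
        {
            raw_free(tmp_node->p_resource);
        }
        DeleteResource(&pmemspace->tag, &xizi_task_manager.tag);
    }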
@@ -147,7 +147,7 @@ void* AchieveResource(TraceTag* tag)
 
 bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
 {
-    assert(new_tag != NULL && owner != NULL);
+    assert(owner != NULL);
     if (owner->meta == NULL) {
         ERROR("Tracer: Empty owner\n");
         return false;
@@ -168,7 +168,9 @@ bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta
     doubleListAddOnHead(&new_node->list_node, &owner->meta->children_guard);
     new_node->parent = owner->meta;
 
-    new_tag->meta = new_node;
+    if (new_tag != NULL) {
+        new_tag->meta = new_node;
+    }
     return true;
 }
@@ -187,12 +189,42 @@ bool DeleteResource(TraceTag* target, TraceTag* owner)
     if (target->meta->name != NULL) {
         slab_free(&sys_tracer.node_name_allocator, target->meta->name);
     }
     // delete all children
     /// @attention currently donot allow multilevel resource deletion
     if (target->meta->type == TRACER_OWNER) {
-        assert(IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard));
+        while (!IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard)) {
+            TraceTag tmp_node = {
+                .meta = DOUBLE_LIST_ENTRY(target->meta->children_guard.next, TracerNode, list_node),
+            };
+            DeleteResource(&tmp_node, target);
+        }
     }
     slab_free(&sys_tracer.node_allocator, target->meta);
     target->meta = NULL;
     return true;
 }
+
+void debug_list_tracetree_inner(TracerNode* cur_node)
+{
+    DEBUG("[%s] ", cur_node->name);
+    TracerNode* tmp = NULL;
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
+    {
+        if (tmp->name != NULL) {
+            DEBUG("%s ", tmp->name);
+        } else {
+            DEBUG("ANON ");
+        }
+    }
+    DEBUG("\n");
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
+    {
+        debug_list_tracetree_inner(tmp);
+    }
+}
+
+void debug_list_tracetree()
+{
+    TracerNode* ref_root = RequireRootTag()->meta;
+    debug_list_tracetree_inner(ref_root);
+}
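Taken together, the two hunks above relax CreateResourceTag to accept a NULL new_tag (for anonymous child resources) and turn DeleteResource's empty-children assertion into a cascading delete: while the owner still has children, a stack-local TraceTag is pointed at the head child and DeleteResource recurses. A sketch of the resulting behavior, with illustrative names and addresses; note that DeleteResource frees only tracer metadata (slab nodes), not the p_resource payloads, which callers such as free_memspace must release first:

    /* Sketch: cascading deletion of an owner node and its anonymous children. */
    void demo_cascading_delete(TraceTag* parent)
    {
        TraceTag demo_owner;
        CreateResourceTag(&demo_owner, parent, "demo-owner", TRACER_OWNER, NULL);

        /* new_tag == NULL is now allowed: each child is reachable only
           through demo_owner's children_guard list */
        CreateResourceTag(NULL, &demo_owner, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, (void*)0x80001000);
        CreateResourceTag(NULL, &demo_owner, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, (void*)0x80002000);

        /* unlinks and slab-frees both child nodes, then demo_owner's own node;
           the pages behind the children's p_resource values are NOT freed here */
        DeleteResource(&demo_owner, parent);
    }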
@@ -32,7 +32,6 @@ Modification:
 #include "list.h"
 #include "memlayout.h"
 #include "spinlock.h"
-#include "pagetable.h"
 
 #include <stdbool.h>
 #include <stdint.h>
@@ -34,6 +34,7 @@ Modification:
 bool module_phymem_init();
 char* kalloc(size_t size);
 bool kfree(char* vaddr);
+bool raw_kfree(char* paddr);
 
 char* raw_alloc(size_t size);
 bool raw_free(char* paddr);
@@ -29,10 +29,14 @@ Modification:
 *************************************************/
 #pragma once
 
+#include "actracer.h"
 #include "bitmap64.h"
 #include "buddy.h"
 #include "list.h"
 #include "share_page.h"
 
+struct TopLevelPageDirectory {
+    uintptr_t* pd_addr;
+};
+
 struct ThreadStackPointer {
     int argc;
@@ -42,6 +46,8 @@ struct ThreadStackPointer {
 };
 
 struct MemSpace {
+    /* trace node */
+    TraceTag tag;
     /* task memory resources */
     struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
     uintptr_t heap_base; // mem size of proc used(allocated by kernel)
@@ -33,11 +33,12 @@ Modification:
 #include <string.h>
 
 #include "memlayout.h"
 
-#include "actracer.h"
 #include "mmu.h"
 #include "mmu_common.h"
 
+#include "actracer.h"
+#include "memspace.h"
 
 // clang-format off
 #define ALIGNUP(size, align) (((uintptr_t)(size) + (uintptr_t)(align) - 1) & ~((uintptr_t)(align) - 1))
 #define ALIGNDOWN(size, align) ((uintptr_t)(size) & ~((uintptr_t)(align) - 1))
@@ -49,10 +50,6 @@ Modification:
 #define TOPLEVLE_PAGEDIR_SIZE sizeof(uintptr_t) * NUM_TOPLEVEL_PDE
 // clang-format on
 
-struct TopLevelPageDirectory {
-    uintptr_t* pd_addr;
-};
-
 struct PagerRightGroup {
     struct TraceTag mmu_driver_tag;
 };
@@ -60,10 +57,10 @@ struct PagerRightGroup {
 struct XiziPageManager {
     bool (*new_pgdir)(struct TopLevelPageDirectory* pgdir);
     void (*free_user_pgdir)(struct TopLevelPageDirectory* pgdir);
-    bool (*map_pages)(uintptr_t* pd_addr, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev);
+    bool (*map_pages)(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev);
     bool (*unmap_pages)(uintptr_t* pd_addr, uintptr_t vaddr, int len);
 
-    uintptr_t (*resize_user_pgdir)(struct TopLevelPageDirectory* pgdir, uintptr_t old_size, uintptr_t new_size);
+    uintptr_t (*resize_user_pgdir)(struct MemSpace* pmemspace, uintptr_t old_size, uintptr_t new_size);
     uintptr_t (*address_translate)(struct TopLevelPageDirectory* pgdir, uintptr_t vaddr);
    uintptr_t (*cross_vspace_data_copy)(struct TopLevelPageDirectory* pgdir, uintptr_t cross_dest, uintptr_t src, uintptr_t len);
 };
@@ -109,6 +109,7 @@ struct SchedulerRightGroup {
 };
 
 struct XiziTaskManager {
+    TraceTag tag;
     /* thead schedule lists */
     struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
     struct double_list_node task_running_list_head;
@@ -149,5 +150,4 @@ extern uint32_t ready_task_priority;
 extern struct Thread* next_task_emergency;
 extern struct XiziTaskManager xizi_task_manager;
 
-int spawn_embedded_task(char* img_start, char* name, char** argv);
-bool module_task_manager_init(void);
+bool module_task_manager_init(TraceTag* softkernel_tag);
@@ -48,7 +48,7 @@ bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softker
     AchieveResourceTag(&intr_driver_tag, _hardkernel_tag, "intr-ac-resource");
     load_kern_pgdir(&mmu_driver_tag, &intr_driver_tag); // enter kernel virtmem space
 
-    module_task_manager_init(); // init task
+    module_task_manager_init(_softkernel_tag); // init task
 
     struct SharePageRightGroup sp_rights;
     AchieveResourceTag(&sp_rights.dcache_driver_tag, _hardkernel_tag, "dcache-ac-resource");
@@ -44,8 +44,10 @@ extern int sys_spawn(char* img_start, char* name, char** argv);
 static struct TraceTag hardkernel_tag, softkernel_tag;
 static volatile int core_para_init = 0;
 
-static void sync_cores() {
-    while (core_para_init != ((1 << NR_CPU) - 1)) ;
+static void main_sync_cores()
+{
+    while (core_para_init != ((1 << NR_CPU) - 1))
+        ;
+    return;
 }
@@ -87,7 +89,8 @@ int main(void)
 
     for (int i = 1; i < NR_CPU; i++) {
         // start secondary cpus
-        while ((core_para_init & (1 << (i - 1))) == 0);
+        while ((core_para_init & (1 << (i - 1))) == 0)
+            ;
         cpu_start_secondary(i);
     }
@@ -106,7 +109,7 @@ int main(void)
 
     // sync memory
     __sync_synchronize();
-    sync_cores();
+    main_sync_cores();
     start_smp_cache_broadcast(cpu_id);
     // enter kernel seriously
     xizi_enter_kernel();
@@ -69,6 +69,11 @@ bool kfree(char* vaddr)
     return KBuddyFree(&kern_virtmem_buddy, V2P_WO(vaddr));
 }
 
+bool raw_kfree(char* paddr)
+{
+    return KBuddyFree(&kern_virtmem_buddy, paddr);
+}
+
 char* raw_alloc(size_t size)
 {
     char* mem_alloc = KBuddyAlloc(&user_phy_freemem_buddy, size);
@@ -122,7 +122,7 @@ static bool _unmap_pages(uintptr_t* pgdir, uintptr_t vaddr, int len)
 /// @param len
 /// @param is_dev
 /// @return
-static bool _map_user_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
+static bool _map_user_pages(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
 {
     if (len < 0) {
         return false;
@@ -140,13 +140,13 @@ static bool _map_user_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr,
         _p_pgtbl_mmu_access->MmuUsrDevPteAttr(&mem_attr);
     }
 
-    return _map_pages(pgdir, vaddr, paddr, (intptr_t)len, mem_attr);
+    return _map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, (intptr_t)len, mem_attr);
 }
 
 /// assume that a user pagedir is allocated from [0, size)
 /// if new_size > old_size, allocate more space,
 /// if old_size > new_size, free extra space, to avoid unnecessary alloc/free.
-static uintptr_t _resize_user_pgdir(struct TopLevelPageDirectory* pgdir, uintptr_t old_size, uintptr_t new_size)
+static uintptr_t _resize_user_pgdir(struct MemSpace* pmemspace, uintptr_t old_size, uintptr_t new_size)
 {
     if (UNLIKELY(new_size > USER_MEM_TOP)) {
         ERROR("user size out of range.\n");
@@ -167,9 +167,10 @@ static uintptr_t _resize_user_pgdir(struct TopLevelPageDirectory* pgdir, uintptr
         }
         memset(new_page, 0, PAGE_SIZE);
 
-        if (!xizi_pager.map_pages(pgdir->pd_addr, cur_size, V2P(new_page), PAGE_SIZE, false)) {
+        if (!xizi_pager.map_pages(pmemspace, cur_size, V2P(new_page), PAGE_SIZE, false)) {
            return cur_size;
        }
+        CreateResourceTag(NULL, &pmemspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, V2P(new_page));
        cur_size += PAGE_SIZE;
    }
@@ -73,23 +73,6 @@ void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
         // free each level4 page table
         uintptr_t* pgtbl_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(pgdir->pd_addr[level4_entry_idx]);
         if (pgtbl_paddr != NULL) {
-            // free each page
-            for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
-                uintptr_t vaddr = (level4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
-
-                // get page paddr
-                uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(pgtbl_paddr))[page_entry_idx], PAGE_SIZE);
-                if (page_paddr != NULL) {
-                    // IPC vaddr should not be addressed here.
-                    assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
-
-                    if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
-                        kfree(P2V(page_paddr));
-                    } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
-                        raw_free((char*)page_paddr);
-                    }
-                }
-            }
             kfree(P2V(pgtbl_paddr));
         }
     }
@@ -96,22 +96,6 @@ void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
             uintptr_t* l4_table_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(l3_table_vaddr[l3_entry_idx]);
             if (l4_table_paddr != NULL) {
-                uintptr_t* l4_table_vaddr = P2V(l4_table_paddr);
-                for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
-                    uintptr_t vaddr = (l2_entry_idx << LEVEL2_PDE_SHIFT) | (l3_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
-
-                    // get page paddr
-                    uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN((l4_table_vaddr)[page_entry_idx], PAGE_SIZE);
-                    if (page_paddr != NULL) {
-                        // Ensure the virtual address is not in the IPC address space
-                        assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
-                        if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
-                            kfree(P2V(page_paddr));
-                        } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
-                            raw_free((char*)page_paddr);
-                        }
-                    }
-                }
                 kfree(P2V(l4_table_paddr));
             }
         }
@@ -133,12 +133,12 @@ static uintptr_t map_task_share_page(struct Thread* task, const uintptr_t paddr,
     }
 
     // map first area
-    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
+    if (!xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
         return (uintptr_t)NULL;
     }
 
     // map second area
-    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
+    if (!xizi_pager.map_pages(task->memspace, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
         xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
         return (uintptr_t)NULL;
     }
@@ -161,9 +161,9 @@ uintptr_t task_map_pages(struct Thread* task, const uintptr_t vaddr, const uintp
 
     bool ret = false;
     if (is_dev) {
-        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
+        ret = xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, true);
     } else {
-        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
+        ret = xizi_pager.map_pages(task->memspace, vaddr, paddr, nr_pages * PAGE_SIZE, false);
     }
 
     if (!ret) {
@@ -44,6 +44,11 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
     int true_len = ALIGNUP(len, PAGE_SIZE);
 
     if (*paddr != (uintptr_t)NULL) {
+        if (*paddr >= PHY_MEM_BASE && *paddr < PHY_MEM_STOP && cur_task->tid > 1) {
+            ERROR("mapping invalid memory: 0x%p\n", *paddr);
+            return -1;
+        }
+
         if (xizi_share_page_manager.task_map_pages(cur_task, *vaddr, *paddr, true_len / PAGE_SIZE, is_dev) == (uintptr_t)NULL) {
             return -1;
         }
@@ -52,6 +57,7 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
     uintptr_t load_vaddr = *vaddr;
     while (load_len < true_len) {
         char* new_paddr = raw_alloc(PAGE_SIZE);
+        CreateResourceTag(NULL, &cur_task->memspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, new_paddr);
         if (new_paddr == NULL) {
             return -1;
         }
@@ -32,11 +32,11 @@ Modification:
 #include <stddef.h>
 #include <stdint.h>
 
 #include "assert.h"
-#include "bitmap64.h"
 #include "execelf.h"
+#include "kalloc.h"
 #include "memspace.h"
 #include "pagetable.h"
 #include "task.h"
 
 #define MAX_SUPPORT_PARAMS 32
@@ -56,6 +56,7 @@ struct MemSpace* alloc_memspace()
     pmemspace->mem_size = 0;
     pmemspace->pgdir.pd_addr = 0;
     pmemspace->thread_to_notify = NULL;
+    CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, NULL, TRACER_OWNER, (void*)pmemspace);
     return pmemspace;
 }
@@ -68,12 +69,24 @@ void free_memspace(struct MemSpace* pmemspace)
         xizi_pager.free_user_pgdir(&pmemspace->pgdir);
     }
 
+    TracerNode* tmp_node = NULL;
+    DOUBLE_LIST_FOR_EACH_ENTRY(tmp_node, &pmemspace->tag.meta->children_guard, list_node)
+    {
+        assert((uintptr_t)tmp_node->p_resource >= PHY_MEM_BASE && (uintptr_t)tmp_node->p_resource < PHY_MEM_STOP);
+        if ((uintptr_t)tmp_node->p_resource < PHY_USER_FREEMEM_BASE) {
+            kfree(P2V(tmp_node->p_resource));
+        } else {
+            raw_free(tmp_node->p_resource);
+        }
+    }
+
     /* free ipc virt address allocator */
     if (pmemspace->massive_ipc_allocator != NULL) {
         KBuddyDestory(pmemspace->massive_ipc_allocator);
         slab_free(&xizi_task_manager.task_buddy_allocator, (void*)pmemspace->massive_ipc_allocator);
     }
 
+    DeleteResource(&pmemspace->tag, &xizi_task_manager.tag);
     slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
 }
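The sweep in free_memspace routes each tagged page back to the allocator it came from by physical address: pages below PHY_USER_FREEMEM_BASE came from the kernel buddy behind kalloc and must be converted back to a kernel-virtual pointer for kfree, while pages at or above it came from raw_alloc's user-physical buddy and are handed to raw_free as raw physical addresses. The branch isolated as a sketch (constants from memlayout.h as implied by the assert above; the helper name is illustrative):

    /* Sketch: route one tagged page back to the buddy allocator it came from. */
    static void demo_free_tagged_page(void* p_resource)
    {
        uintptr_t paddr = (uintptr_t)p_resource;
        assert(paddr >= PHY_MEM_BASE && paddr < PHY_MEM_STOP);

        if (paddr < PHY_USER_FREEMEM_BASE) {
            kfree(P2V(p_resource)); /* kernel-buddy page: kfree expects a virtual address */
        } else {
            raw_free(p_resource); /* user-physical buddy page: raw_free takes the paddr directly */
        }
    }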
@@ -101,14 +114,12 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
     /* allocate a pgdir */
     /* only supports first inited memspace */
     assert(pmemspace->pgdir.pd_addr == NULL);
-    struct TopLevelPageDirectory pgdir;
-    pgdir.pd_addr = NULL;
-    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
+    if (UNLIKELY(!xizi_pager.new_pgdir(&pmemspace->pgdir))) {
         ERROR("Create new pgdir failed.\n");
         goto error_exec;
     }
     /* copy kernel pagetable so that interrupt and syscall wont corrupt */
-    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
+    memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
 
     // read elf file by (header, section)
     uintptr_t load_size = 0;
@@ -126,7 +137,7 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
 
         // read section
         // 1. alloc space
-        if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
+        if ((load_size = xizi_pager.resize_user_pgdir(pmemspace, load_size, ph.vaddr + ph.memsz))
             != ph.vaddr + ph.memsz) {
             ERROR("Add uspace size failed.\n");
             goto error_exec;
@@ -136,9 +147,9 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
             LOG("Unsupported elf file, try use flag -N to compile.\n");
         }
         for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
-            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
+            uintptr_t page_paddr = xizi_pager.address_translate(&pmemspace->pgdir, ph.vaddr + addr_offset);
             if (page_paddr == 0) {
-                ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pgdir.pd_addr);
+                ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pmemspace->pgdir.pd_addr);
                 goto error_exec;
             }
             uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
@@ -148,15 +159,14 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
 
     /// elf file content now in memory
     // memspace will use this page dir
-    pmemspace->pgdir = pgdir;
     pmemspace->heap_base = ALIGNUP(load_size, PAGE_SIZE);
     pmemspace->mem_size = pmemspace->heap_base;
 
     return (uintptr_t*)elf.entry;
 
 error_exec:
-    if (pgdir.pd_addr != NULL) {
-        xizi_pager.free_user_pgdir(&pgdir);
+    if (pmemspace->pgdir.pd_addr != NULL) {
+        xizi_pager.free_user_pgdir(&pmemspace->pgdir);
     }
     ERROR("Error loading memspace.\n");
     return NULL;
@@ -209,7 +219,7 @@ struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** arg
     }
 
     /* map memory to user stack space in memspace*/
-    if (!xizi_pager.map_pages(pmemspace->pgdir.pd_addr, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), V2P(stack_bottom), USER_STACK_SIZE, false)) {
+    if (!xizi_pager.map_pages(pmemspace, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), V2P(stack_bottom), USER_STACK_SIZE, false)) {
         /* this could only fail due to inner page directory's allocation failure */
         ERROR("User stack map failed\n");
         handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);
@@ -303,11 +303,8 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         /* if there's not a runnable task, wait for one */
         if (next_task == NULL) {
             xizi_leave_kernel();
-
-
             /* leave kernel for other cores, so they may create a runnable task */
             xizi_enter_kernel();
-
             continue;
         }
@@ -392,8 +389,9 @@ struct XiziTaskManager xizi_task_manager = {
     .set_cur_task_priority = _set_cur_task_priority
 };
 
-bool module_task_manager_init(void)
+bool module_task_manager_init(TraceTag* softkernel_tag)
 {
+    CreateResourceTag(&xizi_task_manager.tag, softkernel_tag, "KTaskManager", TRACER_OWNER, &xizi_task_manager);
     xizi_task_manager.init();
     return true;
 }