Fix minor cases.

This commit is contained in:
TXuian 2024-05-20 16:55:43 +08:00
parent f4e193a738
commit f7a232ed4f
8 changed files with 61 additions and 228 deletions

View File

@ -25,6 +25,8 @@ int sub_thread_entry(int argc, char** argv)
global_value++;
}
/// @warning a session is single-threaded, so threads cannot share a common session; each thread must connect its own.
// sub thread connect to semaphore server
struct Session sem_session;
while (connect_session(&sem_session, sem_server_name, 4096) < 0) {
yield(SYS_TASK_YIELD_NO_REASON);

View File

@ -39,8 +39,8 @@ Modification:
#include "mmu_common.h"
// clang-format off
#define ALIGNUP(sz, al) (((uintptr_t)(sz) + (uintptr_t)(al) - 1) & ~((uintptr_t)(al) - 1))
#define ALIGNDOWN(sz, al) ((uintptr_t)(sz) & ~((uintptr_t)(al) - 1))
#define ALIGNUP(size, align) (((uintptr_t)(size) + (uintptr_t)(align) - 1) & ~((uintptr_t)(align) - 1))
#define ALIGNDOWN(size, align) ((uintptr_t)(size) & ~((uintptr_t)(align) - 1))
#define LEVEL4_PTE_IDX(v) (((uintptr_t)(v) >> LEVEL4_PTE_SHIFT) & (NUM_LEVEL4_PTE - 1))
#define LEVEL4_PTE_ADDR(v) ALIGNDOWN(v, LEVEL4_PTE_SIZE)

View File

@ -58,7 +58,7 @@ static inline bool check_pages_unmapped(struct Thread* task, uintptr_t vaddr, in
{
static uintptr_t paddr = UINT32_MAX;
for (uintptr_t i = 0; i < nr_pages; i++) {
if ((paddr = xizi_pager.address_translate(&task->memspace->pgdir, vaddr)) != 0) {
if ((paddr = xizi_pager.address_translate(&task->memspace->pgdir, vaddr)) != (uintptr_t)NULL) {
return false;
}
vaddr += PAGE_SIZE;
@ -75,13 +75,16 @@ static uintptr_t alloc_share_page_addr(struct Thread* task, const int nr_pages)
{
uintptr_t vaddr = USER_IPC_SPACE_BASE;
while (!check_pages_unmapped(task, vaddr, nr_pages)) {
// vaddr is guaranteed to be (2 * PAGE_SIZE) aligned
vaddr += 2 * PAGE_SIZE;
assert(vaddr % PAGE_SIZE == 0);
}
// now that nr_pages size after vaddr is unmapped
if (UNLIKELY(vaddr >= USER_IPC_SPACE_TOP)) {
return (uintptr_t)NULL;
}
return vaddr;
}
@ -94,10 +97,20 @@ static uintptr_t map_task_share_page(struct Thread* task, const uintptr_t paddr,
// map double vaddr page to support uniform ring buffer r/w
uintptr_t vaddr = (uintptr_t)NULL;
if (task->memspace->massive_ipc_allocator != NULL) {
// alloc from ipc area buddy
vaddr = (uintptr_t)KBuddyAlloc(task->memspace->massive_ipc_allocator, PAGE_SIZE * nr_pages * 2);
if (vaddr == (uintptr_t)NULL) {
ERROR("Task %s drains all ipc area.\n", task->name);
return (uintptr_t)NULL;
}
// allocated ipc share vaddr must not been used
assert(xizi_pager.address_translate(&task->memspace->pgdir, vaddr) == (uintptr_t)NULL);
} else {
// simple allocation
vaddr = alloc_share_page_addr(task, nr_pages * 2);
// time to use buddy
if (vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
task->memspace->massive_ipc_allocator = (struct KBuddy*)slab_alloc(&xizi_task_manager.task_buddy_allocator);
KBuddyInit(task->memspace->massive_ipc_allocator, USER_IPC_USE_ALLOCATOR_WATERMARK, USER_IPC_SPACE_TOP);
@ -112,19 +125,25 @@ static uintptr_t map_task_share_page(struct Thread* task, const uintptr_t paddr,
if (UNLIKELY(vaddr == (uintptr_t)NULL)) {
return (uintptr_t)NULL;
}
// map first area
if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
return (uintptr_t)NULL;
}
// map second area
if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
return (uintptr_t)NULL;
}
// flush tlb
if (task == cur_cpu()->task) {
p_mmu_driver->TlbFlush(vaddr, 2 * nr_pages * PAGE_SIZE);
/// @todo clean range rather than all
p_dcache_done->invalidateall();
}
return vaddr;
}
@ -140,12 +159,13 @@ uintptr_t task_map_pages(struct Thread* task, const uintptr_t vaddr, const uintp
} else {
ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
}
if (!ret) {
return (uintptr_t)NULL;
}
if (task == cur_cpu()->task) {
p_mmu_driver->TlbFlush(vaddr, nr_pages * PAGE_SIZE);
/// @todo clean range rather than all
p_dcache_done->invalidateall();
}
@ -155,18 +175,24 @@ uintptr_t task_map_pages(struct Thread* task, const uintptr_t vaddr, const uintp
void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, const int nr_pages)
{
// usages of unmap_task_share_pages must be correct
assert(task_vaddr >= USER_IPC_SPACE_BASE && task_vaddr < USER_IPC_SPACE_TOP);
/* get driver codes */
struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE);
xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE);
// unmap must be correct
assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE));
assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE));
// retrieve virtual address
if (task_vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
KBuddyFree(task->memspace->massive_ipc_allocator, (void*)task_vaddr);
}
if (task == cur_cpu()->task) {
p_mmu_driver->TlbFlush(task_vaddr, 2 * nr_pages * PAGE_SIZE);
/// @todo clean range rather than all
p_dcache_done->invalidateall();
}
@ -175,6 +201,7 @@ void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, con
static int next_session_id = 1;
struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity)
{
/* alloc session backend */
struct session_backend* session_backend = (struct session_backend*)slab_alloc(SessionAllocator());
if (UNLIKELY(session_backend == NULL)) {
return NULL;
@ -182,20 +209,26 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
int true_capacity = ALIGNUP(capacity, PAGE_SIZE);
int nr_pages = true_capacity / PAGE_SIZE;
/* alloc free memory as share memory */
uintptr_t kern_vaddr = (uintptr_t)kalloc(true_capacity);
if (UNLIKELY(kern_vaddr == (uintptr_t)NULL)) {
ERROR("No memory\n");
ERROR("No memory for session\n");
slab_free(SessionAllocator(), session_backend);
return NULL;
}
assert(kern_vaddr % PAGE_SIZE == 0);
/* map client vspace */
uintptr_t client_vaddr = map_task_share_page(client, V2P_WO(kern_vaddr), nr_pages);
if (UNLIKELY(client_vaddr == 0)) {
if (UNLIKELY(client_vaddr == (uintptr_t)NULL)) {
kfree((char*)kern_vaddr);
slab_free(SessionAllocator(), session_backend);
return NULL;
}
/* map server vspace */
uintptr_t server_vaddr = map_task_share_page(server, V2P_WO(kern_vaddr), nr_pages);
if (UNLIKELY(server_vaddr == 0)) {
if (UNLIKELY(server_vaddr == (uintptr_t)NULL)) {
unmap_task_share_pages(client, client_vaddr, nr_pages);
kfree((char*)kern_vaddr);
slab_free(SessionAllocator(), session_backend);

View File

@ -61,6 +61,7 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
break;
}
}
if (UNLIKELY(session_backend == NULL)) {
struct server_session* server_session = NULL;
DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)

View File

@ -62,6 +62,8 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
}
struct Thread* client = cur_cpu()->task;
assert(client != NULL);
/// get server
struct TraceTag server_identifier_owner;
if (!AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier")) {
@ -74,11 +76,12 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
DEBUG("Not server: %s\n", path);
return -1;
}
struct Thread* server = AchieveResource(&server_tag);
assert(server != NULL);
if (create_session_inner(client, server, capacity, user_session) == NULL) {
return -1;
}
return 0;
}

View File

@ -1,212 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file sys_exec.c
* @brief task execution syscall
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: sys_exec.c
Description: task execution syscall
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include <string.h>
#include "execelf.h"
#include "actracer.h"
#include "assert.h"
#include "kalloc.h"
#include "multicores.h"
#include "pagetable.h"
#include "syscall.h"
#include "task.h"
// Debug helper: dump every field of an ELF header.
// NOTE: expects `elf` to be a `struct elfhdr` value (fields accessed with `.`), not a pointer.
#define PRINT_ELFHDR(elf) \
do { \
DEBUG_PRINTF("magic: %x\n", elf.magic); \
DEBUG_PRINTF("elf: "); \
for (int i = 0; i < 12; i++) { \
DEBUG_PRINTF("%x ", elf.elf[i]); \
} \
DEBUG_PRINTF("\n"); \
DEBUG_PRINTF("type: %x\n", elf.type); \
DEBUG_PRINTF("machine: %x\n", elf.machine); \
DEBUG_PRINTF("version: %x\n", elf.version); \
DEBUG_PRINTF("entry: %x\n", elf.entry); \
DEBUG_PRINTF("phoff: %x\n", elf.phoff); \
DEBUG_PRINTF("shoff: %x\n", elf.shoff); \
DEBUG_PRINTF("flags: %x\n", elf.flags); \
DEBUG_PRINTF("ehsize: %x\n", elf.ehsize); \
DEBUG_PRINTF("phentsize: %x\n", elf.phentsize); \
DEBUG_PRINTF("phnum: %x\n", elf.phnum); \
DEBUG_PRINTF("shentsize: %x\n", elf.shentsize); \
DEBUG_PRINTF("shnum: %x\n", elf.shnum); \
DEBUG_PRINTF("shstrndx: %x\n", elf.shstrndx); \
} while (0)
/// @brief load a user program for execution
/// @param path path to elf file
/// @param argv arguments giving to main
/// @return
/// @brief load a user program image into a task for execution
/// @param task thread whose address space and trapframe are (re)initialized
/// @param img_start start address of the ELF image already in kernel memory
/// @param name program path; only the basename after the last '/' is kept as the task name
/// @param argv NULL-terminated argument vector copied onto the new user stack (may be NULL)
/// @return 0 on success, -1 on failure (on failure the task's old pgdir is untouched)
int task_exec(struct Thread* task, char* img_start, char* name, char** argv)
{
    /* capacity of the argv staging buffer, including the NULL terminator slot */
    enum { MAX_EXEC_PARAMS = 32 };

    /* load img to task */
    if (img_start == NULL) {
        return -1;
    }

    /* 1. load elf header */
    struct elfhdr elf;
    memcpy((void*)&elf, img_start, sizeof(elf));
    if (elf.magic != ELF_MAGIC) {
        return -1;
    }

    // pgdir for new task
    struct TopLevelPageDirectory pgdir;
    pgdir.pd_addr = NULL;
    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
        ERROR("create new pgdir failed.\n");
        goto error_exec;
    }
    // share kernel mappings with the new address space
    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);

    // read elf file by (header, section)
    uintptr_t load_size = 0;
    struct proghdr ph;
    for (int sec_idx = 0, off = elf.phoff; sec_idx < elf.phnum; sec_idx++, off += sizeof(ph)) {
        // load proghdr
        memcpy((char*)&ph, img_start + off, sizeof(ph));
        if (ph.type != ELF_PROG_LOAD)
            continue;
        if (ph.memsz < ph.filesz) {
            ERROR("elf header mem size less than file size\n");
            goto error_exec;
        }
        // read section
        // 1. alloc space
        if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
            != ph.vaddr + ph.memsz) {
            goto error_exec;
        }
        // 2. copy segment contents into the freshly mapped pages, one page at a time
        assert(ph.vaddr % PAGE_SIZE == 0);
        for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
            if (page_paddr == 0) {
                ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pgdir.pd_addr);
                goto error_exec;
            }
            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
            memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);
        }
    }

    /// elf file content now in memory
    // alloc stack page and map to TOP of user vspace
    uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE);
    if (UNLIKELY(stack_bottom == NULL)) {
        ERROR("No memory.\n");
        goto error_exec;
    }
    if (!xizi_pager.map_pages(pgdir.pd_addr, USER_MEM_TOP - USER_STACK_SIZE, V2P(stack_bottom), USER_STACK_SIZE, false)) {
        ERROR("User stack map failed\n");
        kfree((char*)stack_bottom);
        goto error_exec;
    }

    uintptr_t user_vspace_sp = USER_MEM_TOP;
    uintptr_t user_stack_init[MAX_EXEC_PARAMS];
    uintptr_t argc = 0;
    uintptr_t copy_len = 0;
    // Copy each argv string onto the user stack, remembering its user-space address.
    // Bound argc so the staging buffer (and its trailing NULL slot) cannot overflow;
    // params beyond the limit are silently dropped.
    /// @todo handle with large number of parameters (more than 32)
    for (argc = 0; argv != NULL && argc < MAX_EXEC_PARAMS - 1 && argv[argc] != NULL; argc++) {
        // copy param to user stack
        copy_len = strlen(argv[argc]) + 1;
        // keep sp aligned to the pointer size (a fixed 4-byte mask is wrong on 64-bit)
        user_vspace_sp = (user_vspace_sp - copy_len) & ~((uintptr_t)sizeof(uintptr_t) - 1);
        uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)argv[argc], copy_len);
        if (UNLIKELY(copied_len != copy_len)) {
            ERROR("Something went wrong when copying params.\n");
            goto error_exec;
        }
        user_stack_init[argc] = user_vspace_sp;
    }
    user_stack_init[argc] = 0; // NULL-terminate the argv pointer array
    copy_len = (argc + 1) * sizeof(uintptr_t);
    user_vspace_sp -= copy_len;
    uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)user_stack_init, copy_len);
    if (UNLIKELY(copied_len != copy_len)) {
        ERROR("Something went wrong when copying params.\n");
        goto error_exec;
    }

    // init task trapframe, which stores in svc stack
    // do not go to error_exec once we change trapframe!
    assert(copied_len == (argc + 1) * sizeof(uintptr_t));
    arch_trapframe_set_sp_pc(task->thread_context.trapframe, user_vspace_sp, elf.entry);
    arch_set_main_params(task->thread_context.trapframe, argc, user_vspace_sp);

    // save program name: keep only the basename after the last '/'
    char* last = NULL;
    for (last = name; *name; name++) {
        if (*name == '/') {
            last = name + 1;
        }
    }
    strncpy(task->name, last, sizeof(task->name) - 1);
    // strncpy does not guarantee NUL-termination when the source is long; force it
    task->name[sizeof(task->name) - 1] = '\0';

    // swap in the new address space, releasing the old one (if any)
    if (task->pgdir.pd_addr != NULL) {
        xizi_pager.free_user_pgdir(&task->pgdir);
    }
    task->pgdir = pgdir;

    task->heap_base = ALIGNUP(load_size, PAGE_SIZE);
    task->mem_size = task->heap_base + USER_STACK_SIZE;
    return 0;

error_exec:
    if (pgdir.pd_addr != NULL) {
        // presumably frees every page mapped into the new pgdir, including the
        // user stack once it is mapped — TODO confirm free_user_pgdir's contract
        xizi_pager.free_user_pgdir(&pgdir);
    }
    ERROR("task create error\n");
    return -1;
}
/// @brief exec syscall: replace the current task's image with a new program
/// @param img_start start of the ELF image in memory
/// @param name program path (basename becomes the task name)
/// @param argv NULL-terminated argument vector for the new program
/// @return 0 on success, -1 on failure (old image stays loaded on failure)
int sys_exec(char* img_start, char* name, char** argv)
{
    /// @todo find a source of mmu_driver_tag instead of requiring from root
    static struct TraceTag mmu_driver_tag;
    static bool mmu_tag_ready = false;
    if (UNLIKELY(!mmu_tag_ready)) {
        AchieveResourceTag(&mmu_driver_tag, RequireRootTag(), "hardkernel/mmu-ac-resource");
        mmu_tag_ready = true;
    }
    struct MmuCommonDone* p_mmu_driver = AchieveResource(&mmu_driver_tag);

    struct Thread* current_task = cur_cpu()->task;
    int exec_ret = task_exec(current_task, img_start, name, argv);
    // guard clause: do not touch the MMU if loading the image failed
    if (exec_ret < 0) {
        return -1;
    }
    // loading succeeded: activate the freshly built page directory
    p_mmu_driver->LoadPgdir((uintptr_t)V2P(current_task->pgdir.pd_addr));
    return exec_ret;
}

View File

@ -39,13 +39,18 @@ Modification:
int sys_register_as_server(char* name)
{
// get server thread
struct Thread* server = cur_cpu()->task;
assert(server != NULL);
// get server tag owner
struct TraceTag server_identifier_set_tag;
if (!AchieveResourceTag(&server_identifier_set_tag, RequireRootTag(), "softkernel/server-identifier")) {
panic("Server identifier not initialized.\b");
}
assert(server_identifier_set_tag.meta != NULL);
// create server tag under server tag owner
if (!CreateResourceTag(&server->server_identifier, &server_identifier_set_tag, name, TRACER_SERVER_IDENTITY_AC_RESOURCE, server)) {
return -1;
}

View File

@ -130,7 +130,9 @@ uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
goto error_exec;
}
// 2. copy inode to space
assert(ph.vaddr % PAGE_SIZE == 0);
if (ph.vaddr % PAGE_SIZE != 0) {
LOG("Unsupported elf file, try use flag -N to compile.\n");
}
for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
if (page_paddr == 0) {
@ -219,12 +221,11 @@ struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** arg
memset(user_stack_init, 0, sizeof(user_stack_init));
uintptr_t argc = 0;
uintptr_t copy_len = 0;
for (argc = 0; argv != NULL && argv[argc] != NULL; argc++) {
for (argc = 0; argv != NULL && argc < MAX_SUPPORT_PARAMS && argv[argc] != NULL; argc++) {
/// @todo handle with large number of parameters (more than 32)
// copy param to user stack
copy_len = strlen(argv[argc]) + 1;
user_vspace_sp = (user_vspace_sp - copy_len) & ~3;
user_vspace_sp = ALIGNDOWN(user_vspace_sp - copy_len, sizeof(uintptr_t));
uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pmemspace->pgdir, user_vspace_sp, (uintptr_t)argv[argc], copy_len);
if (UNLIKELY(copied_len != copy_len)) {
ERROR("Something went wrong when copying params.\n");