forked from xuos/xiuos
Rename struct Thread; completely split task memspace and scheduling
This commit is contained in:
parent 0b858de120
commit 736ba18769
@@ -1,7 +1,8 @@
 export CROSS_COMPILE ?= arm-none-eabi-
 export DEVICE = -march=armv7-a -mtune=cortex-a9 -mfpu=vfpv3-d16 -ftree-vectorize -ffast-math -mfloat-abi=softfp
 # export CFLAGS := $(DEVICE) -std=c11 -Wall -O2 -g -gdwarf-2 -Wnull-dereference -Waddress -Warray-bounds -Wchar-subscripts -Wimplicit-int -Wimplicit-function-declaration -Wcomment -Wformat -Wmissing-braces -Wnonnull -Wparentheses -Wpointer-sign -Wreturn-type -Wsequence-point -Wstrict-aliasing -Wstrict-overflow=1 -Wswitch -Wtrigraphs -Wuninitialized -Wunknown-pragmas -Wunused-function -Wunused-label -Wunused-value -Wunused-variable -Wunused-function
-export CFLAGS := $(DEVICE) -std=c11 -Wall -O2 -g -gdwarf-2 -Waddress -Warray-bounds -Wchar-subscripts -Wimplicit-int -Wimplicit-function-declaration -Wcomment -Wformat -Wmissing-braces -Wnonnull -Wparentheses -Wpointer-sign -Wreturn-type -Wsequence-point -Wstrict-aliasing -Wstrict-overflow=1 -Wswitch -Wtrigraphs -Wuninitialized -Wunknown-pragmas -Wunused-function -Wunused-label -Wunused-value -Wunused-variable -Wunused-function
+# export CFLAGS := $(DEVICE) -std=c11 -Wall -O2 -g -gdwarf-2 -Waddress -Warray-bounds -Wchar-subscripts -Wimplicit-int -Wimplicit-function-declaration -Wcomment -Wformat -Wmissing-braces -Wnonnull -Wparentheses -Wpointer-sign -Wreturn-type -Wsequence-point -Wstrict-aliasing -Wstrict-overflow=1 -Wswitch -Wtrigraphs -Wuninitialized -Wunknown-pragmas -Wunused-function -Wunused-label -Wunused-value -Wunused-variable -Wunused-function
+export CFLAGS := $(DEVICE) -std=c11 -Wall -O0 -g -gdwarf-2 -Waddress -Warray-bounds -Wchar-subscripts -Wimplicit-int -Wimplicit-function-declaration -Wcomment -Wformat -Wmissing-braces -Wnonnull -Wparentheses -Wpointer-sign -Wreturn-type -Wsequence-point -Wstrict-aliasing -Wstrict-overflow=1 -Wswitch -Wtrigraphs -Wuninitialized -Wunknown-pragmas -Wunused-function -Wunused-label -Wunused-value -Wunused-variable -Wunused-function
 export AFLAGS := -c $(DEVICE) -x assembler-with-cpp -D__ASSEMBLY__ -gdwarf-2
 # export LFLAGS := $(DEVICE) -Wl,-Map=XiZi-imx6q-sabrelite.map,-cref,-u,_boot_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/nxp_imx6q_sabrelite.lds
 export LFLAGS := $(DEVICE) --specs=nosys.specs -Wl,-Map=XiZi-imx6q-sabrelite.map,-cref,-u,_boot_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/nxp_imx6q_sabrelite.lds
@@ -56,12 +56,13 @@ Modification:
 #define MAX_NR_FREE_PAGES ((PHY_MEM_STOP - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)
 
 /* User memory layout */
+#define NR_MAX_TREADS_PER_TASK 64
 #define USER_STACK_SIZE MODE_STACK_SIZE
 #define USER_MEM_BASE (0x00000000)
 #define USER_MEM_TOP DEV_VRTMEM_BASE
 #define USER_IPC_SPACE_BASE (0x70000000)
 #define USER_IPC_USE_ALLOCATOR_WATERMARK (0x70010000)
-#define USER_IPC_SPACE_TOP (USER_MEM_TOP - USER_STACK_SIZE)
+#define USER_IPC_SPACE_TOP (USER_MEM_TOP - (NR_MAX_TREADS_PER_TASK * USER_STACK_SIZE))
 
 /* Device memory layout */
 #define DEV_PHYMEM_BASE (0x00000000)
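With NR_MAX_TREADS_PER_TASK = 64, the new USER_IPC_SPACE_TOP leaves room for one fixed-size stack slot per thread directly below the top of user memory. A quick layout sketch (not part of the commit; the concrete value of MODE_STACK_SIZE is configuration-dependent, and user_stack_bottom_of is a hypothetical helper):

    /* Slot i, 0 <= i < NR_MAX_TREADS_PER_TASK, spans
     * [USER_MEM_TOP - (i + 1) * USER_STACK_SIZE, USER_MEM_TOP - i * USER_STACK_SIZE);
     * slot 63's bottom coincides with USER_IPC_SPACE_TOP, so IPC space and stacks cannot overlap. */
    static inline uintptr_t user_stack_bottom_of(int stack_idx)
    {
        return USER_MEM_TOP - ((uintptr_t)(stack_idx + 1) * USER_STACK_SIZE);
    }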
@@ -0,0 +1,67 @@
+/*
+* Copyright (c) 2020 AIIT XUOS Lab
+* XiUOS is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+* http://license.coscl.org.cn/MulanPSL2
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+/**
+* @file bitmap64.h
+* @brief 64-bit bitmap support
+* @version 3.0
+* @author AIIT XUOS Lab
+* @date 2023.08.25
+*/
+
+/*************************************************
+File name: bitmap64.h
+Description: 64-bit bitmap support
+Others:
+History:
+1. Date: 2024-05-18
+Author: AIIT XUOS Lab
+Modification:
+1. first version
+*************************************************/
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "assert.h"
+
+struct bitmap64 {
+    uint64_t map;
+};
+
+static inline void bitmap64_init(struct bitmap64* bitmap)
+{
+    bitmap->map = 0;
+}
+
+static inline int bitmap64_alloc(struct bitmap64* bitmap)
+{
+    int free_bit = -1;
+    // ffsll returns the 1-based index of the first 0 bit of map, in [1, 64]
+    free_bit = __builtin_ffsll(~(bitmap->map));
+    // treat the bitmap as full (the 64th bit is never used, so ffsll cannot return 0)
+    if (free_bit == 64) {
+        return -1;
+    }
+    assert(free_bit < 64 && free_bit >= 1);
+    // alloc and return (64-bit literal keeps the shift well-defined for bits >= 31)
+    bitmap64_alloc_mark: bitmap->map |= (1ULL << (free_bit - 1));
+    return free_bit - 1;
+}
+
+static inline void bitmap64_free(struct bitmap64* bitmap, int idx)
+{
+    // usages of bitmap64 must be correct
+    assert((bitmap->map & (1ULL << idx)) != 0);
+    // free bit
+    bitmap->map &= ~(1ULL << idx);
+}
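For reference, __builtin_ffsll(x) returns the 1-based index of the least significant set bit of x, or 0 when x is 0; applied to ~map it finds the lowest free slot. Because the allocator refuses index 63, bit 63 stays clear forever, so ffsll can never return 0 and the full case is exactly free_bit == 64. A usage sketch (not part of the commit):

    struct bitmap64 stack_slots;
    bitmap64_init(&stack_slots);

    int idx = bitmap64_alloc(&stack_slots); // 0 on the first call, then 1, 2, ...
    if (idx != -1) {
        /* ... slot idx now belongs to the caller ... */
        bitmap64_free(&stack_slots, idx); // asserts if idx was not allocated
    }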
@@ -0,0 +1,43 @@
+/*
+* Copyright (c) 2020 AIIT XUOS Lab
+* XiUOS is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+* http://license.coscl.org.cn/MulanPSL2
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+/**
+* @file memspace.h
+* @brief memspace loader
+* @version 3.0
+* @author AIIT XUOS Lab
+* @date 2023.08.25
+*/
+
+/*************************************************
+File name: memspace.h
+Description: memspace loader
+Others:
+History:
+1. Date: 2023-08-28
+Author: AIIT XUOS Lab
+Modification:
+1. first version
+*************************************************/
+
+#include "task.h"
+
+struct ThreadStackPointer {
+    int argc;
+    int stack_idx;
+    uintptr_t user_sp;
+    uintptr_t user_stack_vaddr;
+};
+
+struct MemSpace* alloc_memspace();
+void free_memspace(struct MemSpace* pmemspace);
+uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start);
+struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** argv);
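The intended call order for this API, mirroring the rewritten sys_spawn later in this commit (a sketch; error paths elided):

    struct MemSpace* pmemspace = alloc_memspace();
    uintptr_t* entry = load_memspace(pmemspace, img_start);          // new pgdir + ELF segments copied in
    struct Thread* tcb = xizi_task_manager.new_task_cb(pmemspace);   // attach a thread to the memspace
    struct ThreadStackPointer sp = load_user_stack(pmemspace, argv); // pick a stack slot, push argv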
@@ -36,7 +36,7 @@ Modification:
 struct CPU {
     int cpuid;
 
-    struct TaskMicroDescriptor* task;
+    struct Thread* task;
     struct context* scheduler;
 };
 
@@ -59,8 +59,8 @@ struct PagerRightGroup {
 struct XiziPageManager {
     bool (*new_pgdir)(struct TopLevelPageDirectory* pgdir);
     void (*free_user_pgdir)(struct TopLevelPageDirectory* pgdir);
-    bool (*map_pages)(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr, uintptr_t len, bool is_dev);
-    bool (*unmap_pages)(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t len);
+    bool (*map_pages)(uintptr_t* pd_addr, uintptr_t vaddr, uintptr_t paddr, uintptr_t len, bool is_dev);
+    bool (*unmap_pages)(uintptr_t* pd_addr, uintptr_t vaddr, uintptr_t len);
 
     uintptr_t (*resize_user_pgdir)(struct TopLevelPageDirectory* pgdir, uintptr_t old_size, uintptr_t new_size);
     uintptr_t (*address_translate)(struct TopLevelPageDirectory* pgdir, uintptr_t vaddr);
@@ -31,6 +31,6 @@ Modification:
 
 #include "task.h"
 
-struct TaskMicroDescriptor* max_priority_runnable_task(void);
-struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority);
+struct Thread* max_priority_runnable_task(void);
+struct Thread* round_robin_runnable_task(uint32_t priority);
 void recover_priority(void);
@@ -69,8 +69,8 @@ struct session_backend {
     struct client_session client_side;
     int session_id; // id of this session
     int nr_pages; // pages used by this pipe
-    struct TaskMicroDescriptor* client; // client of this pipe
-    struct TaskMicroDescriptor* server; // server of this pipe
+    struct Thread* client; // client of this pipe
+    struct Thread* server; // server of this pipe
 
     uintptr_t buf_kernel_addr;
 };
@@ -81,11 +81,11 @@ struct SharePageRightGroup {
 };
 
 struct XiziSharePageManager {
-    struct session_backend* (*create_share_pages)(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, const int capacity);
-    void (*unmap_task_share_pages)(struct TaskMicroDescriptor* task, const uintptr_t task_vaddr, const int nr_pages);
+    struct session_backend* (*create_share_pages)(struct Thread* client, struct Thread* server, const int capacity);
+    void (*unmap_task_share_pages)(struct Thread* task, const uintptr_t task_vaddr, const int nr_pages);
     int (*delete_share_pages)(struct session_backend* session_backend);
 
-    uintptr_t (*task_map_pages)(struct TaskMicroDescriptor* task, const uintptr_t vaddr, const uintptr_t paddr, const int nr_pages, const int is_dev);
+    uintptr_t (*task_map_pages)(struct Thread* task, const uintptr_t vaddr, const uintptr_t paddr, const int nr_pages, const int is_dev);
 };
 extern struct XiziSharePageManager xizi_share_page_manager;
 
@@ -85,20 +85,20 @@ typedef int (*ipc_write_fn)(struct Session* session, int fd, char* src, int offs
 int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4);
 
 int sys_spawn(char* img_start, char* name, char** argv);
-int sys_exit(struct TaskMicroDescriptor* ptask);
+int sys_exit(struct Thread* ptask);
 int sys_yield(task_yield_reason reason);
 int sys_kill(int id);
 
 int sys_register_as_server(char* name);
 int sys_connect_session(char* path, int capacity, struct Session* user_session);
 int sys_poll_session(struct Session* userland_session_arr, int arr_capacity);
-int sys_close_session(struct TaskMicroDescriptor* task, struct Session* session);
+int sys_close_session(struct Thread* task, struct Session* session);
 
 int sys_exec(char* img_start, char* name, char** argv);
 int sys_state(sys_state_option option, sys_state_info* info);
 int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev);
 
 int sys_register_irq(int irq_num, int irq_opcode);
-int sys_unbind_irq_all(struct TaskMicroDescriptor* task);
-int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num);
+int sys_unbind_irq_all(struct Thread* task);
+int sys_unbind_irq(struct Thread* task, int irq_num);
 #endif
@@ -31,6 +31,7 @@ Modification:
 
 #include "core.h"
 
+#include "bitmap64.h"
 #include "buddy.h"
 #include "list.h"
 #include "object_allocator.h"
@@ -52,41 +53,58 @@ enum ProcState {
     NEVER_RUN,
 };
 
+struct MemSpace {
+    /* task memory resources */
+    struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
+    uintptr_t heap_base; // mem size of proc used(allocated by kernel)
+    uintptr_t mem_size;
+    /* task communication mem resources */
+    struct KBuddy* massive_ipc_allocator;
+
+    /* threads using this memspace */
+    struct bitmap64 thread_stack_idx_bitmap;
+    struct double_list_node thread_list_guard;
+};
+
 /* Thread Control Block */
-struct Thread {
-    struct TaskMicroDescriptor* task; // process of current thread
-    uintptr_t stack_addr; // [virt] stack base address
+struct ThreadContext {
+    struct Thread* task; // the thread this context belongs to
+
+    /* kernel stack of thread */
+    uintptr_t kern_stack_addr; // [virt] stack base address
+
+    /* user stack */
+    int user_stack_idx; // [virt] stack idx in user memspace
+    uintptr_t uspace_stack_addr; // [virt] user stack base address in memspace
+    uintptr_t ustack_kvaddr; // [virt] user stack memory's kernel vaddr
 
     /* kernel context of thread */
     struct context* context;
     /* user context of thread */
     struct trapframe* trapframe;
 };
 
 /* Process Control Block */
-struct TaskMicroDescriptor {
-    /* task debug resources */
-    int pid;
-    bool bind_irq;
-    bool dead;
+struct Thread {
     /* task name */
     char name[TASK_NAME_MAX_LEN];
 
     /// @todo support return value
     int ret; // state val that be returned to parent
     /// @todo support parent
     struct TaskMicroDescriptor* parent;
+    /* task debug resources */
+    int tid;
+    bool bind_irq;
+    bool dead;
 
     /* task context resources */
-    struct Thread main_thread; // will only access by task itself
+    struct ThreadContext thread_context; // only accessed by the task itself
 
-    /* task memory resources */
-    struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
-    uintptr_t heap_base; // mem size of proc used(allocated by kernel)
-    /// @todo support heap_base
-    uintptr_t mem_size;
+    /* thread mem space */
+    struct MemSpace* memspace;
+    struct double_list_node memspace_list_node;
 
     /* task communication resources */
     struct double_list_node cli_sess_listhead;
     struct double_list_node svr_sess_listhead;
     bool current_ipc_handled;
-    struct KBuddy* massive_ipc_allocator;
     struct TraceTag server_identifier;
 
     /* task schedule attributes */
@@ -106,36 +124,39 @@ struct XiziTaskManager {
     struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
     struct double_list_node task_running_list_head;
     struct double_list_node task_blocked_list_head;
-    struct slab_allocator task_allocator;
-    struct slab_allocator task_buddy_allocator;
 
+    /* mem allocator */
+    struct slab_allocator memspace_allocator;
+    struct slab_allocator task_allocator; // allocates struct Thread
+    struct slab_allocator task_buddy_allocator; // allocates buddies for memspaces
     uint32_t next_pid;
 
     /* init task manager */
     void (*init)();
     /* new a task control block, checkout #sys_spawn for usage */
-    struct TaskMicroDescriptor* (*new_task_cb)();
+    struct Thread* (*new_task_cb)(struct MemSpace* pmemspace);
     /* free a task control block, this calls #free_user_pgdir to free all virtual spaces */
-    void (*free_pcb)(struct TaskMicroDescriptor*);
+    void (*free_pcb)(struct Thread*);
     /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
-    void (*task_set_default_schedule_attr)(struct TaskMicroDescriptor*);
+    void (*task_set_default_schedule_attr)(struct Thread*);
 
     /* used by task_scheduler to find the next READY task; must be called with the lock held */
-    struct TaskMicroDescriptor* (*next_runnable_task)(void);
+    struct Thread* (*next_runnable_task)(void);
     /* function that runs in kernel-thread context and schedules user tasks */
     void (*task_scheduler)(struct SchedulerRightGroup);
 
     /* handle task state */
     /* call to yield the current user task */
-    void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking);
+    void (*task_yield_noschedule)(struct Thread* task, bool is_blocking);
     /* block and unblock task */
-    void (*task_block)(struct TaskMicroDescriptor* task);
-    void (*task_unblock)(struct TaskMicroDescriptor* task);
+    void (*task_block)(struct Thread* task);
+    void (*task_unblock)(struct Thread* task);
     /* set task priority */
     void (*set_cur_task_priority)(int priority);
 };
 
 extern uint32_t ready_task_priority;
-extern struct TaskMicroDescriptor* next_task_emergency;
+extern struct Thread* next_task_emergency;
 extern struct XiziTaskManager xizi_task_manager;
 
 int spawn_embedded_task(char* img_start, char* name, char** argv);
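One design point worth noting: a MemSpace carries no explicit reference count. The threads sharing it are linked on thread_list_guard, and list emptiness is what triggers the free, as _dealloc_task_cb later in this diff shows. The idiom, in sketch form (variable names hypothetical):

    doubleListDel(&thread->memspace_list_node);
    if (IS_DOUBLE_LIST_EMPTY(&memspace->thread_list_guard)) {
        free_memspace(memspace); // the last thread out frees the shared memspace
    }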
@@ -30,7 +30,6 @@ Modification:
 #include "kern_init.h"
 
 #include "assert.h"
 #include "kalloc.h"
 #include "log.h"
 #include "task.h"
 
@@ -54,6 +54,7 @@ char* kalloc(size_t size)
     if (mem_alloc == NULL) {
         return NULL;
     }
+    assert((uintptr_t)mem_alloc >= V2P(&kernel_data_end) && (uintptr_t)mem_alloc < PHY_USER_FREEMEM_BASE);
     mem_alloc = P2V(mem_alloc);
     if ((uintptr_t)mem_alloc < KERN_MEM_BASE) {
         DEBUG("Error Alloc: %x by size: %d (Caused by double free)\n", mem_alloc, size);
@@ -54,11 +54,11 @@ static struct slab_allocator* SessionAllocator()
 /// @param vaddr
 /// @param nr_pages
 /// @return true if mem range is free, false if at least one page inside [vaddr, vaddr + nr_pages * PAGE_SIZE) is mapped
-static inline bool check_pages_unmapped(struct TaskMicroDescriptor* task, uintptr_t vaddr, int nr_pages)
+static inline bool check_pages_unmapped(struct Thread* task, uintptr_t vaddr, int nr_pages)
 {
     static uintptr_t paddr = UINT32_MAX;
     for (uintptr_t i = 0; i < nr_pages; i++) {
-        if ((paddr = xizi_pager.address_translate(&task->pgdir, vaddr)) != 0) {
+        if ((paddr = xizi_pager.address_translate(&task->memspace->pgdir, vaddr)) != 0) {
             return false;
         }
         vaddr += PAGE_SIZE;
@@ -71,7 +71,7 @@ static inline bool check_pages_unmapped(struct TaskMicroDescriptor* task, uintpt
 /// @param nr_pages continuously map nr_pages
 /// @return addr to be mapped, aligned by page
 /// @todo optimize, and support multiple pages
-static uintptr_t alloc_share_page_addr(struct TaskMicroDescriptor* task, const int nr_pages)
+static uintptr_t alloc_share_page_addr(struct Thread* task, const int nr_pages)
 {
     uintptr_t vaddr = USER_IPC_SPACE_BASE;
     while (!check_pages_unmapped(task, vaddr, nr_pages)) {
@@ -85,7 +85,7 @@ static uintptr_t alloc_share_page_addr(struct TaskMicroDescriptor* task, const i
     return vaddr;
 }
 
-static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uintptr_t paddr, const int nr_pages)
+static uintptr_t map_task_share_page(struct Thread* task, const uintptr_t paddr, const int nr_pages)
 {
     /* get driver codes */
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
@@ -93,15 +93,15 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
 
     // map double vaddr page to support uniform ring buffer r/w
     uintptr_t vaddr = (uintptr_t)NULL;
-    if (task->massive_ipc_allocator != NULL) {
-        vaddr = (uintptr_t)KBuddyAlloc(task->massive_ipc_allocator, PAGE_SIZE * nr_pages * 2);
-        assert(xizi_pager.address_translate(&task->pgdir, vaddr) == (uintptr_t)NULL);
+    if (task->memspace->massive_ipc_allocator != NULL) {
+        vaddr = (uintptr_t)KBuddyAlloc(task->memspace->massive_ipc_allocator, PAGE_SIZE * nr_pages * 2);
+        assert(xizi_pager.address_translate(&task->memspace->pgdir, vaddr) == (uintptr_t)NULL);
     } else {
         vaddr = alloc_share_page_addr(task, nr_pages * 2);
         if (vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
-            task->massive_ipc_allocator = (struct KBuddy*)slab_alloc(&xizi_task_manager.task_buddy_allocator);
-            KBuddyInit(task->massive_ipc_allocator, USER_IPC_USE_ALLOCATOR_WATERMARK, USER_IPC_SPACE_TOP);
-            if (!task->massive_ipc_allocator) {
+            task->memspace->massive_ipc_allocator = (struct KBuddy*)slab_alloc(&xizi_task_manager.task_buddy_allocator);
+            KBuddyInit(task->memspace->massive_ipc_allocator, USER_IPC_USE_ALLOCATOR_WATERMARK, USER_IPC_SPACE_TOP);
+            if (!task->memspace->massive_ipc_allocator) {
                 ERROR("Alloc task buddy failed.\n");
                 return (uintptr_t)NULL;
             }
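The double mapping above is the classic ring-buffer trick: vaddr .. vaddr+S and vaddr+S .. vaddr+2S alias the same physical pages (S = nr_pages * PAGE_SIZE), so a read or write that would wrap past the end of the buffer needs no split into two copies. A sketch of why (ring_write is hypothetical, not part of the commit):

    void ring_write(char* base, size_t size, size_t head, const char* src, size_t len)
    {
        // requires len <= size; the aliased second mapping absorbs the wrap-around,
        // because base + size + k touches the same physical page as base + k
        memcpy(base + head, src, len);
    }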
|
||||
|
@@ -112,11 +112,11 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
     if (UNLIKELY(vaddr == (uintptr_t)NULL)) {
         return (uintptr_t)NULL;
     }
-    if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
+    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
         return (uintptr_t)NULL;
     }
-    if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
-        xizi_pager.unmap_pages(task->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
+    if (!xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
+        xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
         return (uintptr_t)NULL;
     }
     if (task == cur_cpu()->task) {
@@ -128,7 +128,7 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
     return vaddr;
 }
 
-uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr, const uintptr_t paddr, const int nr_pages, const int is_dev)
+uintptr_t task_map_pages(struct Thread* task, const uintptr_t vaddr, const uintptr_t paddr, const int nr_pages, const int is_dev)
 {
     /* get driver codes */
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
@@ -136,9 +136,9 @@ uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr
 
     bool ret = false;
     if (is_dev) {
-        ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
+        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
     } else {
-        ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
+        ret = xizi_pager.map_pages(task->memspace->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
     }
     if (!ret) {
         return (uintptr_t)NULL;
@@ -153,16 +153,16 @@ uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr
     return vaddr;
 }
 
-void unmap_task_share_pages(struct TaskMicroDescriptor* task, const uintptr_t task_vaddr, const int nr_pages)
+void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, const int nr_pages)
 {
     /* get driver codes */
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
 
-    xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE);
-    xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE);
+    xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE);
+    xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE);
     if (task_vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
-        KBuddyFree(task->massive_ipc_allocator, (void*)task_vaddr);
+        KBuddyFree(task->memspace->massive_ipc_allocator, (void*)task_vaddr);
     }
     if (task == cur_cpu()->task) {
         p_mmu_driver->TlbFlush(task_vaddr, 2 * nr_pages * PAGE_SIZE);
@@ -173,7 +173,7 @@ void unmap_task_share_pages(struct TaskMicroDescriptor* task, const uintptr_t ta
 }
 
 static int next_session_id = 1;
-struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, const int capacity)
+struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity)
 {
     struct session_backend* session_backend = (struct session_backend*)slab_alloc(SessionAllocator());
     if (UNLIKELY(session_backend == NULL)) {
@@ -223,8 +223,8 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
     doubleListNodeInit(&session_backend->server_side.node);
     doubleListAddOnBack(&session_backend->server_side.node, &server->svr_sess_listhead);
 
-    server->mem_size += true_capacity;
-    client->mem_size += true_capacity;
+    server->memspace->mem_size += true_capacity;
+    client->memspace->mem_size += true_capacity;
 
     return session_backend;
 }
@@ -248,7 +248,7 @@ int delete_share_pages(struct session_backend* session_backend)
     if (session_backend->server_side.closed && session_backend->server != NULL) {
         xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages);
         doubleListDel(&session_backend->server_side.node);
-        session_backend->server->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+        session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
         session_backend->server = NULL;
     }
 
@@ -256,7 +256,7 @@ int delete_share_pages(struct session_backend* session_backend)
     if (session_backend->client_side.closed && session_backend->client != NULL) {
         xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages);
         doubleListDel(&session_backend->client_side.node);
-        session_backend->client->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+        session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
         session_backend->client = NULL;
     }
 
@@ -1,6 +1,5 @@
 SRC_FILES := syscall.c \
     sys_spawn.c \
-    sys_exec.c \
     sys_yield.c \
     sys_register_as_server.c \
    sys_connect_session.c \
@@ -38,7 +38,7 @@ Modification:
 /// @param cur_task
 /// @param session
 /// @return
-int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* session)
+int sys_close_session(struct Thread* cur_task, struct Session* session)
 {
     assert(cur_task != NULL);
     /* check if session is available */
@@ -35,7 +35,7 @@ Modification:
 #include "syscall.h"
 #include "task.h"
 
-struct session_backend* create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session)
+struct session_backend* create_session_inner(struct Thread* client, struct Thread* server, int capacity, struct Session* user_session)
 {
     // create share pages
     assert(server != NULL && client != NULL);
@@ -61,7 +61,7 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
         return -1;
     }
 
-    struct TaskMicroDescriptor* client = cur_cpu()->task;
+    struct Thread* client = cur_cpu()->task;
     /// get server
     struct TraceTag server_identifier_owner;
     if (!AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier")) {
@@ -75,7 +75,7 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
         return -1;
     }
 
-    struct TaskMicroDescriptor* server = AchieveResource(&server_tag);
+    struct Thread* server = AchieveResource(&server_tag);
     assert(server != NULL);
     if (create_session_inner(client, server, capacity, user_session) == NULL) {
         return -1;
@@ -66,7 +66,7 @@ Modification:
 /// @param path path to elf file
 /// @param argv arguments given to main
 /// @return
-int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, char** argv)
+int task_exec(struct Thread* task, char* img_start, char* name, char** argv)
 {
     /* load img to task */
     if (img_start == NULL) {
@@ -163,8 +163,8 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     // init task trapframe, which stores in svc stack
     // do not go to error_exec once we change trapframe!
     assert(copied_len == (argc + 1) * sizeof(uintptr_t));
-    arch_trapframe_set_sp_pc(task->main_thread.trapframe, user_vspace_sp, elf.entry);
-    arch_set_main_params(task->main_thread.trapframe, argc, user_vspace_sp);
+    arch_trapframe_set_sp_pc(task->thread_context.trapframe, user_vspace_sp, elf.entry);
+    arch_set_main_params(task->thread_context.trapframe, argc, user_vspace_sp);
 
     // save program name
     char* last = NULL;
@@ -202,7 +202,7 @@ int sys_exec(char* img_start, char* name, char** argv)
     }
 
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&mmu_driver_tag);
-    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
+    struct Thread* current_task = cur_cpu()->task;
     int ret = task_exec(current_task, img_start, name, argv);
     if (ret >= 0) {
         p_mmu_driver->LoadPgdir((uintptr_t)V2P(current_task->pgdir.pd_addr));
@@ -36,7 +36,7 @@ Modification:
 #include "syscall.h"
 #include "task.h"
 
-int sys_exit(struct TaskMicroDescriptor* ptask)
+int sys_exit(struct Thread* ptask)
 {
     assert(ptask != NULL);
     ptask->dead = true;
@@ -31,14 +31,14 @@ Modification:
 
 #include "task.h"
 
-extern int sys_exit(struct TaskMicroDescriptor* task);
+extern int sys_exit(struct Thread* task);
 int sys_kill(int id)
 {
-    struct TaskMicroDescriptor* task = NULL;
+    struct Thread* task = NULL;
     // check if task is a running one
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
     {
-        if (task->pid == id) {
+        if (task->tid == id) {
             sys_exit(task);
             return 0;
         }
@@ -47,7 +47,7 @@ int sys_kill(int id)
     // check if task is a blocking one
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
     {
-        if (task->pid == id) {
+        if (task->tid == id) {
             sys_exit(task);
             return 0;
         }
@@ -57,7 +57,7 @@ int sys_kill(int id)
     for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
         DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
         {
-            if (task->pid == id) {
+            if (task->tid == id) {
                 sys_exit(task);
                 return 0;
             }
@@ -30,15 +30,15 @@ Modification:
 #include <stdint.h>
 
 #include "assert.h"
-#include "multicores.h"
 #include "kalloc.h"
+#include "multicores.h"
 #include "share_page.h"
 #include "syscall.h"
 #include "task.h"
 
 int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev)
 {
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     assert(cur_task != NULL);
 
     int true_len = ALIGNUP(len, PAGE_SIZE);
@@ -64,6 +64,6 @@ int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev)
         }
     }
 
-    cur_task->mem_size += true_len;
+    cur_task->memspace->mem_size += true_len;
     return vaddr + true_len;
 }
@@ -44,7 +44,7 @@ static inline bool is_msg_needed(struct IpcMsg* msg)
 
 int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
 {
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     if (cur_task == NULL) {
         ERROR("%s by killed task\n", __func__);
         return -1;
@@ -66,7 +66,7 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         // update session_backend
         // if current session is handled
         if (server_session->head != userland_session_arr[i].head) {
-            struct TaskMicroDescriptor* client = SERVER_SESSION_BACKEND(server_session)->client;
+            struct Thread* client = SERVER_SESSION_BACKEND(server_session)->client;
             if (client->state == BLOCKED) {
                 xizi_task_manager.task_unblock(client);
             } else {
@@ -39,7 +39,7 @@ Modification:
 
 int sys_register_as_server(char* name)
 {
-    struct TaskMicroDescriptor* server = cur_cpu()->task;
+    struct Thread* server = cur_cpu()->task;
     struct TraceTag server_identifier_set_tag;
     if (!AchieveResourceTag(&server_identifier_set_tag, RequireRootTag(), "softkernel/server-identifier")) {
         panic("Server identifier not initialized.\n");
@@ -34,15 +34,15 @@ Modification:
 #include "actracer.h"
 #include "assert.h"
 #include "ipc.h"
 #include "kalloc.h"
+#include "memspace.h"
 #include "multicores.h"
 #include "share_page.h"
 #include "syscall.h"
 #include "task.h"
 
-static struct TaskMicroDescriptor* kernel_irq_proxy;
+static struct Thread* kernel_irq_proxy;
 static struct {
-    struct TaskMicroDescriptor* handle_task;
+    struct Thread* handle_task;
     struct Session session;
     struct session_backend* p_kernel_session;
     int opcode;
@@ -56,7 +56,7 @@ static void send_irq_to_user(int irq_num)
     len += sizeof(struct IpcArgInfo);
 
     /* get message space and add session tail */
-    void* session_kern_vaddr = P2V(xizi_pager.address_translate(&kernel_irq_proxy->pgdir, (uintptr_t)session->buf));
+    void* session_kern_vaddr = P2V(xizi_pager.address_translate(&kernel_irq_proxy->memspace->pgdir, (uintptr_t)session->buf));
     struct IpcMsg* buf = session_kern_vaddr + session->tail;
 
     /* check if server session is full */
@@ -98,7 +98,7 @@ int user_irq_handler(int irq, void* tf, void* arg)
     return 0;
 }
 
-extern struct session_backend* create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session);
+extern struct session_backend* create_session_inner(struct Thread* client, struct Thread* server, int capacity, struct Session* user_session);
 /// @warning not tested.
 
 static struct XiziTrapDriver* p_intr_driver = NULL;
@@ -116,10 +116,16 @@ int sys_register_irq(int irq_num, int irq_opcode)
 
     // init kernel sender proxy
     if (kernel_irq_proxy == NULL) {
-        kernel_irq_proxy = xizi_task_manager.new_task_cb();
+        /// @todo handle corner cases
+        struct MemSpace* pmemspace = alloc_memspace();
+        if (pmemspace == NULL) {
+            return -1;
+        }
+        xizi_pager.new_pgdir(&pmemspace->pgdir);
+        memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
+
+        kernel_irq_proxy = xizi_task_manager.new_task_cb(pmemspace);
         kernel_irq_proxy->state = NEVER_RUN;
-        xizi_pager.new_pgdir(&kernel_irq_proxy->pgdir);
-        memcpy(kernel_irq_proxy->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
     }
 
     // bind irq to session
@@ -127,7 +133,7 @@ int sys_register_irq(int irq_num, int irq_opcode)
     ERROR("irq %d is occupied.\n", irq_num);
     return -1;
     }
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     irq_forward_table[irq_num].handle_task = cur_task;
     irq_forward_table[irq_num].opcode = irq_opcode;
     irq_forward_table[irq_num].p_kernel_session = create_session_inner(kernel_irq_proxy, cur_task, PAGE_SIZE, &irq_forward_table[irq_num].session);
@@ -137,7 +143,7 @@ int sys_register_irq(int irq_num, int irq_opcode)
     return 0;
 }
 
-int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
+int sys_unbind_irq(struct Thread* task, int irq_num)
 {
     if (irq_forward_table[irq_num].handle_task != task) {
         return -1;
@@ -147,7 +153,7 @@ int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
     return 0;
 }
 
-int sys_unbind_irq_all(struct TaskMicroDescriptor* task)
+int sys_unbind_irq_all(struct Thread* task)
 {
     for (int idx = 0; idx < NR_IRQS; idx++) {
         if (irq_forward_table[idx].handle_task == task) {
@@ -29,26 +29,64 @@ Modification:
 *************************************************/
 #include "actracer.h"
 #include "assert.h"
+#include "memspace.h"
 #include "share_page.h"
 #include "syscall.h"
 #include "task.h"
 
-extern int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, char** argv);
+extern int task_exec(struct Thread* task, char* img_start, char* name, char** argv);
 int sys_spawn(char* img_start, char* name, char** argv)
 {
-    // alloc a new pcb
-    struct TaskMicroDescriptor* new_task_cb = xizi_task_manager.new_task_cb();
-    if (UNLIKELY(!new_task_cb)) {
-        ERROR("Unable to new task control block.\n");
+    // alloc a new memspace
+    struct MemSpace* pmemspace = alloc_memspace();
+    if (pmemspace == NULL) {
         return -1;
     }
-    // init trapframe
-    arch_init_trapframe(new_task_cb->main_thread.trapframe, 0, 0);
-    if (UNLIKELY(task_exec(new_task_cb, img_start, name, argv)) < 0) {
 
+    // load memspace
+    uintptr_t* entry = load_memspace(pmemspace, img_start);
+    if (NULL == entry) {
+        ERROR("Loading memspace from %016x failed.\n", img_start);
+        free_memspace(pmemspace);
+        return -1;
+    }
+
+    // alloc a new pcb
+    struct Thread* new_task_cb = xizi_task_manager.new_task_cb(pmemspace);
+    if (UNLIKELY(!new_task_cb)) {
+        ERROR("Unable to new task control block.\n");
+        free_memspace(pmemspace);
+        return -1;
+    }
+    assert(!IS_DOUBLE_LIST_EMPTY(&pmemspace->thread_list_guard));
+
+    // init params
+    struct ThreadStackPointer loaded_sp = load_user_stack(pmemspace, argv);
+    if (loaded_sp.stack_idx == -1) {
+        ERROR("Unable to load params to memspace.\n");
+        /* memspace is freed along with free_pcb() */
         xizi_task_manager.free_pcb(new_task_cb);
         return -1;
     }
+    // init pcb
+
+    // init trapframe
+    new_task_cb->thread_context.user_stack_idx = loaded_sp.stack_idx;
+    new_task_cb->thread_context.uspace_stack_addr = USER_MEM_TOP - ((loaded_sp.stack_idx + 1) * USER_STACK_SIZE);
+    new_task_cb->thread_context.ustack_kvaddr = loaded_sp.user_stack_vaddr;
+    arch_init_trapframe(new_task_cb->thread_context.trapframe, 0, 0);
+    arch_trapframe_set_sp_pc(new_task_cb->thread_context.trapframe, loaded_sp.user_sp, (uintptr_t)entry);
+    arch_set_main_params(new_task_cb->thread_context.trapframe, loaded_sp.argc, loaded_sp.user_sp);
+
+    // init thread name
+    char* last = NULL;
+    for (last = name; *name; name++) {
+        if (*name == '/') {
+            last = name + 1;
+        }
+    }
+    strncpy(new_task_cb->name, last, sizeof(new_task_cb->name));
+
     // init pcb schedule attributes
     xizi_task_manager.task_set_default_schedule_attr(new_task_cb);
 
     return 0;
@@ -41,11 +41,11 @@ Modification:
 extern uint8_t _binary_fs_img_start[], _binary_fs_img_end[];
 
 #define SHOWINFO_BORDER_LINE() LOG_PRINTF("******************************************************\n");
-#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->pid, task->name, task->priority, task->mem_size >> 10, task->mem_size >> 10)
+#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->tid, task->name, task->priority, task->memspace->mem_size >> 10, task->memspace->mem_size >> 10)
 
 void show_tasks(void)
 {
-    struct TaskMicroDescriptor* task = NULL;
+    struct Thread* task = NULL;
     SHOWINFO_BORDER_LINE();
     for (int i = 0; i < NR_CPU; i++) {
         LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
@@ -128,7 +128,7 @@ void show_cpu(void)
 
     int cpu_id = 0;
 
-    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
+    struct Thread* current_task = cur_cpu()->task;
     assert(current_task != NULL);
 
     LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n");
@@ -144,7 +144,7 @@ int sys_state(sys_state_option option, sys_state_info* info)
         info->memblock_info.memblock_start = (uintptr_t)V2P(_binary_fs_img_start);
         info->memblock_info.memblock_end = (uintptr_t)V2P(_binary_fs_img_end);
     } else if (option == SYS_STATE_GET_HEAP_BASE) {
-        return cur_cpu()->task->heap_base;
+        return cur_cpu()->task->memspace->heap_base;
     } else if (option == SYS_STATE_SET_TASK_PRIORITY) {
         xizi_task_manager.set_cur_task_priority(info->priority);
     } else if (option == SYS_STATE_SHOW_TASKS) {
@@ -35,7 +35,7 @@ Modification:
 
 int sys_yield(task_yield_reason reason)
 {
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     xizi_task_manager.task_yield_noschedule(cur_task, false);
 
     // handle ipc block
@@ -63,7 +63,8 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
         ret = sys_close_session(cur_cpu()->task, (struct Session*)param1);
         break;
     case SYSCALL_EXEC:
-        ret = sys_exec((char*)param1, (char*)param2, (char**)param3);
+        // ret = sys_exec((char*)param1, (char*)param2, (char**)param3);
+        ret = -1;
         break;
     case SYSCALL_SYS_STATE:
         ret = sys_state(param1, (sys_state_info*)param2);
@@ -1,3 +1,3 @@
-SRC_FILES := task.c schedule.c
+SRC_FILES := task.c schedule.c memspace.c
 
 include $(KERNEL_ROOT)/compiler.mk
@@ -0,0 +1,251 @@
+/*
+* Copyright (c) 2020 AIIT XUOS Lab
+* XiUOS is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+* http://license.coscl.org.cn/MulanPSL2
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+/**
+* @file memspace.c
+* @brief memspace loader
+* @version 3.0
+* @author AIIT XUOS Lab
+* @date 2023.08.25
+*/
+
+/*************************************************
+File name: memspace.c
+Description: memspace loader
+Others:
+History:
+1. Date: 2023-08-28
+Author: AIIT XUOS Lab
+Modification:
+1. first version
+*************************************************/
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "bitmap64.h"
+#include "execelf.h"
+#include "kalloc.h"
+#include "memspace.h"
+#include "pagetable.h"
+#include "task.h"
+
+#define MAX_SUPPORT_PARAMS 32
+
+struct MemSpace* alloc_memspace()
+{
+    struct MemSpace* pmemspace = slab_alloc(&xizi_task_manager.memspace_allocator);
+    if (pmemspace == NULL) {
+        ERROR("Alloc memspace for thread failed.\n");
+        return NULL;
+    }
+
+    bitmap64_init(&pmemspace->thread_stack_idx_bitmap);
+    doubleListNodeInit(&pmemspace->thread_list_guard);
+    pmemspace->massive_ipc_allocator = NULL;
+    pmemspace->heap_base = 0;
+    pmemspace->mem_size = 0;
+    pmemspace->pgdir.pd_addr = 0;
+    return pmemspace;
+}
+
+void free_memspace(struct MemSpace* pmemspace)
+{
+    assert(pmemspace != NULL);
+
+    /* free page table and all its allocated memories */
+    if (pmemspace->pgdir.pd_addr != NULL) {
+        xizi_pager.free_user_pgdir(&pmemspace->pgdir);
+    }
+
+    /* free ipc virt address allocator */
+    if (pmemspace->massive_ipc_allocator != NULL) {
+        KBuddyDestory(pmemspace->massive_ipc_allocator);
+        slab_free(&xizi_task_manager.task_buddy_allocator, (void*)pmemspace->massive_ipc_allocator);
+    }
+
+    slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
+}
+
+/// @return the entry point of the program
+uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start)
+{
+    if (pmemspace == NULL) {
+        ERROR("Loading an empty memspace.\n");
+        return NULL;
+    }
+
+    if (img_start == NULL) {
+        ERROR("Empty elf file.\n");
+        return NULL;
+    }
+
+    /* 1. load elf header */
+    struct elfhdr elf;
+    memcpy((void*)&elf, img_start, sizeof(elf));
+    if (elf.magic != ELF_MAGIC) {
+        ERROR("Not an elf file.\n");
+        return NULL;
+    }
+
+    /* allocate a pgdir */
+    /* only supports the first inited memspace */
+    assert(pmemspace->pgdir.pd_addr == NULL);
+    struct TopLevelPageDirectory pgdir;
+    pgdir.pd_addr = NULL;
+    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
+        ERROR("Create new pgdir failed.\n");
+        goto error_exec;
+    }
+    /* copy kernel pagetable so that interrupts and syscalls won't corrupt */
+    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
+
+    // read elf file by (header, section)
+    uintptr_t load_size = 0;
+    struct proghdr ph;
+    for (int sec_idx = 0, off = elf.phoff; sec_idx < elf.phnum; sec_idx++, off += sizeof(ph)) {
+        // load proghdr
+        memcpy((char*)&ph, img_start + off, sizeof(ph));
+
+        if (ph.type != ELF_PROG_LOAD)
+            continue;
+        if (ph.memsz < ph.filesz) {
+            ERROR("elf header mem size less than file size\n");
+            goto error_exec;
+        }
+
+        // read section
+        // 1. alloc space
+        if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
+            != ph.vaddr + ph.memsz) {
+            goto error_exec;
+        }
+        // 2. copy inode to space
+        assert(ph.vaddr % PAGE_SIZE == 0);
+        for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
+            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
+            if (page_paddr == 0) {
+                ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pgdir.pd_addr);
+                goto error_exec;
+            }
+            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
+            memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);
+        }
+    }
+
+    /// elf file content is now in memory
+    // memspace will use this page dir
+    pmemspace->pgdir = pgdir;
+    pmemspace->heap_base = ALIGNUP(load_size, PAGE_SIZE);
+    pmemspace->mem_size = pmemspace->heap_base;
+
+    return (uintptr_t*)elf.entry;
+
+error_exec:
+    if (pgdir.pd_addr != NULL) {
+        xizi_pager.free_user_pgdir(&pgdir);
+    }
+    ERROR("Error loading memspace.\n");
+    return NULL;
+}
+
+static void handle_error_stack_loading(struct MemSpace* pmemspace, int stack_idx, uintptr_t* stack_bottom, bool is_mapped_successful)
+{
+    if (stack_idx != -1) {
+        bitmap64_free(&pmemspace->thread_stack_idx_bitmap, stack_idx);
+    }
+
+    if (stack_bottom != NULL) {
+        kfree((char*)stack_bottom);
+    }
+
+    if (is_mapped_successful) {
+        xizi_pager.unmap_pages(pmemspace->pgdir.pd_addr, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), USER_STACK_SIZE);
+    }
+}
+
+/// @return the thread's user stack index
+struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** argv)
+{
+    /* usages of load_user_stack() must be correct */
+    assert(pmemspace != NULL);
+    assert(pmemspace->pgdir.pd_addr != NULL);
+    assert(argv != NULL);
+
+    struct ThreadStackPointer loaded_sp = {
+        .argc = 0,
+        .stack_idx = -1,
+        .user_sp = 0,
+        .user_stack_vaddr = 0,
+    };
+
+    /* alloc a user stack index */
+    int stack_idx = bitmap64_alloc(&pmemspace->thread_stack_idx_bitmap);
+    if (stack_idx == -1) {
+        ERROR("Number of threads created exceeds kernel support.\n");
+        handle_error_stack_loading(pmemspace, stack_idx, NULL, false);
+        return loaded_sp;
+    }
+
+    /* allocate memory space for user stack */
+    uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE);
+    if (UNLIKELY(stack_bottom == NULL)) {
+        ERROR("No memory to alloc user stack.\n");
+        handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);
+        return loaded_sp;
+    }
+
+    /* map memory to user stack space in memspace */
+    if (!xizi_pager.map_pages(pmemspace->pgdir.pd_addr, USER_MEM_TOP - ((stack_idx + 1) * USER_STACK_SIZE), V2P(stack_bottom), USER_STACK_SIZE, false)) {
+        /* this could only fail due to an inner page directory allocation failure */
+        ERROR("User stack map failed\n");
+        handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);
+        return loaded_sp;
+    }
+
+    /* start loading main params into user stack */
+    /// @warning supports only main-style params
+    uintptr_t user_vspace_sp = USER_MEM_TOP;
+    static uintptr_t user_stack_init[MAX_SUPPORT_PARAMS];
+    memset(user_stack_init, 0, sizeof(user_stack_init));
+    uintptr_t argc = 0;
+    uintptr_t copy_len = 0;
+    for (argc = 0; argv != NULL && argv[argc] != NULL; argc++) {
+        /// @todo handle a large number of parameters (more than 32)
+
+        // copy param to user stack
+        copy_len = strlen(argv[argc]) + 1;
+        user_vspace_sp = (user_vspace_sp - copy_len) & ~3;
+        uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pmemspace->pgdir, user_vspace_sp, (uintptr_t)argv[argc], copy_len);
+        if (UNLIKELY(copied_len != copy_len)) {
+            ERROR("Something went wrong when copying params.\n");
+            handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, true);
+            return loaded_sp;
+        }
+        user_stack_init[argc] = user_vspace_sp;
+    }
+
+    user_stack_init[argc] = 0;
+    copy_len = (argc + 1) * sizeof(uintptr_t);
+    user_vspace_sp -= copy_len;
+    /* this copy has no reason to fail */
+    uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pmemspace->pgdir, user_vspace_sp, (uintptr_t)user_stack_init, copy_len);
+    assert(copied_len == copy_len);
+
+    pmemspace->mem_size += USER_STACK_SIZE;
+
+    loaded_sp.argc = argc;
+    loaded_sp.stack_idx = stack_idx;
+    loaded_sp.user_sp = user_vspace_sp;
+    loaded_sp.user_stack_vaddr = (uintptr_t)stack_bottom;
+    return loaded_sp;
+}
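For the spawn path (the first thread, stack slot 0), the stack that load_user_stack produces looks like this (a sketch; addresses grow downward from USER_MEM_TOP):

    /*   USER_MEM_TOP                          <- top of slot 0
     *     argv string bytes                   (each copied in turn, sp aligned down to 4 bytes)
     *     uintptr_t argv_array[argc + 1]      (pointers to those strings, NULL-terminated)
     *   user_sp -> argv_array                 <- handed to arch_trapframe_set_sp_pc / arch_set_main_params
     *     ... free stack space, grows down ...
     *   USER_MEM_TOP - USER_STACK_SIZE        <- bottom of slot 0, backed by the kalloc'd pages
     */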
@@ -30,9 +30,9 @@ Modification:
 #include "log.h"
 #include "scheduler.h"
 
-struct TaskMicroDescriptor* max_priority_runnable_task(void)
+struct Thread* max_priority_runnable_task(void)
 {
-    static struct TaskMicroDescriptor* task = NULL;
+    static struct Thread* task = NULL;
     static int priority = 0;
 
     priority = __builtin_ffs(ready_task_priority) - 1;
@@ -53,9 +53,9 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void)
     return NULL;
 }
 
-struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)
+struct Thread* round_robin_runnable_task(uint32_t priority)
 {
-    struct TaskMicroDescriptor* task = NULL;
+    struct Thread* task = NULL;
 
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
     {
@@ -74,7 +74,7 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)
 /* recover task priority */
 void recover_priority(void)
 {
-    struct TaskMicroDescriptor* task = NULL;
+    struct Thread* task = NULL;
     for (int i = 1; i < TASK_MAX_PRIORITY; i++) {
         if (i == TASK_DEFAULT_PRIORITY)
             continue;
@@ -33,7 +33,7 @@ Modification:
 
 #include "assert.h"
 #include "kalloc.h"
-#include "log.h"
+#include "memspace.h"
 #include "multicores.h"
 #include "scheduler.h"
 #include "syscall.h"
@@ -42,7 +42,7 @@ Modification:
 struct CPU global_cpus[NR_CPU];
 uint32_t ready_task_priority;
 
-static inline void task_node_leave_list(struct TaskMicroDescriptor* task)
+static inline void task_node_leave_list(struct Thread* task)
 {
     doubleListDel(&task->node);
     if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
@@ -50,13 +50,13 @@ static inline void task_node_leave_list(struct TaskMicroDescriptor* task)
     }
 }
 
-static inline void task_node_add_to_ready_list_head(struct TaskMicroDescriptor* task)
+static inline void task_node_add_to_ready_list_head(struct Thread* task)
 {
     doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
     ready_task_priority |= ((uint32_t)1 << task->priority);
 }
 
-static inline void task_node_add_to_ready_list_back(struct TaskMicroDescriptor* task)
+static inline void task_node_add_to_ready_list_back(struct Thread* task)
 {
     doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
     ready_task_priority |= ((uint32_t)1 << task->priority);
@@ -71,10 +71,11 @@ static void _task_manager_init()
     doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
     doubleListNodeInit(&xizi_task_manager.task_running_list_head);
     // init task (slab) allocator
-    slab_init(&xizi_task_manager.task_allocator, sizeof(struct TaskMicroDescriptor));
+    slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace));
+    slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread));
     slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy));
 
-    // pid pool
+    // tid pool
     xizi_task_manager.next_pid = 0;
 
     // init priority bit map
@@ -82,22 +83,23 @@ static void _task_manager_init()
 }
 
 /// @brief alloc a new task without init
-static struct TaskMicroDescriptor* _alloc_task_cb()
+static struct Thread* _alloc_task_cb()
 {
     // alloc task and add it to used task list
-    struct TaskMicroDescriptor* task = (struct TaskMicroDescriptor*)slab_alloc(&xizi_task_manager.task_allocator);
+    struct Thread* task = (struct Thread*)slab_alloc(&xizi_task_manager.task_allocator);
     if (UNLIKELY(task == NULL)) {
         ERROR("Not enough memory\n");
         return NULL;
     }
-    // set pid once task is allocated
+    // set tid once task is allocated
     memset(task, 0, sizeof(*task));
-    task->pid = xizi_task_manager.next_pid++;
+    task->tid = xizi_task_manager.next_pid++;
+    task->thread_context.user_stack_idx = -1;
 
     return task;
 }
 
-int _task_retrieve_sys_resources(struct TaskMicroDescriptor* ptask)
+int _task_retrieve_sys_resources(struct Thread* ptask)
 {
     assert(ptask != NULL);
 
@@ -143,7 +145,7 @@ int _task_retrieve_sys_resources(struct TaskMicroDescriptor* ptask)
 
 /// @brief this function changes task list without locking, so it must be called inside a lock critical area
 /// @param task
-static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
+static void _dealloc_task_cb(struct Thread* task)
 {
     if (UNLIKELY(task == NULL)) {
         ERROR("deallocating a NULL task\n");
@@ -152,22 +154,38 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)

     _task_retrieve_sys_resources(task);

-    // stack is mapped in vspace, so it should be free by pgdir
-    if (task->pgdir.pd_addr) {
-        xizi_pager.free_user_pgdir(&task->pgdir);
-    }
-    if (task->main_thread.stack_addr) {
-        kfree((char*)task->main_thread.stack_addr);
+    /* free thread's user stack */
+    if (task->thread_context.user_stack_idx != -1) {
+        // stack is mapped in vspace, so it should be freed from pgdir
+        assert(task->thread_context.user_stack_idx >= 0 && task->thread_context.user_stack_idx < 64);
+        assert(task->memspace != NULL);
+
+        /* the stack must have been set in memspace if its bitmap bit is set */
+        assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task->thread_context.uspace_stack_addr, USER_STACK_SIZE));
+        bitmap64_free(&task->memspace->thread_stack_idx_bitmap, task->thread_context.user_stack_idx);
+        /* the user stack page was allocated from kernel memory, so kfree it as well */
+        assert(kfree((char*)task->thread_context.ustack_kvaddr));
     }

-    // remove it from used task list
+    /* free thread's kernel stack */
+    if (task->thread_context.kern_stack_addr) {
+        kfree((char*)task->thread_context.kern_stack_addr);
+    }
+
+    /* free memspace if needed */
+    if (task->memspace != NULL) {
+        doubleListDel(&task->memspace_list_node);
+        /* free memspace if this thread is the last one using it */
+        if (IS_DOUBLE_LIST_EMPTY(&task->memspace->thread_list_guard)) {
+            // free memspace
+            free_memspace(task->memspace);
+        }
+    }
+
+    // remove thread from used task list
     task_node_leave_list(task);

     // free task back to allocator
     if (task->massive_ipc_allocator != NULL) {
         KBuddyDestory(task->massive_ipc_allocator);
         slab_free(&xizi_task_manager.task_buddy_allocator, (void*)task->massive_ipc_allocator);
     }
     slab_free(&xizi_task_manager.task_allocator, (void*)task);
 }

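[Editor's note] The new user-stack teardown above pairs with a per-memspace 64-bit bitmap (thread_stack_idx_bitmap): one bit per stack slot, matching the idx < 64 assert. A minimal sketch of the alloc/free pair under that assumption; the kernel's real bitmap64_* routines may differ in detail:

#include <stdint.h>

struct bitmap64 {
    uint64_t map; /* bit i set => stack slot i is in use */
};

static int bitmap64_alloc(struct bitmap64* bmp)
{
    for (int i = 0; i < 64; i++) {
        if ((bmp->map & (1ULL << i)) == 0) { /* first free slot */
            bmp->map |= (1ULL << i);
            return i;
        }
    }
    return -1; /* all 64 thread-stack slots are taken */
}

static void bitmap64_free(struct bitmap64* bmp, int idx)
{
    bmp->map &= ~(1ULL << idx); /* slot becomes reusable */
}

With a fixed slot index, the stack's user virtual address can be computed as a pure function of the index, which is why freeing needs only unmap_pages plus a bit clear.
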
@@ -179,45 +197,53 @@ __attribute__((optimize("O0"))) void task_prepare_enter()
     trap_return();
 }

-static struct TaskMicroDescriptor* _new_task_cb()
+static struct Thread* _new_task_cb(struct MemSpace* pmemspace)
 {
     // alloc task space
-    struct TaskMicroDescriptor* task = _alloc_task_cb();
+    struct Thread* task = _alloc_task_cb();
     if (!task) {
         return NULL;
     }
-    // init vm
-    task->pgdir.pd_addr = NULL;
+    if (pmemspace != NULL) {
+        task->memspace = pmemspace;
+        task->thread_context.user_stack_idx = -1;
+        doubleListNodeInit(&task->memspace_list_node);
+        doubleListAddOnBack(&task->memspace_list_node, &pmemspace->thread_list_guard);
+    } else {
+        task->memspace = NULL;
+    }

     /* init basic task member */
     doubleListNodeInit(&task->cli_sess_listhead);
     doubleListNodeInit(&task->svr_sess_listhead);

     /* init main thread of task */
-    task->main_thread.task = task;
+    task->thread_context.task = task;
     // alloc stack page for task
-    if ((void*)(task->main_thread.stack_addr = (uintptr_t)kalloc(USER_STACK_SIZE)) == NULL) {
+    if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc(USER_STACK_SIZE)) == NULL) {
         _dealloc_task_cb(task);
         return NULL;
     }

     /* set context of main thread stack */
     /// stack bottom
-    memset((void*)task->main_thread.stack_addr, 0x00, USER_STACK_SIZE);
-    char* sp = (char*)task->main_thread.stack_addr + USER_STACK_SIZE - 4;
+    memset((void*)task->thread_context.kern_stack_addr, 0x00, USER_STACK_SIZE);
+    char* sp = (char*)task->thread_context.kern_stack_addr + USER_STACK_SIZE - 4;

     /// 1. trap frame into stack, for process to normally return by trap_return
-    sp -= sizeof(*task->main_thread.trapframe);
-    task->main_thread.trapframe = (struct trapframe*)sp;
+    sp -= sizeof(*task->thread_context.trapframe);
+    task->thread_context.trapframe = (struct trapframe*)sp;

     /// 2. context into stack
-    sp -= sizeof(*task->main_thread.context);
-    task->main_thread.context = (struct context*)sp;
-    arch_init_context(task->main_thread.context);
+    sp -= sizeof(*task->thread_context.context);
+    task->thread_context.context = (struct context*)sp;
+    arch_init_context(task->thread_context.context);

     return task;
 }

-static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task)
+static void _task_set_default_schedule_attr(struct Thread* task)
 {
     task->remain_tick = TASK_CLOCK_TICK;
     task->maxium_tick = TASK_CLOCK_TICK * 10;

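[Editor's note] For readers tracing _new_task_cb above, the kernel stack it builds ends up laid out as follows (a sketch inferred from the code; the stack grows downward):

/*
 *  kern_stack_addr + USER_STACK_SIZE - 4   <- initial sp (stack bottom)
 *  [struct trapframe]                      <- thread_context.trapframe
 *  [struct context]                        <- thread_context.context (sp after setup)
 *  ... free kernel-stack space ...
 *  kern_stack_addr                         <- lowest address of the kalloc block
 */

A later context_switch restores the struct context, and trap_return then consumes the struct trapframe, so the new thread enters user mode as if returning from a trap (see task_prepare_enter above).
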
@@ -226,7 +252,7 @@ static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task)
     task_node_add_to_ready_list_head(task);
 }

-static void task_state_set_running(struct TaskMicroDescriptor* task)
+static void task_state_set_running(struct Thread* task)
 {
     assert(task != NULL && task->state == READY);
     task->state = RUNNING;

@@ -234,12 +260,12 @@ static void task_state_set_running(struct TaskMicroDescriptor* task)
     doubleListAddOnHead(&task->node, &xizi_task_manager.task_running_list_head);
 }

-struct TaskMicroDescriptor* next_task_emergency = NULL;
+struct Thread* next_task_emergency = NULL;
 extern void context_switch(struct context**, struct context*);
 static void _scheduler(struct SchedulerRightGroup right_group)
 {
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
-    struct TaskMicroDescriptor* next_task;
+    struct Thread* next_task;
     struct CPU* cpu = cur_cpu();

     while (1) {

@@ -269,13 +295,14 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         /* run the chosen task */
         task_state_set_running(next_task);
         cpu->task = next_task;
-        p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
-        context_switch(&cpu->scheduler, next_task->main_thread.context);
+        assert(next_task->memspace->pgdir.pd_addr != NULL);
+        p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->memspace->pgdir.pd_addr));
+        context_switch(&cpu->scheduler, next_task->thread_context.context);
         assert(next_task->state != RUNNING);
     }
 }

-static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
+static void _task_yield_noschedule(struct Thread* task, bool blocking)
 {
     assert(task != NULL);
     /// @warning only support current task yield now

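[Editor's note] The switch above now derives the page directory from the shared memspace rather than from the thread, so sibling threads of one task reload the same pgdir. The calling contract of the (assembly) context_switch routine, as implied by its uses throughout this diff; a sketch, not the actual implementation:

/*
 * context_switch(struct context** save_to, struct context* load_from):
 *   1. push the callee-saved registers as a struct context on the current stack;
 *   2. store that frame's address through save_to (here: &cpu->scheduler);
 *   3. switch sp to load_from and pop its struct context.
 * Control returns to the caller only when a later switch stores back into
 * the same slot and resumes this stack.
 */
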
@@ -291,7 +318,7 @@ static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocki
     task_node_add_to_ready_list_back(task);
 }

-static void _task_block(struct TaskMicroDescriptor* task)
+static void _task_block(struct Thread* task)
 {
     assert(task != NULL);
     assert(task->state != RUNNING);

@@ -300,7 +327,7 @@ static void _task_block(struct TaskMicroDescriptor* task)
     doubleListAddOnHead(&task->node, &xizi_task_manager.task_blocked_list_head);
 }

-static void _task_unblock(struct TaskMicroDescriptor* task)
+static void _task_unblock(struct Thread* task)
 {
     assert(task != NULL);
     assert(task->state == BLOCKED);

@@ -318,7 +345,7 @@ static void _set_cur_task_priority(int priority)
         return;
     }

-    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
+    struct Thread* current_task = cur_cpu()->task;
     assert(current_task != NULL && current_task->state == RUNNING);

     task_node_leave_list(current_task);

@@ -58,14 +58,14 @@ __attribute__((optimize("O0"))) void dabort_handler(struct trapframe* r)
         panic("data abort exception\n");
     }

-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     ERROR("dabort in user space: %s\n", cur_task->name);
     dabort_reason(r);

     xizi_enter_kernel();
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
-    context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
+    context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
     panic("dabort end should never be reached.\n");
 }

@@ -78,13 +78,13 @@ __attribute__((optimize("O0"))) void iabort_handler(struct trapframe* r)
         panic("kernel prefetch abort exception\n");
     }

-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     ERROR("iabort in user space: %s\n", cur_task->name);
     iabort_reason(r);

     xizi_enter_kernel();
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
-    context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
+    context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
     panic("iabort end should never be reached.\n");
 }

@@ -53,7 +53,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
     if (p_clock_driver->is_timer_expired()) {
         p_clock_driver->clear_clock_intr();
         global_tick++;
-        struct TaskMicroDescriptor* current_task = cur_cpu()->task;
+        struct Thread* current_task = cur_cpu()->task;
         if (current_task) {
             current_task->remain_tick--;
             current_task->maxium_tick--;

@@ -65,9 +65,9 @@ void intr_irq_dispatch(struct trapframe* tf)
         goto intr_leave_interrupt;
     }

-    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
+    struct Thread* current_task = cur_cpu()->task;
     assert(current_task != NULL);
-    current_task->main_thread.trapframe = tf;
+    current_task->thread_context.trapframe = tf;

     int cpu = cur_cpuid();
     assert(cpu >= 0 && cpu < NR_CPU);

@@ -86,7 +86,7 @@ void intr_irq_dispatch(struct trapframe* tf)

     if (cur_cpu()->task == NULL || current_task->state != RUNNING) {
         cur_cpu()->task = NULL;
-        context_switch(&current_task->main_thread.context, cur_cpu()->scheduler);
+        context_switch(&current_task->thread_context.context, cur_cpu()->scheduler);
     }
     assert(current_task == cur_cpu()->task);

@@ -52,15 +52,15 @@ void software_irq_dispatch(struct trapframe* tf)
     assert(p_intr_driver != NULL);

     // get current task
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    struct Thread* cur_task = cur_cpu()->task;
     /// @todo: Handle dead task

     int syscall_num = -1;
     if (cur_task && cur_task->state != DEAD) {
-        cur_task->main_thread.trapframe = tf;
+        cur_task->thread_context.trapframe = tf;
         // call syscall

-        int ret = arch_syscall(cur_task->main_thread.trapframe, &syscall_num);
+        int ret = arch_syscall(cur_task->thread_context.trapframe, &syscall_num);

         if (syscall_num != SYSCALL_EXEC) {
             arch_set_return(tf, ret);

@@ -69,7 +69,7 @@ void software_irq_dispatch(struct trapframe* tf)

         if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) {
             cur_cpu()->task = NULL;
-            context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
+            context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
         }
         if (syscall_num == SYSCALL_EXIT) {
             panic("Exit reaches");

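[Editor's note] A condensed control-flow sketch of the syscall path above, using only names that appear in the diff; the "patch r0" remark assumes the ARM EABI convention of returning values in r0 on this Cortex-A9 target:

/*
 * software_irq_dispatch, schematically:
 *
 *   cur_task->thread_context.trapframe = tf;   // publish saved user registers
 *   ret = arch_syscall(tf, &syscall_num);      // decode number, run handler
 *   if (syscall_num != SYSCALL_EXEC)
 *       arch_set_return(tf, ret);              // patch r0 in the saved frame
 *   ...
 *   if (cur_task->state != RUNNING)            // exited or blocked in the call
 *       context_switch(&cur_task->thread_context.context, cur_cpu()->scheduler);
 */
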
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2020 AIIT XUOS Lab
- * XiUOS is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *        http://license.coscl.org.cn/MulanPSL2
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- */
-
-/**
- * @file: test_cache.h
- * @brief: test the function of cache
- * @version: 3.0
- * @author: AIIT XUOS Lab
- * @date: 2023/4/27
- *
- */
-
-/*************************************************
-File name: l1_cache.h
-Description: test the function of cache
-Others:
-History:
-1. Date: 2023-04-27
-Author: AIIT XUOS Lab
-Modification:
-1. test the function of cache
-*************************************************/
-
-#include "irq_numbers.h"
-#include "trap_common.h"
-
-#include "cache_common_ope.h"
-#include "log.h"
-#include "kalloc.h"
-
-int system_tick = 0;
-extern struct ICacheDone icache_done;
-extern struct DCacheDone dcache_done;
-
-void test_cache(void)
-{
-    int* block = (int*)kalloc(200000 * sizeof(int));
-
-    int size = 100000;
-    xizi_trap_driver.cpu_irq_disable();
-    icache_done.enable();
-    dcache_done.enable();
-    xizi_trap_driver.cpu_irq_enable();
-
-    for (int i = 0; i < 2; i++) {
-        LOG("start.\n");
-        int temp = 0;
-
-        int tick0 = system_tick;
-        for (int i = 0; i < size; ++i) {
-            block[i] = 0;
-            temp += block[i];
-        }
-
-        int tick1 = system_tick;
-        LOG("tick1:%d.\n", tick1 - tick0);
-
-        temp = 0;
-        tick1 = system_tick;
-        for (int i = 0; i < size; ++i)
-            temp += block[i];
-        int tick2 = system_tick;
-        LOG("tick2:%d.\n", tick2 - tick1);
-
-        xizi_trap_driver.cpu_irq_disable();
-        dcache_done.flushall();
-        xizi_trap_driver.cpu_irq_enable();
-        LOG("ok.\n");
-    }
-}