forked from xuos/xiuos
Support SMP in the soft kernel. Support userland interrupt handlers via IPC. Ensure interrupts stay disabled in the kernel and that only one thread runs in kernel state at a time. Support userland dynamic memory allocation. Move the ELF image reader to the usyscall level. Support blocking a task during the IPC procedure. Split free physical memory into kernel and userland regions. Support killing another task, though a task is only freed once it is no longer in the running state. (from Tuyuyang)
it is OK
This commit is contained in: bcd9835020
@@ -50,7 +50,7 @@ Modification:
#define CPSR_MODE (0x1f) //!< Current processor mode
//@}

#define MODE_STACK_SIZE 0x4000
#define MODE_STACK_SIZE 0x1000

//! @name Interrupt enable bits in CPSR
//@{
@@ -107,11 +107,11 @@ struct context {

/// @brief init task context, set return address to trap return
/// @param
extern void trap_return(void);
extern void task_prepare_enter();
__attribute__((__always_inline__)) static inline void arch_init_context(struct context* ctx)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->lr = (uint32_t)(trap_return);
    ctx->lr = (uint32_t)(task_prepare_enter + 4);
}

struct trapframe {

@@ -193,4 +193,6 @@ __attribute__((__always_inline__)) static inline void arch_set_return(struct tra
    tf->r0 = (uint32_t)ret;
}

void cpu_start_secondary(uint8_t cpu_id);
void start_smp_cache_broadcast(int cpu_id);
#endif
@@ -1,3 +1,8 @@
SRC_FILES := ivt.c dcd.c imx6q_lowlevel_init.S cortexA9.S boot.S
SRC_FILES := ivt.c \
	dcd.c \
	boot.S \
	imx6q_lowlevel_init.S \
	cortexA9.S \
	smp.c

include $(KERNEL_ROOT)/compiler.mk
@@ -84,11 +84,10 @@ _boot_start:
    @ check cpu id - cpu0 is primary cpu
    cmp r5, #0
    beq primary_cpu_init
    bx r4 @ for secondary cpus, jump to argument function pointer passed in by ROM

    @ control should not return from the secondary cpu entry point
    b .
    bl bootmain @ for secondary cpus, jump to argument function pointer passed in by ROM

    bl .

primary_cpu_init:
    /* init .bss */
    /* clear the .bss section (zero init) */
@@ -71,7 +71,7 @@ arm_set_interrupt_state:
cpu_get_current:
    mrc p15, 0, r0, c0, c0, 5
    and r0, r0, #3
    BX lr
    BX lr
.endfunc @cpu_get_current()@

.global enable_neon_fpu
@@ -128,8 +128,8 @@ disable_L1_cache:
get_arm_private_peripheral_base:

    @ Get base address of private perpherial space
    mrc p15, 4, r0, c15, c0, 0 @ Read periph base address
    @ mov r0, #0x00A00000
    @ mrc p15, 4, r0, c15, c0, 0 @ Read periph base address
    mov r0, #0x00A00000
    bx lr

.endfunc @get_arm_private_peripheral_base()@
@@ -213,7 +213,8 @@ arm_branch_target_cache_invalidate_is:
scu_enable:

    @ mrc p15, 4, r0, c15, c0, 0 @ Read periph base address
    mov r0, #0x00A00000
    mov r0, #0x00A00000
    add r0, #0x80000000

    ldr r1, [r0, #0x0] @ Read the SCU Control Register
    orr r1, r1, #0x1 @ Set bit 0 (The Enable bit)
@@ -268,7 +269,8 @@ scu_leave_smp:
scu_get_cpus_in_smp:

    @ mrc p15, 4, r0, c15, c0, 0 @ Read periph base address
    mov r0, #0x00A00000
    mov r0, #0x00A00000
    add r0, #0x80000000

    ldr r0, [r0, #0x004] @ Read SCU Configuration register
    mov r0, r0, lsr #4 @ Bits 7:4 gives the cores in SMP mode, shift then mask
@@ -327,6 +329,7 @@ scu_secure_invalidate:
    mov r1, r1, lsl r0 @ Shift ways into the correct CPU field

    mrc p15, 4, r2, c15, c0, 0 @ Read periph base address
    add r2, #0x80000000

    str r1, [r2, #0x0C] @ Write to SCU Invalidate All in Secure State

(One file's diff is suppressed because it is too large.)
@@ -81,7 +81,7 @@ MEMORY
{
    ocram (rwx) : ORIGIN = 0x00900000, LENGTH = 256K
    ddr3 (rwx) : ORIGIN = 0x10000000, LENGTH = 1024M
    virt_ddr3 (WRX) : ORIGIN = 0x90010000, LENGTH = 1024M
    virt_ddr3 (WRX) : ORIGIN = 0x90011000, LENGTH = 1024M
}

SECTIONS
@@ -155,7 +155,7 @@ SECTIONS
    } > ddr3

    /* Other Kernel code is placed over 0x80000000 + 128KB. */
    .text : AT(0x10010000) {
    .text : AT(0x10011000) {
        *(.vectors)
        . = ALIGN(0x1000);
        *(.text .text.* .gnu.linkonce.t.*)
@@ -0,0 +1,81 @@
/*
 * Copyright (c) 2010-2012, Freescale Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * o Redistributions of source code must retain the above copyright notice, this list
 *   of conditions and the following disclaimer.
 *
 * o Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * o Neither the name of Freescale Semiconductor, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file smp.c
 * @brief start multicore
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2024.03.10
 */

/*************************************************
File name: smp.c
Description:
Others:
History:
1. Date: 2024-03-10
Author: AIIT XUOS Lab
Modification:
1. No modifications
*************************************************/
#include "cortex_a9.h"
#include "regssrc.h"

extern void _boot_start();
void cpu_start_secondary(uint8_t cpu_id)
{
    // Prepare pointers for ROM code. The entry point is always _start, which does some
    // basic preparatory work and then calls the common_cpu_entry function, which itself
    // calls the entry point saved in s_core_info.
    switch (cpu_id) {
    case 1:
        HW_SRC_GPR3_WR((uint32_t)&_boot_start);
        HW_SRC_SCR.B.CORE1_ENABLE = 1;
        break;

    case 2:
        HW_SRC_GPR5_WR((uint32_t)&_boot_start);
        HW_SRC_SCR.B.CORE2_ENABLE = 1;
        break;

    case 3:
        HW_SRC_GPR7_WR((uint32_t)&_boot_start);
        HW_SRC_SCR.B.CORE3_ENABLE = 1;
        break;
    default:
        break;
    }
}

void start_smp_cache_broadcast(int cpu_id)
{
    return;
}
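Review note: cpu_start_secondary() wakes a core by writing the boot entry into that core's SRC general-purpose register and setting its enable bit in SRC_SCR; the i.MX6 ROM then jumps to that address on the woken core. A minimal bring-up sketch, assuming the primary CPU calls it once per secondary core (the caller and loop below are assumptions, not part of this commit):

    // sketch: primary core kicks each secondary core in turn
    void start_secondary_cpus(void)
    {
        for (uint8_t id = 1; id < NR_CPU; id++) {
            cpu_start_secondary(id); // SRC_GPRn <- _boot_start, then COREn_ENABLE = 1
        }
    }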
@@ -1,3 +1,3 @@
SRC_FILES := boot.S cpu_init.S xil-crt0.S cortexA9.S
SRC_FILES := boot.S cpu_init.S xil-crt0.S cortexA9.S smp.c

include $(KERNEL_ROOT)/compiler.mk
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2010-2012, Freescale Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * o Redistributions of source code must retain the above copyright notice, this list
 *   of conditions and the following disclaimer.
 *
 * o Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * o Neither the name of Freescale Semiconductor, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file smp.c
 * @brief start multicore
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2024.03.10
 */

/*************************************************
File name: smp.c
Description:
Others:
History:
1. Date: 2024-03-10
Author: AIIT XUOS Lab
Modification:
1. No modifications
*************************************************/
#include "cortex_a9.h"

extern void _boot_start();
void cpu_start_secondary(uint8_t cpu_id)
{
    return;
}

void start_smp_cache_broadcast(int cpu_id)
{
    return;
}
@@ -72,7 +72,6 @@ SECTIONS
    PROVIDE(boot_end_addr = .);
} > ddr3

/* Other Kernel code is placed over 0x80000000 + 128KB. */
.text : AT(0x00110000) {
    *(.vectors)
    . = ALIGN(0x1000);
@@ -34,6 +34,7 @@ Modification:

#include "clock_common_op.h"
#include "irq_numbers.h"
#include "multicores.h"

static void _sys_clock_init()
{
@@ -36,6 +36,7 @@ Modification:
#include "uart_common_ope.h"

#include "assert.h"
#include "pagetable.h"

#define KERN_BOOT_DRIVER(n, bi, f) \
    {                              \

@@ -130,7 +131,6 @@ static bool xizi_gpt_init()
    struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&intr_driver_tag);
    p_intr_driver->bind_irq_handler(p_clock_driver->get_clock_int(), xizi_clock_handler);
    p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), 0, 0);

    return true;
}
@@ -196,4 +196,31 @@ bool hardkernel_init(struct TraceTag* _hardkernel_tag)
        LOG_PRINTF("\n");
    }
    return true;
}

bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag)
{
    struct TraceTag init_intr_tag, init_icache_tag, init_dcache_tag, init_clock_tag, init_mmu_tag;
    AchieveResourceTag(&init_intr_tag, _hardkernel_tag, "intr-ac-resource");
    AchieveResourceTag(&init_icache_tag, _hardkernel_tag, "icache-ac-resource");
    AchieveResourceTag(&init_dcache_tag, _hardkernel_tag, "dcache-ac-resource");
    AchieveResourceTag(&init_clock_tag, _hardkernel_tag, "clock-ac-resource");
    AchieveResourceTag(&init_mmu_tag, _hardkernel_tag, "mmu-ac-resource");
    struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&init_intr_tag);
    struct ICacheDone* p_icache_driver = (struct ICacheDone*)AchieveResource(&init_icache_tag);
    struct DCacheDone* p_dcache_driver = (struct DCacheDone*)AchieveResource(&init_dcache_tag);
    struct XiziClockDriver* p_clock_driver = (struct XiziClockDriver*)AchieveResource(&init_clock_tag);

    // secondary cpu init hardwares
    // intr
    p_intr_driver->sys_irq_init(cpu_id);
    p_intr_driver->cpu_irq_disable();
    // cache
    p_icache_driver->enable();
    p_dcache_driver->enable();
    // clock
    p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0);
    // mmu
    secondary_cpu_load_kern_pgdir(&init_mmu_tag, &init_intr_tag);
    return true;
}
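Review note: secondary_cpu_hardkernel_init() re-runs only the per-CPU pieces of hardkernel setup: banked GIC state, caches, the per-CPU clock interrupt, and loading the shared kernel page directory. A sketch of the assumed secondary entry path (the function name and the scheduler step are illustrative, not from this diff):

    // sketch: what a secondary core would run after bootmain()
    void secondary_cpu_main(int cpu_id, struct TraceTag* hardkernel_tag)
    {
        secondary_cpu_hardkernel_init(cpu_id, hardkernel_tag); // per-CPU intr/cache/clock/mmu
        // enabling IRQs and entering the per-CPU scheduler loop would follow here
    }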
@@ -1,3 +1,4 @@
SRC_DIR := arm/armv7-a/cortex-a9
SRC_FILES := spinlock.c

include $(KERNEL_ROOT)/compiler.mk
@@ -1,5 +1,5 @@

SRC_FILES := vector.S trampoline.S $(BOARD)/trap_common.c error_debug.c spinlock.c hard_spinlock.S
SRC_FILES := vector.S trampoline.S $(BOARD)/trap_common.c error_debug.c hard_spinlock.S

ifeq ($(BOARD), imx6q-sabrelite)
SRC_DIR := gicv2
@@ -9,7 +9,6 @@ ifeq ($(BOARD), zynq7000-zc702)
# SRC_DIR := gicv2
SRC_DIR := gicv3
SRC_FILES += $(BOARD)/xil_assert.c
# SRC_FILES := vector.S trampoline.S imx6q-sabrelite/trap_common.c error_debug.c spinlock.c hard_spinlock.S
endif
@@ -41,31 +41,18 @@ Modification:
*************************************************/
#include "core.h"
#include "memlayout.h"
#include "spinlock.h"
#include "trap_common.h"

#include "log.h"
#include "assert.h"
#include "multicores.h"
#include "syscall.h"

__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
{
    if ((fault_status & 0xd) == 0x1) // Alignment failure
        KPrintf("reason: alignment\n");
    else if ((fault_status & 0xd) == 0x5) // External abort "on translation"
        KPrintf("reason: ext. abort on trnslt.\n");
    else if ((fault_status & 0xd) == 0x5) // Translation
        KPrintf("reason: sect. translation\n");
    else if ((fault_status & 0xd) == 0x9) // Domain
        KPrintf("reason: sect. domain\n");
    else if ((fault_status & 0xd) == 0xd) // Permission
        KPrintf("reason: sect. permission\n");
    else if ((fault_status & 0xd) == 0x8) // External abort
        KPrintf("reason: ext. abort\n");
    else
        KPrintf("reason: unknown???\n");
}
#include "task.h"

void dump_tf(struct trapframe* tf)
{
    KPrintf("sp_usr: 0x%x\n", tf->sp_usr);
    KPrintf("lr_usr: 0x%x\n", tf->lr_usr);
    KPrintf("lr_svc: 0x%x\n", tf->lr_svc);
    KPrintf("  spsr: 0x%x\n", tf->spsr);
    KPrintf("    r0: 0x%x\n", tf->r0);
@@ -84,62 +71,74 @@ void dump_tf(struct trapframe* tf)
    KPrintf("    pc: 0x%x\n", tf->pc);
}

void dabort_reason(struct trapframe* r)
{
    uint32_t fault_status, dfa;
    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(fault_status)::);
    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);
    LOG("program counter: 0x%x caused\n", r->pc);
    LOG("data abort at 0x%x, status 0x%x\n", dfa, fault_status);

    if ((fault_status & 0xd) == 0x1) // Alignment failure
        KPrintf("reason: alignment\n");
    else if ((fault_status & 0xd) == 0x5) // External abort "on translation"
        KPrintf("reason: ext. abort on trnslt.\n");
    else if ((fault_status & 0xd) == 0x5) // Translation
        KPrintf("reason: sect. translation\n");
    else if ((fault_status & 0xd) == 0x9) // Domain
        KPrintf("reason: sect. domain\n");
    else if ((fault_status & 0xd) == 0xd) // Permission
        KPrintf("reason: sect. permission\n");
    else if ((fault_status & 0xd) == 0x8) // External abort
        KPrintf("reason: ext. abort\n");
    else
        KPrintf("reason: unknown???\n");

    dump_tf(r);
}

void iabort_reason(struct trapframe* r)
{
    uint32_t fault_status, ifa;
    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(fault_status)::);
    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);
    LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, fault_status);

    if ((fault_status & 0xd) == 0x1) // Alignment failure
        KPrintf("reason: alignment\n");
    else if ((fault_status & 0xd) == 0x5) // External abort "on translation"
        KPrintf("reason: ext. abort on trnslt.\n");
    else if ((fault_status & 0xd) == 0x5) // Translation
        KPrintf("reason: sect. translation\n");
    else if ((fault_status & 0xd) == 0x9) // Domain
        KPrintf("reason: sect. domain\n");
    else if ((fault_status & 0xd) == 0xd) // Permission
        KPrintf("reason: sect. permission\n");
    else if ((fault_status & 0xd) == 0x8) // External abort
        KPrintf("reason: ext. abort\n");
    else
        KPrintf("reason: unknown???\n");

    dump_tf(r);
}

void handle_undefined_instruction(struct trapframe* tf)
{
    // unimplemented trap handler
    KPrintf("undefined instruction at %x\n", tf->pc);
    xizi_enter_kernel();
    ERROR("undefined instruction at %x\n", tf->pc);
    panic("");
}

extern void context_switch(struct context**, struct context*);
void dabort_handler(struct trapframe* r)
void handle_reserved(void)
{
    uint32_t dfs, dfa;

    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);

    if (r->pc < KERN_MEM_BASE) { // Exception occured in User space: exit
        ERROR("dabort in user space: %s\n", cur_cpu()->task->name);
        LOG("program counter: 0x%x caused\n", r->pc);
        LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
        _abort_reason(dfs);
        dump_tf(r);
    }
    if (cur_cpu()->task != NULL) {
        sys_exit();
        context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
    } else { // Exception occured in Kernel space: panic
        LOG("program counter: 0x%x caused\n", r->pc);
        LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
        _abort_reason(dfs);
        dump_tf(r);
        panic("data abort exception\n");
    }
    // unimplemented trap handler
    xizi_enter_kernel();
    panic("Unimplemented Reserved\n");
}

void iabort_handler(struct trapframe* r)
void handle_fiq(void)
{
    uint32_t ifs, ifa;

    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifa)::);

    if (r->pc < KERN_MEM_BASE) { // Exception occured in User space: exit
        ERROR("iabort in user space: %s\n", cur_cpu()->task->name);
        LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
        LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
        _abort_reason(ifs);
        dump_tf(r);
    }
    if (cur_cpu()->task != NULL) {
        sys_exit();
        context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
    } else { // Exception occured in Kernel space: panic
        LOG("program counter: 0x%x(%s) caused\n", r->pc, cur_cpu()->task);
        LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
        _abort_reason(ifs);
        dump_tf(r);
        panic("prefetch abort exception\n");
    }
}
    xizi_enter_kernel();
    panic("Unimplemented FIQ\n");
}
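Review note: the abort paths classify faults by masking the fault status register with 0xd, so the second (fault_status & 0xd) == 0x5 branch in each chain is unreachable; only the first 0x5 case ever fires. The repeated chain could be collapsed into one helper, sketched here (a hypothetical refactor, not in the commit):

    // sketch: shared decode for the (fs & 0xd) classification used above
    static const char* abort_reason_str(uint32_t fault_status)
    {
        switch (fault_status & 0xd) {
        case 0x1: return "alignment";
        case 0x5: return "ext. abort on translation"; // shadows the "translation" case
        case 0x9: return "sect. domain";
        case 0xd: return "sect. permission";
        case 0x8: return "ext. abort";
        default: return "unknown";
        }
    }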
@@ -34,6 +34,7 @@ Modification:
#include "trap_common.h"

#include "log.h"
#include "multicores.h"

extern void init_stack(uint32_t, uint32_t);
extern void user_trap_swi_enter(void);

@@ -41,12 +42,13 @@ extern void trap_iabort(void);
extern void trap_dabort(void);
extern void trap_irq_enter(void);
extern void trap_undefined_instruction(void);
extern void handle_reserved(void);
extern void handle_fiq(void);

static struct XiziTrapDriver xizi_trap_driver;

void panic(char* s)
{
    xizi_trap_driver.cpu_irq_disable();
    KPrintf("panic: %s\n", s);
    for (;;)
        ;

@@ -54,7 +56,6 @@ void panic(char* s)

/* stack for different mode*/
static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE];

extern uint32_t _vector_jumper;
extern uint32_t _vector_start;
extern uint32_t _vector_end;
@@ -69,37 +70,25 @@ void init_cpu_mode_stacks(int cpu_id)
    }
}

void handle_reserved(void)
{
    // unimplemented trap handler
    LOG("Unimplemented Reserved\n");
    panic("");
}

void handle_fiq(void)
{
    LOG("Unimplemented FIQ\n");
    panic("");
}

static void _sys_irq_init()
static void _sys_irq_init(int cpu_id)
{
    /* load exception vectors */
    volatile uint32_t* vector_base = &_vector_start;
    init_cpu_mode_stacks(cpu_id);
    if (cpu_id == 0) {
        volatile uint32_t* vector_base = &_vector_start;

        // Set Interrupt handler start address
        vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
        vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
        vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
        vector_base[4] = (uint32_t)trap_dabort; // Data Abort
        vector_base[5] = (uint32_t)handle_reserved; // Reserved
        vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
        vector_base[7] = (uint32_t)handle_fiq; // FIQ

    init_cpu_mode_stacks(0);
    // Set Interrupt handler start address
    vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
    vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
    vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
    vector_base[4] = (uint32_t)trap_dabort; // Data Abort
    vector_base[5] = (uint32_t)handle_reserved; // Reserved
    vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
    vector_base[7] = (uint32_t)handle_fiq; // FIQ

        gic_init();
    }
    /* active hardware irq responser */
    gic_init();
    xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
}
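Review note: the vector_base[1..7] stores fill the ARMv7-A exception vector slots at offsets 0x04-0x1C from the vector base (index times 4 bytes); slot 0 is reset. Under the new _sys_irq_init(int cpu_id), only CPU 0 installs the table, while every CPU sets up its own mode stacks and GIC interface. The standard slot layout, for reference:

    // ARMv7-A exception vector slots implied by the indices above
    enum arm_vector_slot {
        VEC_RESET = 0,          // 0x00
        VEC_UNDEF = 1,          // 0x04: trap_undefined_instruction
        VEC_SWI = 2,            // 0x08: user_trap_swi_enter
        VEC_PREFETCH_ABORT = 3, // 0x0C: trap_iabort
        VEC_DATA_ABORT = 4,     // 0x10: trap_dabort
        VEC_RESERVED = 5,       // 0x14: handle_reserved
        VEC_IRQ = 6,            // 0x18: trap_irq_enter
        VEC_FIQ = 7,            // 0x1C: handle_fiq
    };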
@@ -151,29 +140,6 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
    xizi_trap_driver.sw_irqtbl[irq].handler = handler;
}

static bool _send_sgi(uint32_t irq, uint32_t bitmask, enum SgiFilterType type)
{
    if (bitmask > (1 << NR_CPU) - 1) {
        return false;
    }

    enum _gicd_sgi_filter sgi_filter;
    switch (type) {
    case SgiFilter_TargetList:
        sgi_filter = kGicSgiFilter_UseTargetList;
        break;
    case SgiFilter_AllOtherCPUs:
        sgi_filter = kGicSgiFilter_AllOtherCPUs;
        break;
    default:
        sgi_filter = kGicSgiFilter_OnlyThisCPU;
        break;
    }
    gic_send_sgi(irq, bitmask, sgi_filter);

    return true;
}

static uint32_t _hw_before_irq()
{
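Review note: _send_sgi() rejects bitmasks that name CPUs beyond NR_CPU and maps the generic filter onto the GICv2 distributor filters before raising the software-generated interrupt. A hypothetical call site (the IRQ number and mask are illustrative only, not from this commit):

    // sketch: ask cores 1 and 2 to run their SGI handler
    static void notify_cores_1_and_2(struct XiziTrapDriver* drv)
    {
        const uint32_t sgi_irq = 9; // illustrative SGI id
        drv->send_sgi(sgi_irq, (1u << 1) | (1u << 2), SgiFilter_TargetList);
    }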
@@ -213,8 +179,14 @@ static int _is_interruptable(void)
    return !(val & DIS_INT);
}

int _cur_cpu_id()
{
    return cpu_get_current();
}

static struct XiziTrapDriver xizi_trap_driver = {
    .sys_irq_init = _sys_irq_init,
    .cur_cpu_id = _cur_cpu_id,

    .cpu_irq_enable = _cpu_irq_enable,
    .cpu_irq_disable = _cpu_irq_disable,

@@ -223,7 +195,6 @@ static struct XiziTrapDriver xizi_trap_driver = {
    .switch_hw_irqtbl = _switch_hw_irqtbl,

    .bind_irq_handler = _bind_irq_handler,
    .send_sgi = _send_sgi,

    .is_interruptable = _is_interruptable,
    .hw_before_irq = _hw_before_irq,

@@ -234,7 +205,7 @@ static struct XiziTrapDriver xizi_trap_driver = {

struct XiziTrapDriver* hardkernel_intr_init(struct TraceTag* hardkernel_tag)
{
    xizi_trap_driver.sys_irq_init();
    xizi_trap_driver.sys_irq_init(0);
    xizi_trap_driver.cpu_irq_disable();
    return &xizi_trap_driver;
}
@@ -1,57 +0,0 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
/**
 * @file spinlock.c
 * @brief spinlock interfaces
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2023.11.23
 */
#include <stdint.h>
#include <string.h>

#include "assert.h"
#include "spinlock.h"

bool module_spinlock_use_intr_init(void)
{
    return true;
}

#define SPINLOCK_STATE_UNLOCK 0xFF

enum {
    SPINLOCK_LOCK_NOWAIT = 0,
    SPINLOCK_LOCK_WAITFOREVER = 0xFFFFFFFF,
};

void spinlock_init(struct spinlock* lock, char* name)
{
    lock->owner_cpu = SPINLOCK_STATE_UNLOCK;
    strncpy(lock->name, name, 24);
}

extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
void spinlock_lock(struct spinlock* lock)
{
    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK) {
        ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
        panic("");
    }
    assert(_spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER) == 0);
}

void _spinlock_unlock(struct spinlock* lock);
void spinlock_unlock(struct spinlock* lock)
{
    _spinlock_unlock(lock);
}
@@ -47,43 +47,17 @@ trap_return:
    ldmfd r13!, {r14}
    ldmfd r13!, {r2}
    msr spsr_cxsf, r2
    ldr r0, [r13], #4
    ldr r1, [r13], #4
    ldr r2, [r13], #4
    ldr r3, [r13], #4
    ldr r4, [r13], #4
    ldr r5, [r13], #4
    ldr r6, [r13], #4
    ldr r7, [r13], #4
    ldr r8, [r13], #4
    ldr r9, [r13], #4
    ldr r10, [r13], #4
    ldr r11, [r13], #4
    ldr r12, [r13], #4
    ldm r13!, {pc}^
    ldmfd r13!, {r0-r12, pc}^ // restore context and return

user_trap_swi_enter:
    # save trapframe to swi stack
    sub sp, sp, #56
    str r14, [sp, #52]
    str r12, [sp, #48]
    str r11, [sp, #44]
    str r10, [sp, #40]
    str r9, [sp, #36]
    str r8, [sp, #32]
    str r7, [sp, #28]
    str r6, [sp, #24]
    str r5, [sp, #20]
    str r4, [sp, #16]
    str r3, [sp, #12]
    str r2, [sp, #8]
    str r1, [sp, #4]
    str r0, [sp]
    # save trapframe to swi stack
    cpsid i
    stmfd sp!, {r0-r12, r14} // save context
    mrs r2, spsr // copy spsr to r2
    stmfd r13!, {r2} // save r2(spsr) to the stack

    mrs r2, spsr
    stmfd r13!, {r2}
    stmfd r13!, {r14}
    stmfd r13, {sp, lr}^
    stmfd r13!, {r14} // save r14 again to have one uniform trapframe
    stmfd r13, {sp, lr}^ // save user mode sp and lr
    sub r13, r13, #8

    # call syscall handler
@@ -92,17 +66,13 @@ user_trap_swi_enter:
    b trap_return

trap_irq_enter:
    # save context in irq stack
    sub r14, r14, #4
    sub sp, sp, #16
    str r14, [sp, #12]
    str r2, [sp, #8]
    str r1, [sp, #4]
    str r0, [sp]

    mrs r1, spsr
    mov r0, r13 // irq stack stop
    add r13, r13, #16 // reset IRQ stack
    # save it on the stack as r14 is banked
    cpsid i
    sub r14, r14, #4 // r14 (lr) contains the interrupted PC
    stmfd r13!, {r0-r2, r14} //
    mrs r1, spsr // save spsr_irq
    mov r0, r13 // save stack stop (r13_irq)
    add r13, r13, #16 // reset the IRQ stack

    # switch to the SVC mode
    mrs r2, cpsr
@@ -111,134 +81,110 @@ trap_irq_enter:
    msr cpsr_cxsf, r2

    # build the trap frame
    ldr r2, [r0, #12]
    ldr r2, [r0, #12] // read the r14_irq, then save it
    stmfd r13!, {r2}
    sub r13, r13, #40
    str r12, [r13, #36]
    str r11, [r13, #32]
    str r10, [r13, #28]
    str r9, [r13, #24]
    str r8, [r13, #20]
    str r7, [r13, #16]
    str r6, [r13, #12]
    str r5, [r13, #8]
    str r4, [r13, #4]
    str r3, [r13]

    ldmfd r0, {r3-r5}
    stmfd r13!, {r3-r12} // r4-r12 are preserved (non-banked)
    ldmfd r0, {r3-r5} // copy r0-r2 over from irq stack
    stmfd r13!, {r3-r5}
    stmfd r13!, {r1}
    stmfd r13!, {lr}
    stmfd r13, {sp, lr}^
    stmfd r13!, {r1} // save spsr
    stmfd r13!, {lr} // save lr_svc

    stmfd r13, {sp, lr}^ // save user mode sp and lr
    sub r13, r13, #8

    mov r0, r13 // trapframe as parameters
    bl intr_irq_dispatch
    b trap_return

trap_reset_enter:
    mov r14, #0
    sub r13, r13, #56
    str r14, [r13, #52]
    str r12, [r13, #48]
    str r11, [r13, #44]
    str r10, [r13, #40]
    str r9, [r13, #36]
    str r8, [r13, #32]
    str r7, [r13, #28]
    str r6, [r13, #24]
    str r5, [r13, #20]
    str r4, [r13, #16]
    str r3, [r13, #12]
    str r2, [r13, #8]
    str r1, [r13, #4]
    str r0, [r13]

    mrs r2, spsr
    stmfd r13!, {r2}
    stmfd r13!, {r14}
    stmfd r13, {sp, lr}^
    sub r13, r13, #8
    mov r0, r13
    bl _vector_jumper

trap_dabort:
    sub r14, r14, #8
    sub r13, r13, #56
    str r14, [r13, #52]
    str r12, [r13, #48]
    str r11, [r13, #44]
    str r10, [r13, #40]
    str r9, [r13, #36]
    str r8, [r13, #32]
    str r7, [r13, #28]
    str r6, [r13, #24]
    str r5, [r13, #20]
    str r4, [r13, #16]
    str r3, [r13, #12]
    str r2, [r13, #8]
    str r1, [r13, #4]
    str r0, [r13]
    # save it on the stack as r14 is banked
    cpsid i
    sub r14, r14, #8 // r14 (lr) contains the interrupted PC
    stmfd r13!, {r0-r2, r14} //
    mrs r1, spsr // save spsr_irq
    mov r0, r13 // save stack stop (r13_irq)
    add r13, r13, #16 // reset the IRQ stack

    mrs r2, spsr
    stmfd r13!, {r2}
    stmfd r13!, {r14}
    stmfd r13, {sp, lr}^
    # switch to the SVC mode
    mrs r2, cpsr
    bic r2, r2, #ARM_CPSR_MODE_MASK
    orr r2, r2, #ARM_MODE_SVC
    msr cpsr_cxsf, r2

    # build the trap frame
    ldr r2, [r0, #12] // read the r14_irq, then save it
    stmfd r13!, {r2}
    stmfd r13!, {r3-r12} // r4-r12 are preserved (non-banked)
    ldmfd r0, {r3-r5} // copy r0-r2 over from irq stack
    stmfd r13!, {r3-r5}
    stmfd r13!, {r1} // save spsr
    stmfd r13!, {lr} // save lr_svc

    stmfd r13, {sp, lr}^ // save user mode sp and lr
    sub r13, r13, #8
    mov r0, r13

    mov r0, r13 // trapframe as parameters
    bl dabort_handler

trap_iabort:
    sub r14, r14, #4
    sub r13, r13, #56
    str r14, [r13, #52]
    str r12, [r13, #48]
    str r11, [r13, #44]
    str r10, [r13, #40]
    str r9, [r13, #36]
    str r8, [r13, #32]
    str r7, [r13, #28]
    str r6, [r13, #24]
    str r5, [r13, #20]
    str r4, [r13, #16]
    str r3, [r13, #12]
    str r2, [r13, #8]
    str r1, [r13, #4]
    str r0, [r13]
    # save it on the stack as r14 is banked
    cpsid i
    sub r14, r14, #4 // r14 (lr) contains the interrupted PC
    stmfd r13!, {r0-r2, r14} //
    mrs r1, spsr // save spsr_irq
    mov r0, r13 // save stack stop (r13_irq)
    add r13, r13, #16 // reset the IRQ stack

    mrs r2, spsr
    # switch to the SVC mode
    mrs r2, cpsr
    bic r2, r2, #ARM_CPSR_MODE_MASK
    orr r2, r2, #ARM_MODE_SVC
    msr cpsr_cxsf, r2

    # build the trap frame
    ldr r2, [r0, #12] // read the r14_irq, then save it
    stmfd r13!, {r2}
    stmfd r13!, {r14}
    stmfd r13, {sp, lr}^
    stmfd r13!, {r3-r12} // r4-r12 are preserved (non-banked)
    ldmfd r0, {r3-r5} // copy r0-r2 over from irq stack
    stmfd r13!, {r3-r5}
    stmfd r13!, {r1} // save spsr
    stmfd r13!, {lr} // save lr_svc

    stmfd r13, {sp, lr}^ // save user mode sp and lr
    sub r13, r13, #8
    mov r0, r13

    mov r0, r13 // trapframe as parameters
    bl iabort_handler

trap_undefined_instruction:
    sub r13, r13, #56
    str r14, [r13, #52]
    str r12, [r13, #48]
    str r11, [r13, #44]
    str r10, [r13, #40]
    str r9, [r13, #36]
    str r8, [r13, #32]
    str r7, [r13, #28]
    str r6, [r13, #24]
    str r5, [r13, #20]
    str r4, [r13, #16]
    str r3, [r13, #12]
    str r2, [r13, #8]
    str r1, [r13, #4]
    str r0, [r13]
    # save it on the stack as r14 is banked
    cpsid i
    sub r14, r14, #4 // r14 (lr) contains the interrupted PC
    stmfd r13!, {r0-r2, r14} //
    mrs r1, spsr // save spsr_irq
    mov r0, r13 // save stack stop (r13_irq)
    add r13, r13, #16 // reset the IRQ stack

    mrs r2, spsr
    # switch to the SVC mode
    mrs r2, cpsr
    bic r2, r2, #ARM_CPSR_MODE_MASK
    orr r2, r2, #ARM_MODE_SVC
    msr cpsr_cxsf, r2

    # build the trap frame
    ldr r2, [r0, #12] // read the r14_irq, then save it
    stmfd r13!, {r2}
    stmfd r13!, {r14}
    stmfd r13, {sp, lr}^
    sub r13, r13, #8
    mov r0, r13
    bl handle_undefined_instruction
    stmfd r13!, {r3-r12} // r4-r12 are preserved (non-banked)
    ldmfd r0, {r3-r5} // copy r0-r2 over from irq stack
    stmfd r13!, {r3-r5}
    stmfd r13!, {r1} // save spsr
    stmfd r13!, {lr} // save lr_svc

    stmfd r13, {sp, lr}^ // save user mode sp and lr
    sub r13, r13, #8

    mov r0, r13 // trapframe as parameters
    bl handle_undefined_instruction

init_stack:
    # set the stack for Other mode
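Review note: every rewritten entry stub now follows one pattern: mask IRQs (cpsid i), stash r0-r2 and the adjusted r14 on the exception-mode stack, switch to SVC mode, and rebuild a single uniform trapframe there. The layout that save order produces, read together with dump_tf() in error_debug.c, is sketched below (the real struct lives in the arch context header, which this page does not show; treat this as an assumption):

    // sketch: trapframe layout implied by the save sequence (lowest address first)
    struct trapframe {
        uint32_t sp_usr; // user-mode sp, from stmfd r13, {sp, lr}^
        uint32_t lr_usr; // user-mode lr
        uint32_t lr_svc; // svc-mode lr
        uint32_t spsr;   // status of the interrupted context
        uint32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12;
        uint32_t pc;     // interrupted pc (adjusted exception-mode lr)
    };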
@@ -42,12 +42,13 @@ extern void trap_iabort(void);
extern void trap_dabort(void);
extern void trap_irq_enter(void);
extern void trap_undefined_instruction(void);
extern void handle_reserved(void);
extern void handle_fiq(void);

static struct XiziTrapDriver xizi_trap_driver;

void panic(char* s)
{
    xizi_trap_driver.cpu_irq_disable();
    KPrintf("panic: %s\n", s);
    for (;;)
        ;

@@ -55,7 +56,6 @@ void panic(char* s)

/* stack for different mode*/
static char mode_stack_pages[NR_CPU][NR_MODE_STACKS][MODE_STACK_SIZE];

extern uint32_t _vector_jumper;
extern uint32_t _vector_start;
extern uint32_t _vector_end;
@@ -72,45 +72,34 @@ void init_cpu_mode_stacks(int cpu_id)
    }
}

void handle_reserved(void)
static void _sys_irq_init(int cpu_id)
{
    // unimplemented trap handler
    LOG("Unimplemented Reserved\n");
    panic("");
}

void handle_fiq(void)
{
    LOG("Unimplemented FIQ\n");
    panic("");
}
    init_cpu_mode_stacks(cpu_id);
    if (cpu_id == 0) {
        /* load exception vectors */
        volatile uint32_t* vector_base = &_vector_start;

static void _sys_irq_init()
{
    /* load exception vectors */
    volatile uint32_t* vector_base = &_vector_start;
    // Set Interrupt handler start address
    vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
    vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
    vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
    vector_base[4] = (uint32_t)trap_dabort; // Data Abort
    vector_base[5] = (uint32_t)handle_reserved; // Reserved
    vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
    vector_base[7] = (uint32_t)handle_fiq; // FIQ

    // Set Interrupt handler start address
    vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
    vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
    vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
    vector_base[4] = (uint32_t)trap_dabort; // Data Abort
    vector_base[5] = (uint32_t)handle_reserved; // Reserved
    vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
    vector_base[7] = (uint32_t)handle_fiq; // FIQ

    init_cpu_mode_stacks(0);

    /* active hardware irq responser */
    XScuGic_Config* gic_config = XScuGic_LookupConfig(XPAR_PS7_SCUGIC_0_DEVICE_ID);
    if (NULL == gic_config) {
        ERROR("Error while looking up gic config\n");
        return;
    }
    int gic_init_status = XScuGic_CfgInitialize(&IntcInstance, gic_config, gic_config->CpuBaseAddress);
    if (gic_init_status != XST_SUCCESS) {
        ERROR("Error initializing gic\n");
        return;
    /* active hardware irq responser */
    XScuGic_Config* gic_config = XScuGic_LookupConfig(XPAR_PS7_SCUGIC_0_DEVICE_ID);
    if (NULL == gic_config) {
        ERROR("Error while looking up gic config\n");
        return;
    }
    int gic_init_status = XScuGic_CfgInitialize(&IntcInstance, gic_config, gic_config->CpuBaseAddress);
    if (gic_init_status != XST_SUCCESS) {
        ERROR("Error initializing gic\n");
        return;
    }
    }

    xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
@@ -162,24 +151,6 @@ static void _bind_irq_handler(int irq, irq_handler_t handler)
    xizi_trap_driver.sw_irqtbl[irq].handler = handler;
}

static bool _send_sgi(uint32_t irq, uint32_t bitmask, enum SgiFilterType type)
{
    if (bitmask > (1 << NR_CPU) - 1) {
        return false;
    }

    int cpu_id = 0;
    while (bitmask != 0) {
        if ((bitmask & 0x1) != 0) {
            XScuGic_SoftwareIntr(&IntcInstance, irq, cpu_id);
        }
        cpu_id++;
        bitmask >>= 1;
    }

    return true;
}

static uint32_t _hw_before_irq()
{
@@ -215,8 +186,14 @@ static int _is_interruptable(void)
    return !(val & DIS_INT);
}

int _cur_cpu_id()
{
    return cpu_get_current();
}

static struct XiziTrapDriver xizi_trap_driver = {
    .sys_irq_init = _sys_irq_init,
    .cur_cpu_id = _cur_cpu_id,

    .cpu_irq_enable = _cpu_irq_enable,
    .cpu_irq_disable = _cpu_irq_disable,

@@ -225,7 +202,6 @@ static struct XiziTrapDriver xizi_trap_driver = {
    .switch_hw_irqtbl = _switch_hw_irqtbl,

    .bind_irq_handler = _bind_irq_handler,
    .send_sgi = _send_sgi,

    .is_interruptable = _is_interruptable,
    .hw_before_irq = _hw_before_irq,

@@ -236,7 +212,7 @@ static struct XiziTrapDriver xizi_trap_driver = {

struct XiziTrapDriver* hardkernel_intr_init(struct TraceTag* hardkernel_tag)
{
    xizi_trap_driver.sys_irq_init();
    xizi_trap_driver.cpu_irq_enable();
    xizi_trap_driver.sys_irq_init(0);
    xizi_trap_driver.cpu_irq_disable();
    return &xizi_trap_driver;
}
@@ -0,0 +1,119 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
/**
 * @file spinlock.c
 * @brief spinlock interfaces
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2023.11.23
 */
#include <stdint.h>
#include <string.h>

#include "spinlock.h"
#include "trap_common.h"

#include "assert.h"
#include "list.h"
#include "multicores.h"

struct lock_node {
    struct double_list_node node;
    int cpu_id;
};

static struct double_list_node lock_request_guard;
static struct lock_node core_lock_request[NR_CPU];
static struct spinlock request_lock;
bool module_spinlock_use_intr_init(void)
{
    for (int i = 0; i < NR_CPU; i++) {
        core_lock_request[i].cpu_id = i;
        doubleListNodeInit(&core_lock_request[i].node);
    }
    doubleListNodeInit(&lock_request_guard);
    spinlock_init(&request_lock, "requestlock");
    return true;
}

#define SPINLOCK_STATE_UNLOCK 0xFF

enum {
    SPINLOCK_LOCK_NOWAIT = 0,
    SPINLOCK_LOCK_WAITFOREVER = 0xFFFFFFFF,
};

void spinlock_init(struct spinlock* lock, char* name)
{
    lock->owner_cpu = SPINLOCK_STATE_UNLOCK;
    strncpy(lock->name, name, 24);
}

extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
void _spinlock_unlock(struct spinlock* lock);

void spinlock_lock(struct spinlock* lock)
{
    int cur_cpu_id = cur_cpuid();
    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
        ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
        panic("");
    }

    struct double_list_node* p_lock_node = &core_lock_request[cur_cpu_id].node;
    _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
    doubleListAddOnBack(p_lock_node, &lock_request_guard);
    _spinlock_unlock(&request_lock);

    while (lock_request_guard.next != p_lock_node)
        ;

    _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
}

void spinlock_unlock(struct spinlock* lock)
{
    struct double_list_node* p_lock_node = &core_lock_request[cur_cpuid()].node;
    assert(lock_request_guard.next == p_lock_node);
    _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
    _double_list_del(p_lock_node->prev, p_lock_node->next);
    _spinlock_unlock(&request_lock);

    _spinlock_unlock(lock);
}

bool spinlock_try_lock(struct spinlock* lock)
{
    int cur_cpu_id = cur_cpuid();
    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
        ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
        panic("");
    }

    struct double_list_node* p_lock_node = &core_lock_request[cur_cpu_id].node;
    _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
    doubleListAddOnBack(p_lock_node, &lock_request_guard);
    if (lock_request_guard.next != p_lock_node) {
        _double_list_del(p_lock_node->prev, p_lock_node->next);
        _spinlock_unlock(&request_lock);
        return false;
    }
    _spinlock_unlock(&request_lock);
    _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);

    return true;
}

bool is_spinlock_hold_by_current_cpu(struct spinlock* lock)
{
    return lock->owner_cpu;
}
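Review note: the new spinlock queues each CPU on lock_request_guard (a doubly linked list protected by request_lock) and spins until its own node reaches the head, so acquisition is FIFO across cores instead of a free-for-all on the hardware lock, and double-locking is now detected per-CPU rather than for any holder. As written, is_spinlock_hold_by_current_cpu() returns the raw owner_cpu value; a comparison against cur_cpuid() may be what was intended. Minimal usage sketch (the lock variable and call site are illustrative):

    // sketch: FIFO-fair lock usage
    static struct spinlock g_demo_lock;

    void demo(void)
    {
        spinlock_init(&g_demo_lock, "demo");
        spinlock_lock(&g_demo_lock);   // enqueue this CPU, spin to head, take hw lock
        /* critical section */
        spinlock_unlock(&g_demo_lock); // dequeue, then release hw lock
    }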
@@ -34,11 +34,13 @@ Modification:
#define STACK_DEPTH 32

struct spinlock { // Mutex.
    uint32_t owner_cpu; // 1 for locked, 0 for unlocked
    volatile uint32_t owner_cpu; // 1 for locked, 0 for unlocked
    char name[28]; // The call stack (an array of program counters)
} __attribute__((aligned(32)));

bool module_spinlock_use_intr_init(void);
void spinlock_init(struct spinlock* lock, char* name);
void spinlock_lock(struct spinlock* lock);
void spinlock_unlock(struct spinlock* lock);
void spinlock_unlock(struct spinlock* lock);
bool spinlock_try_lock(struct spinlock* lock);
bool is_spinlock_hold_by_current_cpu(struct spinlock* lock);
@@ -59,15 +59,15 @@ struct XiziTrapDriver {
    /* current irq number happening in cpu*/
    uint32_t curr_int[NR_CPU];

    void (*sys_irq_init)();
    void (*sys_irq_init)(int);
    int (*cur_cpu_id)();

    void (*cpu_irq_enable)();
    void (*cpu_irq_disable)();
    void (*single_irq_enable)(int irq, int cpu, int prio);
    void (*single_irq_disable)(int irq, int cpu);
    uint32_t* (*switch_hw_irqtbl)(uint32_t*);

    bool (*send_sgi)(uint32_t, uint32_t, enum SgiFilterType);
    uint32_t* (*switch_hw_irqtbl)(uint32_t*);
    void (*bind_irq_handler)(int, irq_handler_t);

    /* check if no if interruptable */

@@ -100,4 +100,7 @@ void panic(char* s);
bool intr_distributer_init(struct IrqDispatcherRightGroup*);
void intr_irq_dispatch(struct trapframe* tf);
bool swi_distributer_init(struct SwiDispatcherRightGroup*);
void software_irq_dispatch(struct trapframe* tf);
void software_irq_dispatch(struct trapframe* tf);

void dabort_reason(struct trapframe* r);
void iabort_reason(struct trapframe* r);
@@ -89,11 +89,16 @@ static void load_boot_pgdir()
}

extern void main(void);
static bool _bss_inited = false;
void bootmain()
{
    build_boot_pgdir();
    load_boot_pgdir();
    __asm__ __volatile__("add sp, sp, %0" ::"r"(KERN_MEM_BASE - PHY_MEM_BASE));
    memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
    if (!_bss_inited) {
        memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
        _bss_inited = true;
    }

    main();
}
@@ -56,10 +56,11 @@ Modification:
#define MAX_NR_FREE_PAGES ((PHY_MEM_STOP - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)

/* User memory layout */
#define USER_STACK_SIZE PAGE_SIZE
#define USER_STACK_SIZE MODE_STACK_SIZE
#define USER_MEM_BASE (0x00000000)
#define USER_MEM_TOP DEV_VRTMEM_BASE
#define USER_IPC_SPACE_BASE (0x70000000)
#define USER_IPC_USE_ALLOCATOR_WATERMARK (0x70010000)
#define USER_IPC_SPACE_TOP (USER_MEM_TOP - USER_STACK_SIZE)

/* Deivce memory layout */
@@ -82,6 +82,11 @@ static void tlb_flush_range(uintptr_t vstart, int len)
    }
}

static void tlb_flush_all()
{
    CLEARTLB(0);
}

static struct MmuCommonDone mmu_common_done = {
    .MmuDevPteAttr = GetDevPteAttr,
    .MmuPdeAttr = GetPdeAttr,

@@ -91,6 +96,7 @@ static struct MmuCommonDone mmu_common_done = {

    .LoadPgdirCrit = load_pgdir_critical,
    .LoadPgdir = load_pgdir,
    .TlbFlushAll = tlb_flush_all,
    .TlbFlush = tlb_flush_range,
};
@@ -30,20 +30,6 @@ Modification:
#include "mmu.h"
#include "mmu_common.h"

void GetDevPteAttr(uintptr_t* attr)
{
    static char init = 0;
    static PageTblEntry dev_pte_attr;
    if (init == 0) {
        init = 1;

        dev_pte_attr.entry = 0;
        dev_pte_attr.desc_type = PAGE_4K;
        dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
    }
    *attr = dev_pte_attr.entry;
}

void GetUsrPteAttr(uintptr_t* attr)
{
    static char init = 0;

@@ -55,6 +41,7 @@ void GetUsrPteAttr(uintptr_t* attr)
        usr_pte_attr.desc_type = PAGE_4K;
        usr_pte_attr.B = 1;
        usr_pte_attr.C = 1;
        usr_pte_attr.S = 1;
        usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
    }
    *attr = usr_pte_attr.entry;

@@ -74,6 +61,20 @@ void GetUsrDevPteAttr(uintptr_t* attr)
    *attr = usr_pte_attr.entry;
}

void GetDevPteAttr(uintptr_t* attr)
{
    static char init = 0;
    static PageTblEntry dev_pte_attr;
    if (init == 0) {
        init = 1;

        dev_pte_attr.entry = 0;
        dev_pte_attr.desc_type = PAGE_4K;
        dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
    }
    *attr = dev_pte_attr.entry;
}

void GetKernPteAttr(uintptr_t* attr)
{
    static char init = 0;

@@ -85,6 +86,7 @@ void GetKernPteAttr(uintptr_t* attr)
        kern_pte_attr.desc_type = PAGE_4K;
        kern_pte_attr.B = 1;
        kern_pte_attr.C = 1;
        kern_pte_attr.S = 1;
        kern_pte_attr.AP1_0 = AccessPermission_KernelOnly;
    }
    *attr = kern_pte_attr.entry;
@@ -53,13 +53,14 @@ Modification:
#define NUM_TOPLEVEL_PDE NUM_LEVEL3_PDE

#define PAGE_SIZE LEVEL4_PTE_SIZE
#define MAX_NR_FREE_PAGES ((PHY_MEM_STOP - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)
#define MAX_NR_FREE_PAGES ((PHY_USER_FREEMEM_BASE - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)

/* User memory layout */
#define USER_STACK_SIZE PAGE_SIZE
#define USER_STACK_SIZE MODE_STACK_SIZE
#define USER_MEM_BASE (0x00000000)
#define USER_MEM_TOP DEV_VRTMEM_BASE
#define USER_IPC_SPACE_BASE (0x70000000)
#define USER_IPC_USE_ALLOCATOR_WATERMARK (0x70010000)
#define USER_IPC_SPACE_TOP (USER_MEM_TOP - USER_STACK_SIZE)

/* Deivce memory layout */
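Review note: MAX_NR_FREE_PAGES now counts only the pages below PHY_USER_FREEMEM_BASE, which implements the commit's split of free physical memory into kernel and userland regions. A worked example (the two base addresses below are assumptions for illustration; real values come from the board's memlayout headers):

    #define LEVEL4_PTE_SHIFT 12                /* 4 KiB pages */
    #define PHY_MEM_BASE 0x10000000UL          /* assumed for the example */
    #define PHY_USER_FREEMEM_BASE 0x30000000UL /* assumed for the example */
    #define MAX_NR_FREE_PAGES ((PHY_USER_FREEMEM_BASE - PHY_MEM_BASE) >> LEVEL4_PTE_SHIFT)
    /* -> 0x20000000 >> 12 = 131072 kernel-managed pages (512 MiB) */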
@@ -29,6 +29,7 @@ struct MmuCommonDone

    void (*LoadPgdirCrit)(uintptr_t pgdir_paddr, struct TraceTag*);
    void (*LoadPgdir)(uintptr_t pgdir_paddr);
    void (*TlbFlushAll)();
    void (*TlbFlush)(uintptr_t vaddr, int len);
};
@@ -60,8 +60,6 @@ static inline int namecmp(const char* s, const char* t)
static struct TraceMeta* alloc_trace_meta()
{
    int index = -1;

    spinlock_lock(&sys_tracer.trace_meta_bitmap_lock);
    for (uint32_t idx = 0; idx < BITS_TRACEMETA_BITMAP; idx++) {
        if (sys_tracer.trace_meta_bit_map[idx] == 0xFFFFFFFF) {
            continue;

@@ -74,7 +72,6 @@ static struct TraceMeta* alloc_trace_meta()
            break;
        }
    }
    spinlock_unlock(&sys_tracer.trace_meta_bitmap_lock);

    if (index == -1) {
        panic("Tracer no enough TracerMeta.");

@@ -87,15 +84,12 @@ static struct TraceMeta* alloc_trace_meta()
static bool dealloc_trace_meta(struct TraceMeta* meta)
{
    int index = meta->index;

    spinlock_lock(&sys_tracer.trace_meta_bitmap_lock);
    // clear bitmap
    uint32_t outer_index = index / 32;
    uint32_t inner_index = index % 32;
    sys_tracer.trace_meta_bit_map[outer_index] &= (uint32_t)(~(1 << inner_index));
    // clear meta
    sys_tracer.trace_meta_poll[index].type = TRACER_INVALID;
    spinlock_unlock(&sys_tracer.trace_meta_bitmap_lock);

    if (index == -1) {
        panic("Tracer no enough TracerMeta.");

@@ -337,7 +331,7 @@ static struct TraceMeta* tracer_find_meta(struct TraceMeta* const p_owner, char*
        return p_owner_inside;
    }
    if ((vnp = tracer_find_meta_onestep(p_owner_inside, name, NULL)) == 0) {
        ERROR("Not such object: %s\n", path);
        DEBUG("Not such object: %s\n", path);
        return NULL;
    }
    p_owner_inside = vnp;

@@ -527,7 +521,8 @@ bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* n
    if (!tracer_create_trace(new_tag, owner, name, type)) {
        return false;
    }
    return tracer_write_trace(new_tag, (char*)&p_resource, 0, sizeof(void*)) == sizeof(void*);
    bool ret = tracer_write_trace(new_tag, (char*)&p_resource, 0, sizeof(void*)) == sizeof(void*);
    return ret;
}

bool DeleteResource(struct TraceTag* target, struct TraceTag* owner)
@ -61,7 +61,6 @@ static void tracer_mem_chunk_sync(struct tracer_mem_chunk* b)
|
|||
|
||||
void mem_chunk_synchronizer_init(uintptr_t mem_chunk_base, uint32_t mem_chunk_size, uint32_t nr_mem_chunks)
|
||||
{
|
||||
    spinlock_init(&tracer_mem_chunk_syner.lock, "tracer_mem_chunk_syner");
    tracer_mem_chunk_syner.mem_chunk_base = mem_chunk_base;
    tracer_mem_chunk_syner.mem_chunk_size = mem_chunk_size;
    tracer_mem_chunk_syner.nr_mem_chunks = nr_mem_chunks;

@ -76,8 +75,6 @@ void mem_chunk_synchronizer_init(uintptr_t mem_chunk_base, uint32_t mem_chunk_size

static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
{
    spinlock_lock(&tracer_mem_chunk_syner.lock);

    // cached mem_chunk cache
    struct tracer_mem_chunk* b;
    DOUBLE_LIST_FOR_EACH_ENTRY(b, &tracer_mem_chunk_syner.head, list_node)

@ -85,11 +82,8 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
        if (b->chunk_id == chunk_id) {
            if (!(b->flag & TRACER_MEM_CHUNK_BUSY)) {
                b->flag |= TRACER_MEM_CHUNK_BUSY;
                spinlock_unlock(&tracer_mem_chunk_syner.lock);
                return b;
            }
            ERROR("tracer mem_chunk syner is locked\n");
            panic("");
        }
    }

@ -99,7 +93,6 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
        if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
            b->chunk_id = chunk_id;
            b->flag = TRACER_MEM_CHUNK_BUSY;
            spinlock_unlock(&tracer_mem_chunk_syner.lock);
            return b;
        }
    }

@ -130,17 +123,13 @@ void tracer_mem_chunk_write(struct tracer_mem_chunk* b)
void tracer_mem_chunk_release(struct tracer_mem_chunk* b)
{
    if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
        panic("tracer mem_chunk release but it's busy occupied");
        panic("tracer mem_chunk release but it's not busy occupied");
    }

    // move mem_chunk that was just used to the head of the cache list
    spinlock_lock(&tracer_mem_chunk_syner.lock);

    doubleListDel(&b->list_node);
    doubleListAddOnHead(&b->list_node, &tracer_mem_chunk_syner.head);
    b->flag &= ~TRACER_MEM_CHUNK_BUSY;

    spinlock_unlock(&tracer_mem_chunk_syner.lock);
}

static void tracer_mem_chunk_zero(uint32_t chunk_id)

@ -157,7 +146,6 @@ static void tracer_mem_chunk_zero(uint32_t chunk_id)
static uint32_t find_first_free_mem_chunk()
{
    /// @todo another mem_chunk
    spinlock_lock(&sys_tracer.mem_chunk_bitmap_lock);
    for (uint32_t idx = 0; idx < BITS_MEM_CHUNK_BITMAP; idx++) {
        if (sys_tracer.mem_chunks_bit_map[idx] == 0xFFFFFFFF) {
            continue;
        }

@ -165,11 +153,9 @@ static uint32_t find_first_free_mem_chunk()
        uint32_t position = __builtin_ffs(~sys_tracer.mem_chunks_bit_map[idx]);
        if (position != 32) {
            sys_tracer.mem_chunks_bit_map[idx] |= (1 << (position - 1));
            spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
            return idx * 32 + position;
        }
    }
    spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
    panic("Tracer: not enough space.");
    return 0;
}

@ -184,11 +170,9 @@ uint32_t tracer_mem_chunk_alloc()
void tracer_mem_chunk_free(uint32_t chunk_id)
{
    assert(chunk_id >= 0 && chunk_id < NR_TRACER_MEM_CHUNKS);
    spinlock_lock(&sys_tracer.mem_chunk_bitmap_lock);
    uint32_t idx = chunk_id / 32;
    uint32_t inner_mem_chunk_bit = chunk_id % 32;
    // assert mem_chunk is allocated
    assert((sys_tracer.mem_chunks_bit_map[idx] & (1 << inner_mem_chunk_bit)) != 0);
    sys_tracer.mem_chunks_bit_map[idx] &= (uint32_t)(~(1 << inner_mem_chunk_bit));
    spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
}
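This bitmap discipline is easy to get backwards: the allocator above hands out ids derived from __builtin_ffs, which numbers bits from 1, and the free path must invert exactly the same word/bit mapping (word = id / 32, bit = id % 32). A minimal, self-contained sketch of the pattern with 0-based ids; names here are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>

#define NR_CHUNKS 1024
static uint32_t bitmap[NR_CHUNKS / 32];

/* Find, mark and return the first free chunk; -1 when full. */
static int chunk_alloc(void)
{
    for (uint32_t w = 0; w < NR_CHUNKS / 32; w++) {
        if (bitmap[w] == 0xFFFFFFFFu)
            continue; /* word fully allocated */
        int bit = __builtin_ffs(~bitmap[w]) - 1; /* ffs is 1-based */
        bitmap[w] |= (1u << bit);
        return (int)(w * 32 + bit);
    }
    return -1;
}

/* Free must use word = id / 32 and bit = id % 32, never the other way around. */
static void chunk_free(uint32_t id)
{
    assert(id < NR_CHUNKS);
    uint32_t w = id / 32;
    uint32_t bit = id % 32;
    assert(bitmap[w] & (1u << bit)); /* must currently be allocated */
    bitmap[w] &= ~(1u << bit);
}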
@ -23,7 +23,11 @@ INC_DIR = -I$(KERNEL_ROOT)/services/shell/letter-shell \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/app

all: init test_fs simple_client simple_server shell fs_server test_priority readme.txt | bin
ifeq ($(BOARD), imx6q-sabrelite)
all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr test_irq_send readme.txt | bin
else
all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr readme.txt | bin
endif
	../tools/mkfs/mkfs ./fs.img $^
	@mv $(filter-out readme.txt, $^) bin
	@mv *.o bin

@ -32,31 +36,41 @@ all: init test_fs simple_client simple_server shell fs_server test_priority read
bin:
	@mkdir -p bin

shell: shell_port.o libserial.o shell_cmd_list.o shell.o shell_ext.o libfs_to_client.o libipc.o session.o usyscall.o
ifeq ($(BOARD), imx6q-sabrelite)
test_irq_send: test_irq_sender.o usyscall.o libserial.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm
endif

test_irq_hdlr: test_irq_handler.o libserial.o libipc.o session.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

shell: shell_port.o libserial.o shell_cmd_list.o shell.o shell_ext.o libfs_to_client.o libipc.o session.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

init: init.o libfs_to_client.o libipc.o session.o libserial.o usyscall.o
init: init.o libfs_to_client.o libipc.o session.o libserial.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

test_fs: test_fs.o libfs_to_client.o libipc.o session.o libserial.o usyscall.o
test_fs: test_fs.o libfs_to_client.o libipc.o session.o libserial.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

simple_client: simple_client.o libserial.o libipc.o session.o simple_service.o libfs_to_client.o usyscall.o
simple_client: simple_client.o libserial.o libipc.o session.o simple_service.o libfs_to_client.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

simple_server: simple_server.o libserial.o libipc.o session.o simple_service.o usyscall.o
simple_server: simple_server.o libserial.o libipc.o session.o simple_service.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

fs_server: fs_server.o libfs_to_client.o fs.o libserial.o libipc.o session.o block_io.o usyscall.o
fs_server: fs_server.o libfs_to_client.o fs.o libserial.o libipc.o session.o block_io.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm

test_priority: test_priority.o libserial.o usyscall.o
test_priority: test_priority.o libserial.o usyscall.o libmem.o
	@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
	@${objdump} -S $@ > $@.asm
@ -19,7 +19,10 @@
int main(int argc, char* argv[])
{
    struct Session session;
    connect_session(&session, "MemFS", 8092);
    printf("init: connecting MemFS\n");
    while (connect_session(&session, "MemFS", 8092) < 0)
        ;
    printf("init: connect MemFS success\n");

    int fd;
    char* shell_task_param[2] = { "/shell", 0 };

@ -28,7 +31,7 @@ int main(int argc, char* argv[])
        exit();
    }

    if (spawn(&session, fd, read, shell_task_param[0], shell_task_param) < 0) {
    if (spawn(&session, fd, read, fsize, shell_task_param[0], shell_task_param) < 0) {
        printf("Syscall Spawn shell failed\n");
    }
@ -35,7 +35,7 @@ signed short userShellRead(char* data, unsigned short len)
    while (length--) {
        cur_read = getc();
        if (cur_read == 0xff) {
            yield();
            yield(SYS_TASK_YIELD_NO_REASON);
        }
        // *data++ = getc();
        *data++ = cur_read;

@ -50,7 +50,8 @@ int main(void)

    shellInit(&shell, shellBuffer, 512);

    connect_session(&session_fs, "MemFS", 8092);
    while (connect_session(&session_fs, "MemFS", 8092) < 0)
        ;
    if (!session_fs.buf) {
        printf("session connect failed\n");
        return -1;
@ -113,10 +113,10 @@ int main(int argc, char** argv)
        itoa(id - 1, id_buf, 10);
        char* shell_task_param[3] = { "/simple_client", id_buf, 0 };
        if ((fd = open(&session, shell_task_param[0])) >= 0) {
            if (spawn(&session, fd, read, shell_task_param[0], shell_task_param) < 0) {
            if (spawn(&session, fd, read, fsize, shell_task_param[0], shell_task_param) < 0) {
                printf("Syscall Spawn simple_client failed\n");
            }
            if (spawn(&session, fd, read, shell_task_param[0], shell_task_param) < 0) {
            if (spawn(&session, fd, read, fsize, shell_task_param[0], shell_task_param) < 0) {
                printf("Syscall Spawn simple_client failed\n");
            }
            close(&session, fd);
@ -0,0 +1,45 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

#include "libipc.h"
#include "libserial.h"
#include "usyscall.h"

IPC_SERVICES(IpcSwIntrHandler, Ipc_intr_3);

enum {
    SW_INTERRUPT_3 = 3,
};

void sgi_test_handler(void)
{
    printf("TEST_SW_HDLR: In %s()\n", __func__);
}

int IPC_DO_SERVE_FUNC(Ipc_intr_3)(void* useless)
{
    sgi_test_handler();
    return 0;
}

IPC_SERVER_INTERFACE(Ipc_intr_3, 1);
IPC_SERVER_REGISTER_INTERFACES(IpcSwIntrHandler, 1, Ipc_intr_3);
int main()
{
    if (register_irq(SW_INTERRUPT_3, Ipc_intr_3) == -1) {
        printf("TEST_SW_HDLR: bind failed");
        exit();
    }
    ipc_server_loop(&IpcSwIntrHandler);

    exit();
}
@ -13,12 +13,12 @@ cflags = -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=so
c_useropts = -O0

INC_DIR = -I$(KERNEL_ROOT)/services/fs/libfs \
	-I$(KERNEL_ROOT)/services/fs/fs_server/include \
	-I$(KERNEL_ROOT)/services/lib/ipc \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/app

board: libserial.o usyscall.o
board: libserial.o usyscall.o test_irq_sender.o
	@mv $^ ../../app

%.o: %.c
@ -0,0 +1,99 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "libserial.h"
#include "usyscall.h"

enum {
    SW_INTERRUPT_3 = 3,
};

enum {
    ARM_PERIPHERAL_BASE = 0x00A00000,
    MX6Q_GICD_BASE_OFFSET = 0x1000,
    MX6Q_GICC_BASE_OFFSET = 0x100,

    ARM_PERIPHERAL_VIRT_BASE = 0x50000000,
};

enum _gicd_sgi_filter {
    //! Forward the interrupt to the CPU interfaces specified in the @a target_list parameter.
    kGicSgiFilter_UseTargetList = 0,

    //! Forward the interrupt to all CPU interfaces except that of the processor that requested
    //! the interrupt.
    kGicSgiFilter_AllOtherCPUs = 1,

    //! Forward the interrupt only to the CPU interface of the processor that requested the
    //! interrupt.
    kGicSgiFilter_OnlyThisCPU = 2
};

struct _gicd_registers {
    uint32_t CTLR; //!< Distributor Control Register.
    uint32_t TYPER; //!< Interrupt Controller Type Register.
    uint32_t IIDR; //!< Distributor Implementer Identification Register.
    uint32_t _reserved0[29];
    uint32_t IGROUPRn[8]; //!< Interrupt Group Registers.
    uint32_t _reserved1[24];
    uint32_t ISENABLERn[32]; //!< Interrupt Set-Enable Registers.
    uint32_t ICENABLERn[32]; //!< Interrupt Clear-Enable Registers.
    uint32_t ISPENDRn[32]; //!< Interrupt Set-Pending Registers.
    uint32_t ICPENDRn[32]; //!< Interrupt Clear-Pending Registers.
    uint32_t ICDABRn[32]; //!< Active Bit Registers.
    uint32_t _reserved2[32];
    uint8_t IPRIORITYRn[255 * sizeof(uint32_t)]; //!< Interrupt Priority Registers. (Byte accessible)
    uint32_t _reserved3;
    uint8_t ITARGETSRn[255 * sizeof(uint32_t)]; //!< Interrupt Processor Targets Registers. (Byte accessible)
    uint32_t _reserved4;
    uint32_t ICFGRn[64]; //!< Interrupt Configuration Registers.
    uint32_t _reserved5[128];
    uint32_t SGIR; //!< Software Generated Interrupt Register
};
typedef volatile struct _gicd_registers gicd_t;

enum _gicd_sgir_fields {
    kBP_GICD_SGIR_TargetListFilter = 24,
    kBM_GICD_SGIR_TargetListFilter = (0x3 << kBP_GICD_SGIR_TargetListFilter),

    kBP_GICD_SGIR_CPUTargetList = 16,
    kBM_GICD_SGIR_CPUTargetList = (0xff << kBP_GICD_SGIR_CPUTargetList),

    kBP_GICD_SGIR_NSATT = 15,
    kBM_GICD_SGIR_NSATT = (1 << kBP_GICD_SGIR_NSATT),

    kBP_GICD_SGIR_SGIINTID = 0,
    kBM_GICD_SGIR_SGIINTID = 0xf
};

void gic_send_sgi(uint32_t irqID, uint32_t target_list, uint32_t filter_list)
{
    gicd_t* gicd = (gicd_t*)(ARM_PERIPHERAL_VIRT_BASE + MX6Q_GICD_BASE_OFFSET);
    gicd->SGIR = (filter_list << kBP_GICD_SGIR_TargetListFilter) //
        | (target_list << kBP_GICD_SGIR_CPUTargetList) //
        | (irqID & 0xf);
}

int main()
{
    static char prog_name[] = "TEST_IRQ_SEND";
    printf("%s: Mapping GIC\n", prog_name);
    mmap(ARM_PERIPHERAL_VIRT_BASE, ARM_PERIPHERAL_BASE, 0x2000, true);

    int send_time = 1000;
    printf("%s: Sending soft interrupt for %d times\n", prog_name, send_time);
    for (int i = 0; i < send_time; i++) {
        gic_send_sgi(SW_INTERRUPT_3, 0xF, kGicSgiFilter_UseTargetList);
        printf("%s: Soft interrupt send 1 time\n", prog_name);
    }
    printf("%s: Soft interrupt send done\n", prog_name);
    exit();
}
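As a sanity check on the field layout above, the SGIR word the test writes can be computed by hand; a small host-side sketch (positions taken from the _gicd_sgir_fields enum in this file, function names illustrative):

#include <assert.h>
#include <stdint.h>

/* Pack filter, CPU target list and SGI id exactly as gic_send_sgi() does. */
static uint32_t pack_sgir(uint32_t filter, uint32_t targets, uint32_t irq_id)
{
    return (filter << 24) | (targets << 16) | (irq_id & 0xf);
}

int main(void)
{
    /* SGI 3 to CPUs 0-3 through the target-list filter, as in main() above. */
    assert(pack_sgir(0, 0xF, 3) == 0x000F0003u);
    return 0;
}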
@ -10,6 +10,7 @@
 * See the Mulan PSL v2 for more details.
 */
#include "usyscall.h"
#include "libmem.h"

static int
syscall(int sys_num, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)

@ -33,14 +34,18 @@ syscall(int sys_num, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)
    return ret;
}

int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, char* name, char** argv)
int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, ipc_fsize_fn ipc_fsize, char* name, char** argv)
{
    struct KernReadTool read_tool = {
        .session = session,
        .fd = fd,
        .ipc_read = ipc_read,
    };
    return syscall(SYSCALL_SPAWN, (intptr_t)&read_tool, (intptr_t)name, (intptr_t)argv, 0);
    int file_size = ipc_fsize(session, fd);
    void* img = malloc(file_size);
    int read_len = 0, cur_read_len = 0;
    while (read_len < file_size) {
        cur_read_len = file_size - read_len < 4096 ? file_size - read_len : 4096;
        read_len += ipc_read(session, fd, img + read_len, read_len, cur_read_len);
    }
    int ret = syscall(SYSCALL_SPAWN, (intptr_t)img, (intptr_t)name, (intptr_t)argv, 0);
    free(img);
    return ret;
}

int exit()

@ -48,9 +53,14 @@ int exit()
    return syscall(SYSCALL_EXIT, 0, 0, 0, 0);
}

int yield()
int yield(task_yield_reason reason)
{
    return syscall(SYSCALL_YIELD, 0, 0, 0, 0);
    return syscall(SYSCALL_YIELD, (uintptr_t)reason, 0, 0, 0);
}

int kill(int pid)
{
    return syscall(SYSCALL_KILL, (intptr_t)pid, 0, 0, 0);
}

int register_server(char* name)
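With this change, loading the ELF image moves from the kernel into the user-level library: spawn sizes the file with ipc_fsize, pulls it through ipc_read in 4096-byte slices, and only then traps into SYSCALL_SPAWN with a complete image. A hedged usage sketch (session setup assumed as in init.c; read and fsize are the libfs client stubs added elsewhere in this commit):

struct Session session;
while (connect_session(&session, "MemFS", 8092) < 0)
    ; /* fs server may not be registered yet */

char* argv[2] = { "/shell", 0 };
int fd = open(&session, argv[0]);
if (fd >= 0) {
    /* spawn() now reads the whole image itself before issuing the syscall */
    if (spawn(&session, fd, read, fsize, argv[0], argv) < 0)
        printf("spawn %s failed\n", argv[0]);
    close(&session, fd);
}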
@ -30,6 +30,8 @@
#define SYSCALL_EXEC 9 // run elf using current task
#define SYSCALL_SYS_STATE 10 // run system state
#define SYSCALL_REGISTER_IRQ 11 //

#define SYSCALL_KILL 12 // kill the task by id
// clang-format on

typedef enum {

@ -42,6 +44,12 @@ typedef enum {
    SYS_STATE_SHOW_CPU_INFO,
} sys_state_option;

typedef enum {
    SYS_TASK_YIELD_NO_REASON = 0x0,
    SYS_TASK_YIELD_FOREVER = 0x1,
    SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason;

typedef union {
    struct {
        uintptr_t memblock_start;

@ -51,17 +59,13 @@ typedef union {
} sys_state_info;

typedef int (*ipc_read_fn)(struct Session* session, int fd, char* dst, int offset, int len);
typedef int (*ipc_fsize_fn)(struct Session* session, int fd);
typedef int (*ipc_write_fn)(struct Session* session, int fd, char* src, int offset, int len);

struct KernReadTool {
    struct Session* session;
    int fd;
    ipc_read_fn ipc_read;
};

int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, char* name, char** argv);
int spawn(struct Session* session, int fd, ipc_read_fn ipc_read, ipc_fsize_fn ipc_fsize, char* name, char** argv);
int exit();
int yield();
int yield(task_yield_reason reason);
int kill(int pid);
int register_server(char* name);
int session(char* path, int capacity, struct Session* user_session);
int poll_session(struct Session* userland_session_arr, int arr_capacity);
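The reason code lets the kernel distinguish a plain time-slice give-up from a wait that can be turned into a real block. A sketch of the intended call sites, as this commit uses them (an interpretation of the enum, not a verified kernel contract):

/* ordinary polling loop: just reschedule */
yield(SYS_TASK_YIELD_NO_REASON);

/* waiting on an IPC reply: hint that the task may be blocked until woken */
while (msg->header.done == 0)
    yield(SYS_TASK_YIELD_BLOCK_IPC);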
@ -18,6 +18,7 @@ c_useropts = -O0
INC_DIR = -I$(KERNEL_ROOT)/services/fs/libfs \
	-I$(KERNEL_ROOT)/services/fs/fs_server/include \
	-I$(KERNEL_ROOT)/services/lib/ipc \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/app
@ -66,7 +66,7 @@ int IPC_DO_SERVE_FUNC(Ipc_ls)(char* path)
        printf("ls: find target Inode failed, ip: %x(%d), dp: %x(%d)\n", ip, ip->inum, dp, dp->inum);
        return -1;
    }
    if (ip->type != T_DIR) {
    if (ip->type != FS_DIRECTORY) {
        printf("ls: not a dir\n");
        return -1;
    }

@ -282,7 +282,7 @@ int IPC_DO_SERVE_FUNC(Ipc_read)(int* fd, char* dst, int* offset, int* len)

    int cur_read_len = InodeRead(ip, dst, *offset, *len);

    return *len;
    return cur_read_len;
}

int IPC_DO_SERVE_FUNC(Ipc_write)(int* fd, char* src, int* offset, int* len)

@ -305,18 +305,36 @@ int IPC_DO_SERVE_FUNC(Ipc_write)(int* fd, char* src, int* offset, int* len)
    return cur_write_len;
}

int IPC_DO_SERVE_FUNC(Ipc_fsize)(int* fd)
{
    struct FileDescriptor* fdp = GetFileDescriptor(*fd);
    if (!fdp) {
        printf("fsize: fd invalid\n");
        return -1;
    }

    struct Inode* ip = fdp->data;
    if (ip->type != FS_FILE) {
        printf("fsize: %s is not a file\n", fdp->path);
        return -1;
    }

    return ip->size;
}

IPC_SERVER_INTERFACE(Ipc_ls, 1);
IPC_SERVER_INTERFACE(Ipc_cd, 1);
IPC_SERVER_INTERFACE(Ipc_mkdir, 1);
IPC_SERVER_INTERFACE(Ipc_delete, 1);
IPC_SERVER_INTERFACE(Ipc_cat, 1);
IPC_SERVER_INTERFACE(Ipc_fsize, 1);

IPC_SERVER_INTERFACE(Ipc_open, 1);
IPC_SERVER_INTERFACE(Ipc_close, 1);
IPC_SERVER_INTERFACE(Ipc_read, 4);
IPC_SERVER_INTERFACE(Ipc_write, 4);

IPC_SERVER_REGISTER_INTERFACES(IpcFsServer, 9,
IPC_SERVER_REGISTER_INTERFACES(IpcFsServer, 10,
    Ipc_ls,
    Ipc_cd,
    Ipc_mkdir,

@ -325,7 +343,8 @@ IPC_SERVER_REGISTER_INTERFACES(IpcFsServer, 9,
    Ipc_open,
    Ipc_close,
    Ipc_read,
    Ipc_write);
    Ipc_write,
    Ipc_fsize);

int main(int argc, char* argv[])
{
@ -17,6 +17,7 @@ c_useropts = -O0

INC_DIR = -I$(KERNEL_ROOT)/services/fs/libfs \
	-I$(KERNEL_ROOT)/services/lib/ipc \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/app
@ -63,4 +63,10 @@ IPC_INTERFACE(Ipc_write, 4, fd, src, offset, len, sizeof(int), *(int*)len, sizeo
int write(struct Session* session, int fd, char* src, int offset, int len)
{
    return IPC_CALL(Ipc_write)(session, &fd, src, &offset, &len);
}

IPC_INTERFACE(Ipc_fsize, 1, fd, sizeof(int));
int fsize(struct Session* session, int fd)
{
    return IPC_CALL(Ipc_fsize)(session, &fd);
}
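A typical client-side use of the new stub: size the file first, then read it in bounded slices, mirroring the loop spawn() now uses. A sketch (session assumed connected as in init.c; buffer comes from the userland malloc added in this commit):

int fd = open(&session, "/app/shell");
int len = fsize(&session, fd); /* server returns ip->size */
char* buf = malloc(len);
int got = 0;
while (got < len) {
    int step = len - got > 4096 ? 4096 : len - got;
    got += read(&session, fd, buf + got, got, step);
}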
@ -15,13 +15,14 @@
#include "libipc.h"

IPC_SERVICES(IpcFsServer, Ipc_ls, Ipc_cd, Ipc_mkdir, Ipc_delete, Ipc_cat,
    Ipc_open, Ipc_close, Ipc_read, Ipc_write);
    Ipc_open, Ipc_close, Ipc_read, Ipc_write, Ipc_fsize);

int ls(struct Session* session, char* path);
int cd(struct Session* session, char* path);
int mkdir(struct Session* session, char* path);
int rm(struct Session* session, char* path);
int cat(struct Session* session, char* path);
int fsize(struct Session* session, int fd);

int open(struct Session* session, char* path);
int close(struct Session* session, int fd);
@ -1,4 +1,4 @@
SRC_DIR := ipc
SRC_DIR := ipc memory

include $(KERNEL_ROOT)/compiler.mk
@ -15,8 +15,8 @@ objdump = ${toolchain}objdump

c_useropts = -O0

INC_DIR = -I$(KERNEL_ROOT)/services/shell/letter-shell \
	-I$(KERNEL_ROOT)/services/lib/ipc \
INC_DIR = -I$(KERNEL_ROOT)/services/lib/ipc \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/app
@ -121,7 +121,7 @@ void ipc_msg_send_wait(struct IpcMsg* msg)
    msg->header.done = 0;
    while (msg->header.done == 0) {
        /// @todo syscall yield with prio decrease
        yield();
        yield(SYS_TASK_YIELD_BLOCK_IPC);
    }
    assert(msg->header.done == 1);
}

@ -138,7 +138,7 @@ int ipc_session_wait(struct Session* session)
    struct IpcMsg* msg = IPCSESSION_MSG(session);
    while (msg->header.done == 0) {
        /// @todo syscall yield with prio decrease
        yield();
        yield(SYS_TASK_YIELD_BLOCK_IPC);
    }
    assert(msg->header.done == 1);
    return msg->header.ret_val;

@ -159,6 +159,7 @@ void delay_session(void)
void ipc_server_loop(struct IpcNode* ipc_node)
{
    struct Session session_list[NR_MAX_SESSION];
    memset(session_list, 0, sizeof(session_list));
    for (;;) {
        /* if connected sessions are greater than NR_MAX_SESSION,
           a full round will require multiple polls.

@ -169,7 +170,7 @@ void ipc_server_loop(struct IpcNode* ipc_node)
        /* handle each session */
        for (int i = 0; i < NR_MAX_SESSION; i++) {
            if (session_list[i].buf == NULL) {
                yield();
                yield(SYS_TASK_YIELD_NO_REASON);
                break;
            }
            cur_sess_id = session_list[i].id;

@ -178,11 +179,13 @@ void ipc_server_loop(struct IpcNode* ipc_node)
               a session could be delayed in case one of its messages (the current message) needs to wait for an interrupt message's arrival;
               interfaces[opcode] should explicitly call delay_session() and return to delay this session
            */
            while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1 && msg->header.done != 1) {
            while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1 && msg->header.done == 0) {
                // printf("session %d [%d, %d]\n", session_list[i].id, session_list[i].head, session_list[i].tail);
                if (session_used_size(&session_list[i]) == 0 && session_forward_tail(&session_list[i], msg->header.len) < 0) {
                    break;
                }

                // this is a message that needs to be handled
                if (ipc_node->interfaces[msg->header.opcode]) {
                    ipc_node->interfaces[msg->header.opcode](msg);
                    // check if this session is delayed by the op handler; all messages after the delayed message in the current session are blocked.

@ -193,9 +196,10 @@ void ipc_server_loop(struct IpcNode* ipc_node)
                } else {
                    printf("Unsupported opcode(%d) for server: %s\n", msg->header.opcode, ipc_node->name);
                }
                // the current msg is a message to be skipped over
                // finish this message from the server's perspective
                while (session_forward_head(&session_list[i], msg->header.len) < 0) {
                    yield();
                if (session_forward_head(&session_list[i], msg->header.len) < 0) {
                    break;
                }
                msg = IPCSESSION_MSG(&session_list[i]);
            }
@ -37,7 +37,7 @@ Modification:
#include "ipcargs.h"
#include "session.h"

#define NR_MAX_SESSION 16
#define NR_MAX_SESSION 32
#define IPC_MSG_MAGIC 0xABCDDCBA

typedef struct {
@ -61,6 +61,6 @@ bool session_free_buf(struct Session* session, int len)
    if (len > session_used_size(session)) {
        return false;
    }
    assert(session_forward_head(session, len) != 1);
    assert(session_forward_head(session, len) != -1);
    return true;
}
@ -55,8 +55,8 @@ __attribute__((__always_inline__)) static inline int session_remain_capacity(str

__attribute__((__always_inline__)) static inline int session_forward_head(struct Session* session, int len)
{
    if (((session->head + len) % session->capacity) > session->tail) {
        printf("forward head with too much size\n");
    if (len > session_used_size(session)) {
        printf("forward head with too much size, session used size: %d\n", session_used_size(session));
        return -1;
    }
    session->head = (session->head + len) % session->capacity;
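The old guard compared head and tail directly, which gives the wrong answer once the ring wraps; the new one asks how many bytes are actually in flight. A sketch of the invariant, assuming session_used_size is the usual modular distance between consumer and producer:

/* bytes currently queued between head (consumer) and tail (producer) */
static inline int used_size(int head, int tail, int capacity)
{
    return (tail - head + capacity) % capacity;
}

/* forwarding head by len is only legal while len <= used_size(...) */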
@ -12,11 +12,14 @@ user_ldflags = -N -Ttext 0
cflags = -std=c11 -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=soft -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -ggdb -Wno-unused -Werror -fno-omit-frame-pointer -fno-stack-protector -fno-pie
c_useropts = -O0

INC_DIR = -I$(KERNEL_ROOT)/services/app \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/lib/ipc
INC_DIR = -I$(KERNEL_ROOT)/services/app \
	-I$(KERNEL_ROOT)/services/fs/libfs \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/lib/ipc

all:
all: libmem.o
	@mv $^ ../../app

%.o: %.c
	@echo "cc $^"
@ -0,0 +1,369 @@
/*
 * mm.c - malloc using segregated list
 * KAIST
 * Tony Kim
 *
 * In this approach,
 * every block has a header and a footer,
 * in which the header contains reallocation information, size, and allocation info,
 * and the footer contains size and allocation info.
 * Free blocks are tagged onto the segregated list,
 * so every free block contains pointers to its predecessor and successor.
 * The segregated list headers are organized by 2^k size.
 *
 */

#include "libmem.h"
#include "usyscall.h"

/* single word (4) or double word (8) alignment */
#define ALIGNMENT 8
/* rounds up to the nearest multiple of ALIGNMENT */
#define ALIGN(size) (((size) + (ALIGNMENT - 1)) & ~0x7)

// My additional Macros
#define WSIZE 4 // word and header/footer size (bytes)
#define DSIZE 8 // double word size (bytes)
#define INITCHUNKSIZE (1 << 6)
#define CHUNKSIZE (1 << 12) //+(1<<7)

#define LISTLIMIT 20

#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))

// Pack a size and allocated bit into a word
#define PACK(size, alloc) ((size) | (alloc))

// Read and write a word at address p
#define GET(p) (*(unsigned int*)(p))
#define PUT(p, val) (*(unsigned int*)(p) = (val) | GET_TAG(p))
#define PUT_NOTAG(p, val) (*(unsigned int*)(p) = (val))

// Store predecessor or successor pointer for free blocks
#define SET_PTR(p, ptr) (*(unsigned int*)(p) = (unsigned int)(ptr))

// Read the size and allocation bit from address p
#define GET_SIZE(p) (GET(p) & ~0x7)
#define GET_ALLOC(p) (GET(p) & 0x1)
#define GET_TAG(p) (GET(p) & 0x2)
#define SET_RATAG(p) (GET(p) |= 0x2)
#define REMOVE_RATAG(p) (GET(p) &= ~0x2)

// Address of block's header and footer
#define HDRP(ptr) ((char*)(ptr)-WSIZE)
#define FTRP(ptr) ((char*)(ptr) + GET_SIZE(HDRP(ptr)) - DSIZE)

// Address of (physically) next and previous blocks
#define NEXT_BLKP(ptr) ((char*)(ptr) + GET_SIZE((char*)(ptr)-WSIZE))
#define PREV_BLKP(ptr) ((char*)(ptr)-GET_SIZE((char*)(ptr)-DSIZE))

// Address of free block's predecessor and successor entries
#define PRED_PTR(ptr) ((char*)(ptr))
#define SUCC_PTR(ptr) ((char*)(ptr) + WSIZE)

// Address of free block's predecessor and successor on the segregated list
#define PRED(ptr) (*(char**)(ptr))
#define SUCC(ptr) (*(char**)(SUCC_PTR(ptr)))

// End of my additional macros

// Global var
void* segregated_free_lists[LISTLIMIT];

// Functions
static void* extend_heap(size_t size);
static void* coalesce(void* ptr);
static void* place(void* ptr, size_t asize);
static void insert_node(void* ptr, size_t size);
static void delete_node(void* ptr);

static uintptr_t userland_heap_base;
static uintptr_t userland_heap_top;
static uintptr_t requested_heap_size;

static void* mem_sbrk(size_t size)
{
    uintptr_t userland_heap_size = userland_heap_top - userland_heap_base;
    if (userland_heap_size - requested_heap_size >= size) {
        void* ret_ptr = (void*)(userland_heap_base + requested_heap_size);
        requested_heap_size += size;
        return ret_ptr;
    }

    uintptr_t size_needed = size - (userland_heap_size - requested_heap_size);
    userland_heap_top = mmap(userland_heap_top, (uintptr_t)NULL, size_needed, false);

    return mem_sbrk(size);
}

static void* extend_heap(size_t size)
{
    void* ptr;
    size_t asize; // Adjusted size

    asize = ALIGN(size);

    if ((ptr = mem_sbrk(asize)) == (void*)-1)
        return NULL;

    // Set headers and footer
    PUT_NOTAG(HDRP(ptr), PACK(asize, 0));
    PUT_NOTAG(FTRP(ptr), PACK(asize, 0));
    PUT_NOTAG(HDRP(NEXT_BLKP(ptr)), PACK(0, 1));
    insert_node(ptr, asize);

    return coalesce(ptr);
}

static void insert_node(void* ptr, size_t size)
{
    int list = 0;
    void* search_ptr = ptr;
    void* insert_ptr = NULL;

    // Select segregated list
    while ((list < LISTLIMIT - 1) && (size > 1)) {
        size >>= 1;
        list++;
    }

    // Keep size ascending order and search
    search_ptr = segregated_free_lists[list];
    while ((search_ptr != NULL) && (size > GET_SIZE(HDRP(search_ptr)))) {
        insert_ptr = search_ptr;
        search_ptr = PRED(search_ptr);
    }

    // Set predecessor and successor
    if (search_ptr != NULL) {
        if (insert_ptr != NULL) {
            SET_PTR(PRED_PTR(ptr), search_ptr);
            SET_PTR(SUCC_PTR(search_ptr), ptr);
            SET_PTR(SUCC_PTR(ptr), insert_ptr);
            SET_PTR(PRED_PTR(insert_ptr), ptr);
        } else {
            SET_PTR(PRED_PTR(ptr), search_ptr);
            SET_PTR(SUCC_PTR(search_ptr), ptr);
            SET_PTR(SUCC_PTR(ptr), NULL);
            segregated_free_lists[list] = ptr;
        }
    } else {
        if (insert_ptr != NULL) {
            SET_PTR(PRED_PTR(ptr), NULL);
            SET_PTR(SUCC_PTR(ptr), insert_ptr);
            SET_PTR(PRED_PTR(insert_ptr), ptr);
        } else {
            SET_PTR(PRED_PTR(ptr), NULL);
            SET_PTR(SUCC_PTR(ptr), NULL);
            segregated_free_lists[list] = ptr;
        }
    }

    return;
}

static void delete_node(void* ptr)
{
    int list = 0;
    size_t size = GET_SIZE(HDRP(ptr));

    // Select segregated list
    while ((list < LISTLIMIT - 1) && (size > 1)) {
        size >>= 1;
        list++;
    }

    if (PRED(ptr) != NULL) {
        if (SUCC(ptr) != NULL) {
            SET_PTR(SUCC_PTR(PRED(ptr)), SUCC(ptr));
            SET_PTR(PRED_PTR(SUCC(ptr)), PRED(ptr));
        } else {
            SET_PTR(SUCC_PTR(PRED(ptr)), NULL);
            segregated_free_lists[list] = PRED(ptr);
        }
    } else {
        if (SUCC(ptr) != NULL) {
            SET_PTR(PRED_PTR(SUCC(ptr)), NULL);
        } else {
            segregated_free_lists[list] = NULL;
        }
    }

    return;
}

static void* coalesce(void* ptr)
{
    size_t prev_alloc = GET_ALLOC(HDRP(PREV_BLKP(ptr)));
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(ptr)));
    size_t size = GET_SIZE(HDRP(ptr));

    // Do not coalesce with the previous block if it is tagged with the reallocation tag
    if (GET_TAG(HDRP(PREV_BLKP(ptr))))
        prev_alloc = 1;

    if (prev_alloc && next_alloc) { // Case 1
        return ptr;
    } else if (prev_alloc && !next_alloc) { // Case 2
        delete_node(ptr);
        delete_node(NEXT_BLKP(ptr));
        size += GET_SIZE(HDRP(NEXT_BLKP(ptr)));
        PUT(HDRP(ptr), PACK(size, 0));
        PUT(FTRP(ptr), PACK(size, 0));
    } else if (!prev_alloc && next_alloc) { // Case 3
        delete_node(ptr);
        delete_node(PREV_BLKP(ptr));
        size += GET_SIZE(HDRP(PREV_BLKP(ptr)));
        PUT(FTRP(ptr), PACK(size, 0));
        PUT(HDRP(PREV_BLKP(ptr)), PACK(size, 0));
        ptr = PREV_BLKP(ptr);
    } else { // Case 4
        delete_node(ptr);
        delete_node(PREV_BLKP(ptr));
        delete_node(NEXT_BLKP(ptr));
        size += GET_SIZE(HDRP(PREV_BLKP(ptr))) + GET_SIZE(HDRP(NEXT_BLKP(ptr)));
        PUT(HDRP(PREV_BLKP(ptr)), PACK(size, 0));
        PUT(FTRP(NEXT_BLKP(ptr)), PACK(size, 0));
        ptr = PREV_BLKP(ptr);
    }

    insert_node(ptr, size);

    return ptr;
}

static void* place(void* ptr, size_t asize)
{
    size_t ptr_size = GET_SIZE(HDRP(ptr));
    size_t remainder = ptr_size - asize;

    delete_node(ptr);

    if (remainder <= DSIZE * 2) {
        // Do not split block
        PUT(HDRP(ptr), PACK(ptr_size, 1));
        PUT(FTRP(ptr), PACK(ptr_size, 1));
    }

    else if (asize >= 100) {
        // Split block
        PUT(HDRP(ptr), PACK(remainder, 0));
        PUT(FTRP(ptr), PACK(remainder, 0));
        PUT_NOTAG(HDRP(NEXT_BLKP(ptr)), PACK(asize, 1));
        PUT_NOTAG(FTRP(NEXT_BLKP(ptr)), PACK(asize, 1));
        insert_node(ptr, remainder);
        return NEXT_BLKP(ptr);
    }

    else {
        // Split block
        PUT(HDRP(ptr), PACK(asize, 1));
        PUT(FTRP(ptr), PACK(asize, 1));
        PUT_NOTAG(HDRP(NEXT_BLKP(ptr)), PACK(remainder, 0));
        PUT_NOTAG(FTRP(NEXT_BLKP(ptr)), PACK(remainder, 0));
        insert_node(NEXT_BLKP(ptr), remainder);
    }
    return ptr;
}

static bool malloc_inited = false;
int mm_init(void)
{
    // init heap
    userland_heap_base = task_heap_base();
    userland_heap_top = userland_heap_base;
    requested_heap_size = 0;

    int list;
    char* heap_start; // Pointer to beginning of heap

    // Initialize segregated free lists
    for (list = 0; list < LISTLIMIT; list++) {
        segregated_free_lists[list] = NULL;
    }

    // Allocate memory for the initial empty heap
    if ((long)(heap_start = mem_sbrk(4 * WSIZE)) == -1)
        return -1;

    PUT_NOTAG(heap_start, 0); /* Alignment padding */
    PUT_NOTAG(heap_start + (1 * WSIZE), PACK(DSIZE, 1)); /* Prologue header */
    PUT_NOTAG(heap_start + (2 * WSIZE), PACK(DSIZE, 1)); /* Prologue footer */
    PUT_NOTAG(heap_start + (3 * WSIZE), PACK(0, 1)); /* Epilogue header */

    if (extend_heap(INITCHUNKSIZE) == NULL)
        return -1;

    malloc_inited = true;
    return 0;
}

void* malloc(size_t size)
{
    while (!malloc_inited) {
        mm_init();
    }

    size_t asize; /* Adjusted block size */
    size_t extendsize; /* Amount to extend heap if no fit */
    void* ptr = NULL; /* Pointer */

    // Ignore size 0 cases
    if (size == 0)
        return NULL;

    // Align block size
    if (size <= DSIZE) {
        asize = 2 * DSIZE;
    } else {
        asize = ALIGN(size + DSIZE);
    }

    int list = 0;
    size_t searchsize = asize;
    // Search for free block in segregated list
    while (list < LISTLIMIT) {
        if ((list == LISTLIMIT - 1) || ((searchsize <= 1) && (segregated_free_lists[list] != NULL))) {
            ptr = segregated_free_lists[list];
            // Ignore blocks that are too small or marked with the reallocation bit
            while ((ptr != NULL) && ((asize > GET_SIZE(HDRP(ptr))) || (GET_TAG(HDRP(ptr))))) {
                ptr = PRED(ptr);
            }
            if (ptr != NULL)
                break;
        }

        searchsize >>= 1;
        list++;
    }

    // if free block is not found, extend the heap
    if (ptr == NULL) {
        extendsize = MAX(asize, CHUNKSIZE);

        if ((ptr = extend_heap(extendsize)) == NULL)
            return NULL;
    }

    // Place and divide block
    ptr = place(ptr, asize);

    // Return pointer to newly allocated block
    return ptr;
}

void free(void* ptr)
{
    size_t size = GET_SIZE(HDRP(ptr));

    REMOVE_RATAG(HDRP(NEXT_BLKP(ptr)));
    PUT(HDRP(ptr), PACK(size, 0));
    PUT(FTRP(ptr), PACK(size, 0));

    insert_node(ptr, size);
    coalesce(ptr);

    return;
}
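Usage follows the standard malloc/free contract; the first allocation lazily runs mm_init(), which obtains the heap base from task_heap_base() and grows it through mem_sbrk()/mmap(). A minimal sketch (libmem.h assumed to export malloc and free):

#include "libmem.h" /* assumed header for this allocator */

int demo(void)
{
    int* a = (int*)malloc(100 * sizeof(int)); /* triggers mm_init() on first use */
    if (a == NULL)
        return -1;
    for (int i = 0; i < 100; i++)
        a[i] = i;
    free(a); /* block is coalesced and reinserted into its size class */
    return 0;
}

Free blocks are binned roughly by powers of two (the size >>= 1 walk in insert_node), so both insertion and search touch at most LISTLIMIT lists.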
@ -10,7 +10,7 @@
 * See the Mulan PSL v2 for more details.
 */
/**
 * @file libmalloc.h
 * @file libmem.h
 * @brief support malloc and free in userland
 * @version 3.0
 * @author AIIT XUOS Lab

@ -18,7 +18,7 @@
 */

/*************************************************
File name: libmalloc.h
File name: libmem.h
Description: support malloc and free in userland
Others:
History:
@ -16,9 +16,10 @@ objdump = ${toolchain}objdump
c_useropts = -O0

INC_DIR = -I$(KERNEL_ROOT)/services/app \
	-I$(KERNEL_ROOT)/services/fs/libfs \
	-I$(KERNEL_ROOT)/services/lib/memory \
	-I$(KERNEL_ROOT)/services/lib/ipc \
	-I$(KERNEL_ROOT)/services/boards/$(BOARD) \
	-I$(KERNEL_ROOT)/services/fs/libfs
	-I$(KERNEL_ROOT)/services/boards/$(BOARD)

all: shell_cmd_list.o shell_ext.o shell.o
	@mv $^ ../../app
@ -1282,7 +1282,7 @@ void shellExec(Shell* shell)
        if (fd < 0) {
            shellWriteString(shell, shellText[SHELL_TEXT_CMD_NOT_FOUND]);
        } else {
            if (spawn(&session_fs, fd, read, shell->parser.param[0], shell->parser.param) < 0) {
            if (spawn(&session_fs, fd, read, fsize, shell->parser.param[0], shell->parser.param) < 0) {
                shellWriteString(shell, shellText[SHELL_TEXT_CMD_NOT_FOUND]);
            }
            close(&session_fs, fd);

@ -1725,6 +1725,11 @@ int shellRun(Shell* shell, const char* cmd)
    }
}

void shellKill(int pid)
{
    kill(pid);
}

/**
 * @brief ls: print all files under the current path (file system call)
 */
@ -36,6 +36,7 @@ extern void shellMkdir(const char* path);
extern void shellRm(const char* path);
extern void shellCat(const char* path);

extern void shellKill(int pid);
extern void shellShowTasks();
extern void shellShowMemInfo();
extern void shellShowCpusInfo();

@ -97,8 +98,11 @@ const ShellCommand shellCommandList[] = {
    SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
        sh, SHELL_AGENCY_FUNC_NAME(shellRun), run command directly),

    SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
        kill, shellKill, kill task by id),
    SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
        ls, shellLs, ls files),

    SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
        cd, shellCd, go to target path),
    SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
@ -68,7 +68,6 @@ struct KFreeList {
    struct double_list_node list_head;
};

#define MAX_NR_PAGES MAX_NR_FREE_PAGES
struct KBuddy {
    uint32_t n_pages;
    uint32_t use_lock;

@ -77,7 +76,7 @@ struct KBuddy {
    struct KPage* first_page;
    uint32_t mem_start;
    uint32_t mem_end;
    struct KPage pages[MAX_NR_PAGES];
    struct KPage* pages;
};

/*********************************************

@ -89,6 +88,7 @@ struct KBuddy {
 * @param mem_end free memory region end
 * @return void
 */
bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end);
void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end);

/*

@ -105,6 +105,8 @@ char* KBuddyAlloc(struct KBuddy* pbuddy, uint32_t size);
 */
bool KBuddyFree(struct KBuddy* pbuddy, char* vaddr);

void KBuddyDestory(struct KBuddy* pbuddy);

/*
 * Print current free pages for debug.
 */
@ -37,5 +37,6 @@ struct XiziBootNode {
};

bool hardkernel_init(struct TraceTag*);
bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag);
bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softkernel_tag);
void show_xizi_bar(void);
@ -34,6 +34,7 @@ Modification:
#define OUTPUT_LEVLE_ERROR 2

#define OUTPUT_LEVLE OUTPUT_LEVLE_DEBUG
// #define OUTPUT_LEVLE OUTPUT_LEVLE_LOG

extern void KPrintf(char* fmt, ...);
@ -30,6 +30,7 @@ Modification:
#pragma once

#include "core.h"
#include "spinlock.h"
#include "trap_common.h"

struct CPU {

@ -44,4 +45,11 @@ extern struct CPU global_cpus[NR_CPU];
static inline struct CPU* cur_cpu(void)
{
    return &global_cpus[cur_cpuid()];
}

struct spinlock whole_kernel_lock;

void xizi_enter_kernel();
bool xizi_try_enter_kernel();
void xizi_leave_kernel();
bool xizi_is_in_kernel();
@ -72,6 +72,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc);

extern struct TopLevelPageDirectory kern_pgdir;
void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag);
void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag);

extern struct XiziPageManager xizi_pager;
bool module_pager_init(struct PagerRightGroup*);
@ -44,6 +44,8 @@ Modification:
#define SYSCALL_EXEC 9 // run elf using current task
#define SYSCALL_SYS_STATE 10 // run system state
#define SYSCALL_REGISTER_IRQ 11 //

#define SYSCALL_KILL 12 // kill the task by id
// clang-format on

#ifndef __ASSEMBLER__

@ -62,6 +64,12 @@ typedef enum {
    SYS_STATE_SHOW_CPU_INFO,
} sys_state_option;

typedef enum {
    SYS_TASK_YIELD_NO_REASON = 0x0,
    SYS_TASK_YIELD_FOREVER = 0x1,
    SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason;

typedef union {
    struct {
        uintptr_t memblock_start;

@ -74,26 +82,23 @@ typedef union {
typedef int (*ipc_read_fn)(struct Session* session, int fd, char* dst, int offset, int len);
typedef int (*ipc_write_fn)(struct Session* session, int fd, char* src, int offset, int len);

struct KernReadTool {
    struct Session* session;
    int fd;
    ipc_read_fn ipc_read;
};

int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4);

int sys_spawn(struct KernReadTool* read_tool, char* name, char** argv);
int sys_exit();
int sys_yield();
int sys_spawn(char* img_start, char* name, char** argv);
int sys_exit(struct TaskMicroDescriptor* ptask);
int sys_yield(task_yield_reason reason);
int sys_kill(int id);

int sys_register_as_server(char* name);
int sys_connect_session(char* path, int capacity, struct Session* user_session);
int sys_poll_session(struct Session* userland_session_arr, int arr_capacity);
int sys_close_session(struct Session* session);

int sys_exec(struct KernReadTool* read_tool, char* name, char** argv);
int sys_exec(char* img_start, char* name, char** argv);
int sys_state(sys_state_option option, sys_state_info* info);
int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev);

int sys_register_irq(int irq_num, int irq_opcode);
int sys_unbind_irq_all(struct TaskMicroDescriptor* task);
int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num);
#endif
@ -31,6 +31,7 @@ Modification:

#include "core.h"

#include "buddy.h"
#include "list.h"
#include "object_allocator.h"
#include "pagetable.h"

@ -47,6 +48,8 @@ enum ProcState {
    READY,
    RUNNING,
    DEAD,
    BLOCKED,
    NEVER_RUN,
};

/* Thread Control Block */

@ -59,33 +62,39 @@ struct Thread {

/* Process Control Block */
struct TaskMicroDescriptor {
    struct double_list_node node;

    struct spinlock lock;
    /* task->lock needed */
    /* task debug resources */
    int pid;
    bool bind_irq;
    bool dead;
    char name[TASK_NAME_MAX_LEN];

    /// @todo support return value
    int ret; // state val that is returned to the parent
    /// @todo support parent
    struct TaskMicroDescriptor* parent;
    enum ProcState state;
    /// @todo support ret value
    int ret; // state val that is returned to the parent

    /* task context resources */
    struct Thread main_thread; // will only be accessed by the task itself

    /* task memory resources */
    struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
    uintptr_t heap_base; // mem size of proc used (allocated by kernel)
    /// @todo support heap_base
    uintptr_t mem_size;

    /* task communication resources */
    struct double_list_node cli_sess_listhead;
    struct double_list_node svr_sess_listhead;
    bool current_ipc_handled;
    struct KBuddy* massive_ipc_allocator;
    struct TraceTag server_identifier;

    /* task->lock not necessary */
    struct Thread main_thread; // will only be accessed by the task itself
    /* task schedule attributes */
    struct double_list_node node;
    enum ProcState state;
    int priority; // priority
    int remain_tick;
    int maxium_tick;

    struct TraceTag cwd; // current directory

    int priority; // priority

    /// @todo support mem_size
    uintptr_t mem_size; // mem size of proc used (allocated by kernel)
    char name[TASK_NAME_MAX_LEN];
};

struct SchedulerRightGroup {

@ -94,17 +103,12 @@ struct SchedulerRightGroup {
};

struct XiziTaskManager {
    struct spinlock lock; // lock to organize free and used task list
    struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
    int nr_pcb_used; // for debug
    struct double_list_node task_blocked_list_head;
    struct slab_allocator task_allocator;

    /// @todo Add pid to task
    struct slab_allocator task_buddy_allocator;
    uint32_t next_pid;

    /* number of tcbs that one page contains */
    int nr_tcb_per_page;

    /* init task manager */
    void (*init)();
    /* new a task control block, checkout #sys_spawn for usage */

@ -112,14 +116,19 @@ struct XiziTaskManager {
    /* free a task control block, this calls #free_user_pgdir to free all virtual spaces */
    void (*free_pcb)(struct TaskMicroDescriptor*);
    /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
    void (*task_set_default_schedule_attr)(struct TaskMicroDescriptor*, struct TraceTag* cwd);
    void (*task_set_default_schedule_attr)(struct TaskMicroDescriptor*);

    /* used by task_scheduler; finds the next READY task; should be called with the lock held */
    struct TaskMicroDescriptor* (*next_runnable_task)(void);
    /* function that runs in kernel thread context and schedules user tasks */
    void (*task_scheduler)(struct SchedulerRightGroup);

    /* handle task state */
    /* call to yield the current user task */
    void (*cur_task_yield_noschedule)(void);
    void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking);
    /* block and unblock task */
    void (*task_block)(struct TaskMicroDescriptor* task);
    void (*task_unblock)(struct TaskMicroDescriptor* task);
    /* set task priority */
    void (*set_cur_task_priority)(int priority);
};
@ -27,48 +27,78 @@ Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
/// @todo use hardkernel

#include "kern_init.h"
#include "multicores.h"

#include "assert.h"
#include "task.h"

#include "trap_common.h"

extern uint32_t _binary_init_start[], _binary_default_fs_start[];
extern int sys_spawn(char* path, char** argv);
extern int sys_spawn(char* img_start, char* name, char** argv);

static struct TraceTag hardkernel_tag, softkernel_tag;
static int core_init_done = 0;
int main(void)
{
    /* init tracer */
    // clang-format off
    tracer_init(); // init tracer system
    struct TraceTag hardkernel_tag, softkernel_tag;
    if (!CreateResourceTag(&hardkernel_tag, RequireRootTag(), "hardkernel", TRACER_OWNER, NULL) ||
        !CreateResourceTag(&softkernel_tag, RequireRootTag(), "softkernel", TRACER_OWNER, NULL)) {
        ERROR("Failed to create hardkernel owner and softkernel owner.\n");
        return -1;
    }
    // clang-format on
    uint32_t cpu_id = cur_cpuid();

    /* init hardkernel */
    if (!hardkernel_init(&hardkernel_tag)) {
        return -1;
    }
    /* init softkernel */
    if (!softkernel_init(&hardkernel_tag, &softkernel_tag)) {
        return -1;
    }
    show_xizi_bar();
    if (cpu_id == 0) {
        tracer_init(); // init tracer system
        // clang-format off
        if (!CreateResourceTag(&hardkernel_tag, RequireRootTag(), "hardkernel", TRACER_OWNER, NULL) ||
            !CreateResourceTag(&softkernel_tag, RequireRootTag(), "softkernel", TRACER_OWNER, NULL)) {
            ERROR("Failed to create hardkernel owner and softkernel owner.\n");
            return -1;
        }
        // clang-format on
        /* init hardkernel */
        if (!hardkernel_init(&hardkernel_tag)) {
            return -1;
        }

        /* start first task */
        char* init_task_param[2] = { "/app/init", 0 };
        spawn_embedded_task((char*)_binary_init_start, "init", init_task_param);
        char* fs_server_task_param[2] = { "/app/fs_server", 0 };
        spawn_embedded_task((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
        spinlock_init(&whole_kernel_lock, "wklock");
    } else {
        spinlock_lock(&whole_kernel_lock);
        secondary_cpu_hardkernel_init(cpu_id, &hardkernel_tag);
        spinlock_unlock(&whole_kernel_lock);
    }

    spinlock_lock(&whole_kernel_lock);
    if (cpu_id == 0) {
        /* init softkernel */
        if (!softkernel_init(&hardkernel_tag, &softkernel_tag)) {
            return -1;
        }
        show_xizi_bar();

        for (int i = 1; i < NR_CPU; i++) {
            // start secondary cpus
            cpu_start_secondary(i);
        }

        /* start first task */
        char* init_task_param[2] = { "/app/init", 0 };
        sys_spawn((char*)_binary_init_start, "init", init_task_param);
        char* fs_server_task_param[2] = { "/app/fs_server", 0 };
        sys_spawn((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
    }

    /* start scheduler */
    struct SchedulerRightGroup scheduler_rights;
    assert(AchieveResourceTag(&scheduler_rights.mmu_driver_tag, &hardkernel_tag, "mmu-ac-resource"));
    assert(AchieveResourceTag(&scheduler_rights.intr_driver_tag, &hardkernel_tag, "intr-ac-resource"));
    core_init_done |= (1 << cpu_id);
    LOG_PRINTF("CPU %d init done\n", cpu_id);
    spinlock_unlock(&whole_kernel_lock);

    while (core_init_done != (1 << NR_CPU) - 1)
        ;

    xizi_enter_kernel();
    start_smp_cache_broadcast(cpu_id);
    xizi_task_manager.task_scheduler(scheduler_rights);

    // never reached
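The boot rendezvous above is a simple bitmask barrier: each core publishes its bit in core_init_done under whole_kernel_lock, then every core spins until the mask is full. Reduced to its essentials (a sketch of the same pattern; the lock around the read-modify-write is what makes the |= safe here):

static volatile int core_init_done = 0;

void boot_rendezvous(int cpu_id, int nr_cpu)
{
    spinlock_lock(&whole_kernel_lock);
    core_init_done |= (1 << cpu_id); /* publish this core */
    spinlock_unlock(&whole_kernel_lock);

    while (core_init_done != (1 << nr_cpu) - 1)
        ; /* wait for every core before entering the scheduler */
}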
@ -29,6 +29,7 @@ Modification:
*************************************************/

#include "buddy.h"
#include "kalloc.h"
#include "log.h"

static void _buddy_split_page(struct KPage* page, uint32_t low_order, uint32_t high_order, struct KFreeList* list)

@ -75,8 +76,6 @@ static struct KPage* KBuddyPagesAlloc(struct KBuddy* pbuddy, int nPages)
    struct KFreeList* list = NULL;
    int i = 0, order = 0;

    spinlock_lock(&pbuddy->lock);

    // find order
    for (order = 0; (FREE_LIST_INDEX(order)) < nPages; order++)
        ;

@ -99,12 +98,10 @@ static struct KPage* KBuddyPagesAlloc(struct KBuddy* pbuddy, int nPages)
        // set the pages' order
        _buddy_set_pages_order(page, order);

        spinlock_unlock(&pbuddy->lock);
        return page;
    }

    // there are not enough free pages to satisfy nPages
    spinlock_unlock(&pbuddy->lock);
    return NULL;
}

@ -116,8 +113,6 @@ static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
    uint32_t buddy_idx = 0, new_buddy_idx = 0;
    uint32_t page_idx = page - pbuddy->pages;

    spinlock_lock(&pbuddy->lock);

    for (; order < MAX_BUDDY_ORDER - 1; order++) {
        // find and delete buddy to combine
        buddy_idx = BUDDY_PAGE_INDEX(page_idx, order);

@ -141,19 +136,22 @@ static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
    doubleListAddOnHead(&page->node, &pbuddy->free_list[order].list_head);
    pbuddy->free_list[order].n_free_pages++;

    spinlock_unlock(&pbuddy->lock);
    return;
}

void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
{
    if (pbuddy->pages == NULL) {
        if ((pbuddy->pages = (struct KPage*)kalloc(((mem_end - mem_start) >> LEVEL4_PTE_SHIFT) * sizeof(struct KPage))) == NULL) {
            ERROR("No space to init a buddy object.\n");
            return false;
        }
    }

    uint32_t i = 0;
    struct KPage* page = NULL;
    struct KFreeList* free_list = NULL;

    // init spinlock
    spinlock_init(&pbuddy->lock, "kbuddy");

    // init global kernel Buddy system
    pbuddy->mem_start = mem_start;
    pbuddy->mem_end = mem_end;

@ -183,6 +181,16 @@ void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
        doubleListNodeInit(&page->node);
        KBuddyPagesFree(pbuddy, page);
    }

    return true;
}

void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
{
#define MAX_NR_PAGES MAX_NR_FREE_PAGES
    static struct KPage kern_free_pages[MAX_NR_PAGES];
    pbuddy->pages = kern_free_pages;
    KBuddyInit(pbuddy, mem_start, mem_end);
}

char* KBuddyAlloc(struct KBuddy* pbuddy, uint32_t size)

@ -221,6 +229,13 @@ bool KBuddyFree(struct KBuddy* pbuddy, char* vaddr)
    return true;
}

void KBuddyDestory(struct KBuddy* pbuddy)
{
    if (pbuddy->pages) {
        kfree((void*)pbuddy->pages);
    }
}

void KFreePagesInfo(struct KBuddy* pbuddy)
{
    DEBUG("Buddy structure:");
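The order search in KBuddyPagesAlloc rounds an nPages request up to the smallest power-of-two block, and KBuddyPagesFree finds merge partners by flipping one bit of the page index. A standalone sketch of both relations, assuming FREE_LIST_INDEX(order) = 2^order and the usual XOR buddy rule (the macro bodies here are illustrative assumptions, not copies from buddy.h):

#include <stdint.h>

/* assumed shapes of the macros used in the diff */
#define FREE_LIST_INDEX(order) (1u << (order))
#define BUDDY_PAGE_INDEX(page_idx, order) ((page_idx) ^ (1u << (order)))

/* smallest order whose block covers n_pages, mirroring the for-loop above */
static uint32_t order_for(uint32_t n_pages)
{
    uint32_t order = 0;
    while (FREE_LIST_INDEX(order) < n_pages)
        order++;
    return order; /* e.g. n_pages = 5 -> order 3, an 8-page block */
}

Freeing then walks orders upward: if the block at BUDDY_PAGE_INDEX(page_idx, order) is also free, the two coalesce into one block of order + 1.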
@ -45,7 +45,7 @@ bool module_phymem_init()
    uint32_t user_freemem_start = PHY_USER_FREEMEM_BASE;
    uint32_t user_freemem_end = PHY_MEM_STOP;
    KBuddySysInit(&kern_virtmem_buddy, kern_freemem_start, kern_freemem_end);
    KBuddySysInit(&user_phy_freemem_buddy, user_freemem_start, user_freemem_end);
    KBuddyInit(&user_phy_freemem_buddy, user_freemem_start, user_freemem_end);
    LOG_PRINTF("Free memory organized.\n");
    return true;
}
@ -293,6 +293,12 @@ void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driv
    // dev mem
    _map_pages((uintptr_t*)kern_pgdir.pd_addr, DEV_VRTMEM_BASE, DEV_PHYMEM_BASE, DEV_MEM_SZ, dev_attr);

    // _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
    _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
    _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
    // _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
}

void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)
{
    _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
    // _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
}
@ -91,27 +91,39 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
    struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
    struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);

    spinlock_lock(&task->lock);
    // map double vaddr page to support uniform ring buffer r/w
    uintptr_t vaddr = alloc_share_page_addr(task, nr_pages * 2);
    if (UNLIKELY(vaddr == 0)) {
        spinlock_unlock(&task->lock);
    uintptr_t vaddr = (uintptr_t)NULL;
    if (task->massive_ipc_allocator != NULL) {
        vaddr = (uintptr_t)KBuddyAlloc(task->massive_ipc_allocator, PAGE_SIZE * nr_pages * 2);
    } else {
        vaddr = alloc_share_page_addr(task, nr_pages * 2);
        if (vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
            task->massive_ipc_allocator = (struct KBuddy*)slab_alloc(&xizi_task_manager.task_buddy_allocator);
            KBuddyInit(task->massive_ipc_allocator, USER_IPC_USE_ALLOCATOR_WATERMARK, USER_IPC_SPACE_TOP);
            if (!task->massive_ipc_allocator) {
                ERROR("Alloc task buddy failed.\n");
                return (uintptr_t)NULL;
            }
            return map_task_share_page(task, paddr, nr_pages);
        }
    }

    if (UNLIKELY(vaddr == (uintptr_t)NULL)) {
        return (uintptr_t)NULL;
    }
    if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
        spinlock_unlock(&task->lock);
        return (uintptr_t)NULL;
    }
    if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
        xizi_pager.unmap_pages(task->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
        spinlock_unlock(&task->lock);
        return (uintptr_t)NULL;
    }
    spinlock_unlock(&task->lock);
    if (task == cur_cpu()->task) {
        p_mmu_driver->TlbFlush(vaddr, 2 * nr_pages * PAGE_SIZE);

        /// @todo clean range rather than all
        p_dcache_done->flushall();
        // p_dcache_done->flushall();
        p_dcache_done->invalidateall();
        // p_dcache_done->flush(vaddr, vaddr + 2 * nr_pages * PAGE_SIZE);
    }
    return vaddr;

@ -123,21 +135,21 @@ uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr
    struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
    struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);

    spinlock_lock(&task->lock);
    bool ret = false;
    if (is_dev) {
        ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
    } else {
        ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
    }
    spinlock_unlock(&task->lock);
    if (!ret) {
        return (uintptr_t)NULL;
    }
    if (task == cur_cpu()->task) {
        p_mmu_driver->TlbFlush(vaddr, nr_pages * PAGE_SIZE);

        /// @todo clean range rather than all
        p_dcache_done->flushall();
        // p_dcache_done->flushall();
        p_dcache_done->invalidateall();
        // p_dcache_done->flush(vaddr, vaddr + nr_pages * PAGE_SIZE);
    }

@ -150,14 +162,17 @@ void unmap_task_share_pages(struct TaskMicroDescriptor* task, const uintptr_t ta
    struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
    struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);

    spinlock_lock(&task->lock);
    xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE);
    xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE);
    spinlock_unlock(&task->lock);
    if (task_vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
        KBuddyFree(task->massive_ipc_allocator, (void*)task_vaddr);
    }
    if (task == cur_cpu()->task) {
        p_mmu_driver->TlbFlush(task_vaddr, 2 * nr_pages * PAGE_SIZE);

        /// @todo clean range rather than all
        p_dcache_done->flushall();
        // p_dcache_done->flushall();
        p_dcache_done->invalidateall();
        // p_dcache_done->flush(task_vaddr, task_vaddr + 2 * nr_pages * PAGE_SIZE);
    }
}

@ -181,12 +196,14 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
    uintptr_t client_vaddr = map_task_share_page(client, V2P_WO(kern_vaddr), nr_pages);
    if (UNLIKELY(client_vaddr == 0)) {
        kfree((char*)kern_vaddr);
        slab_free(SessionAllocator(), session_backend);
        return NULL;
    }
    uintptr_t server_vaddr = map_task_share_page(server, V2P_WO(kern_vaddr), nr_pages);
    if (UNLIKELY(server_vaddr == 0)) {
        unmap_task_share_pages(client, client_vaddr, nr_pages);
        kfree((char*)kern_vaddr);
        slab_free(SessionAllocator(), session_backend);
        return NULL;
    }

@ -200,20 +217,19 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
    session_backend->client_side.buf_addr = client_vaddr;
    session_backend->client_side.capacity = true_capacity;
    session_backend->client_side.closed = false;
    spinlock_lock(&client->lock);
    doubleListNodeInit(&session_backend->client_side.node);
    doubleListAddOnBack(&session_backend->client_side.node, &client->cli_sess_listhead);
    spinlock_unlock(&client->lock);
    // init server side session struct
    session_backend->server_side.buf_addr = server_vaddr;
    session_backend->server_side.capacity = true_capacity;
    session_backend->server_side.head = 0;
    session_backend->server_side.tail = 0;
    session_backend->server_side.closed = false;
    spinlock_lock(&server->lock);
    doubleListNodeInit(&session_backend->server_side.node);
    doubleListAddOnBack(&session_backend->server_side.node, &server->svr_sess_listhead);
    spinlock_unlock(&server->lock);

    server->mem_size += true_capacity;
    client->mem_size += true_capacity;

    return session_backend;
}

@ -232,17 +248,16 @@ int delete_share_pages(struct session_backend* session_backend)

    /* unmap share pages */
    if (session_backend->client) {
        spinlock_lock(&session_backend->client->lock);
        doubleListDel(&session_backend->client_side.node);
        spinlock_unlock(&session_backend->client->lock);
    }

    if (session_backend->server) {
        spinlock_lock(&session_backend->server->lock);
        doubleListDel(&session_backend->server_side.node);
        spinlock_unlock(&session_backend->server->lock);
    }

    session_backend->server->mem_size -= session_backend->nr_pages * PAGE_SIZE;
    session_backend->client->mem_size -= session_backend->nr_pages * PAGE_SIZE;

    /* free session backend */
    kfree((void*)session_backend->buf_kernel_addr);
    slab_free(SessionAllocator(), (void*)session_backend);
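map_task_share_page maps the same physical pages at two consecutive virtual ranges (the "map double vaddr page" comment), so a ring buffer that wraps at the end of the region stays contiguous in virtual memory. A small sketch of the access pattern this enables (PAGE_SIZE and the layout are assumptions for illustration):

#include <stdint.h>
#include <string.h>

/* buf is mapped twice back to back: [buf, buf+span) and [buf+span, buf+2*span)
   alias the same physical pages, so a wrapped message is one linear copy */
static void ring_read(const uint8_t* buf, uint32_t span,
                      uint32_t head, uint8_t* out, uint32_t len)
{
    /* head < span; head + len may run past span into the aliased mapping,
       which lands back at physical offset (head + len) % span */
    memcpy(out, buf + head, len);
}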
@ -9,6 +9,7 @@ SRC_FILES := syscall.c \
	sys_register_irq.c \
	sys_exit.c \
	sys_state.c \
	sys_mmap.c
	sys_mmap.c \
	sys_kill.c

include $(KERNEL_ROOT)/compiler.mk
@ -35,13 +35,13 @@ Modification:
#include "syscall.h"
#include "task.h"

int create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session)
struct session_backend* create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session)
{
    // create share pages
    struct session_backend* session_backend = xizi_share_page_manager.create_share_pages(client, server, capacity);
    if (UNLIKELY(session_backend == NULL)) {
        DEBUG("create_share_pages failed\n");
        return -1;
        return NULL;
    }

    // init user_session

@ -51,7 +51,7 @@ int create_session_inner(struct TaskMicroDes
    user_session->tail = 0;
    user_session->id = session_backend->session_id;

    return 0;
    return session_backend;
}

int sys_connect_session(char* path, int capacity, struct Session* user_session)

@ -70,11 +70,14 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)

    struct TraceTag server_tag;
    if (!AchieveResourceTag(&server_tag, &server_identifier_owner, path)) {
        ERROR("Not server: %s\n", path);
        DEBUG("Not server: %s\n", path);
        return -1;
    }

    struct TaskMicroDescriptor* server = AchieveResource(&server_tag);
    assert(server != NULL);
    return create_session_inner(client, server, capacity, user_session);
    if (create_session_inner(client, server, capacity, user_session) == NULL) {
        return -1;
    }
    return 0;
}
@ -66,18 +66,21 @@ Modification:
/// @param path path to elf file
/// @param argv arguments given to main
/// @return
int task_exec(struct TaskMicroDescriptor* task, struct Session* session, int fd, ipc_read_fn ipc_read, char* name, char** argv)
int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, char** argv)
{
    /* load img to task */
    if (img_start == NULL) {
        return -1;
    }
    /* 1. load elf header */
    struct elfhdr elf;
    memcpy((void*)&elf, img_start, sizeof(elf));
    if (elf.magic != ELF_MAGIC) {
        return -1;
    }
    // pgdir for new task
    struct TopLevelPageDirectory pgdir;
    pgdir.pd_addr = NULL;

    if (ipc_read(session, fd, (char*)&elf, 0, sizeof(elf)) < sizeof(elf) || elf.magic != ELF_MAGIC) {
        ERROR("invalid elf file.\n");
        goto error_exec;
    }

    // pgdir for new task
    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
        ERROR("create new pgdir failed.\n");
        goto error_exec;

@ -88,10 +91,8 @@ int task_exec(struct TaskMicroDescriptor* task, struct Session* session, int fd,
    uintptr_t load_size = 0;
    struct proghdr ph;
    for (int sec_idx = 0, off = elf.phoff; sec_idx < elf.phnum; sec_idx++, off += sizeof(ph)) {
        if (ipc_read(session, fd, (char*)&ph, off, sizeof(ph)) != sizeof(ph)) {
            ERROR("Read elf header failed\n");
            goto error_exec;
        }
        // load proghdr
        memcpy((char*)&ph, img_start + off, sizeof(ph));

        if (ph.type != ELF_PROG_LOAD)
            continue;

@ -111,13 +112,11 @@ int task_exec(struct TaskMicroDescriptor* task, struct Session* session, int fd,
        for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
            if (page_paddr == 0) {
                panic("copy elf file to unmapped addr");
            }
            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
            if (read_size != ipc_read(session, fd, P2V(page_paddr), ph.off + addr_offset, read_size)) {
                ERROR("read size error, off: %d, read len: %d\n", ph.off + addr_offset, read_size);
                ERROR("copy elf file to unmapped addr\n");
                goto error_exec;
            }
            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
            memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);
        }
    }

@ -176,13 +175,12 @@ int task_exec(struct TaskMicroDescriptor* task, struct Session* session, int fd,
    }
    strncpy(task->name, last, sizeof(task->name));

    struct TopLevelPageDirectory old_pgdir = task->pgdir;
    if (task->pgdir.pd_addr != NULL) {
        xizi_pager.free_user_pgdir(&task->pgdir);
    }
    task->pgdir = pgdir;

    /// @todo record mem size used by task
    task->mem_size = ALIGNUP(load_size, PAGE_SIZE);

    xizi_pager.free_user_pgdir(&old_pgdir);
    task->heap_base = ALIGNUP(load_size, PAGE_SIZE);
    task->mem_size = task->heap_base + USER_STACK_SIZE;
    return 0;

error_exec:

@ -193,7 +191,7 @@ error_exec:
    return -1;
}

int sys_exec(struct KernReadTool* read_tool, char* name, char** argv)
int sys_exec(char* img_start, char* name, char** argv)
{
    /// @todo find a source of mmu_driver_tag instead of requiring from root
    static struct TraceTag mmu_driver_tag;

@ -204,14 +202,9 @@ int sys_exec(struct KernReadTool* read_tool, char* name, char** argv)
    }

    struct MmuCommonDone* p_mmu_driver = AchieveResource(&mmu_driver_tag);

    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
    struct Session* session = read_tool->session;
    int fd = read_tool->fd;
    ipc_read_fn ipc_read = read_tool->ipc_read;
    int ret = task_exec(current_task, session, fd, ipc_read, name, argv);
    int ret = task_exec(current_task, img_start, name, argv);
    if (ret >= 0) {
        spinlock_init(&current_task->lock, current_task->name);
        p_mmu_driver->LoadPgdir((uintptr_t)V2P(current_task->pgdir.pd_addr));
        return ret;
    }
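With this change a task's heap begins immediately after the loaded image: heap_base is the load size rounded up to a page, and mem_size additionally accounts for the user stack. The arithmetic, as a compilable sketch (the macro values are assumptions):

#include <stdint.h>

#define PAGE_SIZE 4096                  /* assumed */
#define USER_STACK_SIZE (4 * PAGE_SIZE) /* assumed */
#define ALIGNUP(sz, align) (((sz) + (align)-1) & ~((uintptr_t)(align)-1))

void account_task_memory(uintptr_t load_size,
                         uintptr_t* heap_base, uintptr_t* mem_size)
{
    *heap_base = ALIGNUP(load_size, PAGE_SIZE); /* first byte after the image */
    *mem_size = *heap_base + USER_STACK_SIZE;   /* image span plus the stack */
}

For example, a 123456-byte image yields heap_base = 126976 (31 pages), which is also what SYS_STATE_GET_HEAP_BASE now returns in the sys_state hunk further down.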
@ -36,58 +36,10 @@ Modification:
#include "syscall.h"
#include "task.h"

int sys_exit()
int sys_exit(struct TaskMicroDescriptor* ptask)
{

    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    assert(cur_task != NULL);

    /* handle sessions for condition 1, ref. delete_share_pages() */
    // close all server_sessions
    struct server_session* server_session = NULL;
    while (!IS_DOUBLE_LIST_EMPTY(&cur_task->svr_sess_listhead)) {
        server_session = CONTAINER_OF(cur_task->svr_sess_listhead.next, struct server_session, node);
        // cut the connection from task to session
        if (!server_session->closed) {
            xizi_share_page_manager.unmap_task_share_pages(cur_task, server_session->buf_addr, CLIENT_SESSION_BACKEND(server_session)->nr_pages);
            server_session->closed = true;
        }
        doubleListDel(&server_session->node);
        SERVER_SESSION_BACKEND(server_session)->server = NULL;
        // delete session (also cut connection from session to task)
        if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
            xizi_share_page_manager.delete_share_pages(SERVER_SESSION_BACKEND(server_session));
        }
    }
    // close all client_sessions
    struct client_session* client_session = NULL;
    while (!IS_DOUBLE_LIST_EMPTY(&cur_task->cli_sess_listhead)) {
        client_session = CONTAINER_OF(cur_task->cli_sess_listhead.next, struct client_session, node);
        // cut the connection from task to session
        if (!client_session->closed) {
            xizi_share_page_manager.unmap_task_share_pages(cur_task, client_session->buf_addr, CLIENT_SESSION_BACKEND(client_session)->nr_pages);
            client_session->closed = true;
        }
        doubleListDel(&client_session->node);
        CLIENT_SESSION_BACKEND(client_session)->client = NULL;
        // delete session (also cut connection from session to task)
        if (CLIENT_SESSION_BACKEND(client_session)->server_side.closed) {
            xizi_share_page_manager.delete_share_pages(CLIENT_SESSION_BACKEND(client_session));
        }
    }

    if (cur_task->server_identifier.meta != NULL) {
        struct TraceTag server_identifier_owner;
        AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
        assert(server_identifier_owner.meta != NULL);
        DeleteResource(&cur_task->server_identifier, &server_identifier_owner);
    }

    // delete task for pcb_list
    xizi_task_manager.cur_task_yield_noschedule();
    spinlock_lock(&cur_task->lock);
    cur_task->state = DEAD;
    spinlock_unlock(&cur_task->lock);

    assert(ptask != NULL);
    ptask->dead = true;
    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
    return 0;
}
}
@ -0,0 +1,50 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 * http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
/**
 * @file sys_kill.c
 * @brief task kill syscall
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2024.03.19
 */

/*************************************************
File name: sys_kill.c
Description: task kill syscall
Others:
History:
1. Date: 2024-03-19
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "trap_common.h"

#include "task.h"

extern int sys_exit(struct TaskMicroDescriptor* task);
int sys_kill(int id)
{
    struct TaskMicroDescriptor* task = NULL;

    for (int prio = 0; prio < TASK_MAX_PRIORITY; prio++) {
        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[prio], node)
        {
            if (task->pid == id) {
                sys_exit(task);
                return 0;
            }
        }
    }

    return -1;
}
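Note that the kill path only marks the victim: sys_exit sets the dead flag and yields, and the PCB is actually freed later by the scheduler, once the task is no longer RUNNING on any core (see the scheduler hunks below). A reduced sketch of that two-phase teardown, with a simplified stand-in for the real task struct:

#include <stdbool.h>
#include <stddef.h>

/* simplified stand-in for struct TaskMicroDescriptor */
struct task {
    bool dead;
    enum { READY, RUNNING, BLOCKED } state;
};

/* phase 1 (sys_kill/sys_exit): mark only, never free a running task */
static void mark_dead(struct task* t)
{
    t->dead = true;
}

/* phase 2 (scheduler pick loop): reap once the task is off-CPU */
static struct task* pick_or_reap(struct task* t)
{
    if (t->state == READY && !t->dead)
        return t; /* runnable */
    if (t->dead && t->state != RUNNING) {
        /* free_pcb(t) in the real kernel */
    }
    return NULL;
}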
@ -63,5 +63,7 @@ int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev)
            load_len += PAGE_SIZE;
        }
    }

    cur_task->mem_size += true_len;
    return vaddr + true_len;
}
@ -41,8 +41,6 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
        return -1;
    }

    spinlock_lock(&cur_task->lock);

    struct double_list_node* cur_node = NULL;
    struct server_session* server_session = NULL;
    /* update old sessions */

@ -54,52 +52,44 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
        server_session = CONTAINER_OF(cur_node, struct server_session, node);
        if (UNLIKELY(server_session->buf_addr != (uintptr_t)userland_session_arr[i].buf)) {
            ERROR("mismatched old session addr, user buf: %x, server buf: %x\n", userland_session_arr[i].buf, server_session->buf_addr);
            spinlock_unlock(&cur_task->lock);
            return -1;
        }
        // update session_backend
        // if current session is handled
        if (server_session->head != userland_session_arr[i].head) {
            struct TaskMicroDescriptor* client = SERVER_SESSION_BACKEND(server_session)->client;
            if (client->state == BLOCKED) {
                xizi_task_manager.task_unblock(client);
            } else {
                client->current_ipc_handled = true;
            }
        }
        server_session->head = userland_session_arr[i].head;
        server_session->tail = userland_session_arr[i].tail;
        doubleListDel(cur_node);
        doubleListAddOnBack(cur_node, &cur_task->svr_sess_listhead);
    }
    spinlock_unlock(&cur_task->lock);

    /* handle sessions for condition 2, ref. delete_share_pages() */
    bool has_delete = true;
    while (has_delete) {
        has_delete = false;

        spinlock_lock(&cur_task->lock);
        DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
        {
            if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
                // client had closed it, then server will close it too
                struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);

                spinlock_unlock(&cur_task->lock);
                if (!session_backend->server_side.closed) {
                    session_backend->server_side.closed = true;
                    xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);
                }
                xizi_share_page_manager.delete_share_pages(session_backend);
                has_delete = true;
                break;
            }
        }
        if (!has_delete) {
            spinlock_unlock(&cur_task->lock);
        }
    }

    /* poll with new sessions */
    spinlock_lock(&cur_task->lock);
    int i = 0;
    DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
    {
        if (i >= arr_capacity) {
            break;
        }

        if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
            // client had closed it, then server will close it too
            struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);

            if (!session_backend->server_side.closed) {
                session_backend->server_side.closed = true;
                xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);
            }
            xizi_share_page_manager.delete_share_pages(session_backend);
            break;
        }

        userland_session_arr[i++] = (struct Session) {
            .buf = (void*)server_session->buf_addr,
            .capacity = server_session->capacity,

@ -111,7 +101,6 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
    if (LIKELY(i < arr_capacity)) {
        userland_session_arr[i].buf = 0;
    }
    spinlock_unlock(&cur_task->lock);

    return 0;
}
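The new head comparison in the poll path is what wakes blocked clients: if the head the server reports has advanced past the stored one, a message was consumed, so the waiting client is unblocked, or merely flagged as handled if it has not blocked yet. Condensed, with simplified stand-in types rather than the kernel structs:

#include <stdbool.h>

enum { READY_ST, BLOCKED_ST };
struct client { int state; bool current_ipc_handled; }; /* stand-in */

static void on_session_progress(struct client* c,
                                unsigned stored_head, unsigned polled_head)
{
    if (stored_head == polled_head)
        return; /* nothing consumed this round */
    if (c->state == BLOCKED_ST) {
        /* kernel: xizi_task_manager.task_unblock(client) */
    } else {
        c->current_ipc_handled = true; /* handled before the client blocked */
    }
}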
@ -34,55 +34,90 @@ Modification:
#include "actracer.h"
#include "assert.h"
#include "ipc.h"
#include "kalloc.h"
#include "mmu_common.h"
#include "multicores.h"
#include "share_page.h"
#include "syscall.h"
#include "task.h"

static struct TaskMicroDescriptor kernel_irq_proxy;
static struct TaskMicroDescriptor* kernel_irq_proxy;
static struct {
    struct TaskMicroDescriptor* handle_task;
    struct Session* session;
    struct Session session;
    struct session_backend* p_kernel_session;
    int opcode;
} irq_forward_table[NR_IRQS];

static void send_irq_to_user(int irq_num)
{
    struct Session* session = &irq_forward_table[irq_num].session;
    int len = IPC_ARG_INFO_BASE_OFFSET;
    /* add session tail */
    struct IpcMsg* buf = irq_forward_table[irq_num].session->buf + irq_forward_table[irq_num].session->tail;
    irq_forward_table[irq_num].session->tail += len;
    len += sizeof(struct IpcArgInfo);

    /* get message space and add session tail */
    void* session_kern_vaddr = P2V(xizi_pager.address_translate(&kernel_irq_proxy->pgdir, (uintptr_t)session->buf));
    struct IpcMsg* buf = session_kern_vaddr + session->tail;

    /* check if server session is full */
    if (buf->header.magic == IPC_MSG_MAGIC && buf->header.done == 0) {
        DEBUG("irq server cannot handle new interrupt by now.\n");
        return;
    }
    memset((void*)buf, 0, len);
    session->tail = (session->tail + len) % session->capacity;

    /* construct message */
    buf->header.len = len;
    buf->header.nr_args = 0;
    buf->header.nr_args = 1;
    buf->header.init = 1;
    buf->header.opcode = irq_forward_table[irq_num].opcode;
    buf->header.done = 0;
    buf->header.magic = IPC_MSG_MAGIC;
    buf->header.valid = 1;

    /* add session head */
    irq_forward_table[irq_num].session->head += len;
    session->head = (session->head + len) % session->capacity;
}

int user_irq_handler(int irq, void* tf, void* arg)
{
    send_irq_to_user(irq);
    next_task_emergency = irq_forward_table[irq].handle_task;
    xizi_task_manager.cur_task_yield_noschedule();
    if (irq_forward_table[irq].handle_task != NULL) {
        send_irq_to_user(irq);

        next_task_emergency = irq_forward_table[irq].handle_task;
        if (cur_cpu()->task != NULL) {
            xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
        }
    }
    return 0;
}

extern int create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session);
extern struct session_backend* create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session);
/// @warning not tested.

static struct XiziTrapDriver* p_intr_driver = NULL;
int sys_register_irq(int irq_num, int irq_opcode)
{
    static struct TraceTag intr_ac_tag;
    if (!AchieveResourceTag(&intr_ac_tag, RequireRootTag(), "hardkernel/intr-ac-resource")) {
        ERROR("intr not initialized.\n");
        return -1;
    // init intr resource;
    if (p_intr_driver == NULL) {
        struct TraceTag intr_ac_tag;
        if (!AchieveResourceTag(&intr_ac_tag, RequireRootTag(), "hardkernel/intr-ac-resource")) {
            ERROR("intr not initialized.\n");
            return -1;
        }
        p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&intr_ac_tag);
    }
    struct XiziTrapDriver* p_intr_driver = AchieveResource(&intr_ac_tag);

    // init kernel sender proxy
    if (kernel_irq_proxy == NULL) {
        kernel_irq_proxy = xizi_task_manager.new_task_cb();
        kernel_irq_proxy->state = NEVER_RUN;
        xizi_pager.new_pgdir(&kernel_irq_proxy->pgdir);
        memcpy(kernel_irq_proxy->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
    }

    // bind irq to session
    if (p_intr_driver->sw_irqtbl[irq_num].handler != NULL) {
        ERROR("irq %d is occupied.\n", irq_num);
        return -1;

@ -90,8 +125,32 @@ int sys_register_irq(int irq_num, int irq_opcode)
    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    irq_forward_table[irq_num].handle_task = cur_task;
    irq_forward_table[irq_num].opcode = irq_opcode;
    create_session_inner(&kernel_irq_proxy, cur_task, PAGE_SIZE, irq_forward_table[irq_num].session);
    irq_forward_table[irq_num].p_kernel_session = create_session_inner(kernel_irq_proxy, cur_task, PAGE_SIZE, &irq_forward_table[irq_num].session);
    p_intr_driver->bind_irq_handler(irq_num, user_irq_handler);
    cur_task->bind_irq = true;

    return 0;
}

int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
{
    if (irq_forward_table[irq_num].handle_task != task) {
        return -1;
    }

    irq_forward_table[irq_num].handle_task = NULL;
    sys_close_session(&irq_forward_table[irq_num].session);
    DEBUG("Unbind: %s to irq %d", task->name, irq_num);
    return 0;
}

int sys_unbind_irq_all(struct TaskMicroDescriptor* task)
{
    for (int idx = 0; idx < NR_IRQS; idx++) {
        if (irq_forward_table[idx].handle_task == task) {
            sys_unbind_irq(task, idx);
        }
    }
    task->bind_irq = false;
    return 0;
}
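Seen from userland, the mechanism above turns an interrupt into an ordinary IPC message: a driver task registers an IRQ with an opcode, and the kernel proxy (kernel_irq_proxy) enqueues one message per interrupt into the driver's server session. A hypothetical user-side loop; the wrapper names and header are assumptions, only the syscalls they map to appear in this diff:

#include "usyscall.h" /* assumed userland header exposing struct Session */

#define TIMER_IRQ 27        /* hypothetical IRQ number */
#define OPCODE_TIMER_TICK 1 /* opcode passed to sys_register_irq */

extern int register_irq(int irq_num, int opcode);       /* -> SYSCALL_REGISTER_IRQ */
extern int poll_sessions(struct Session* arr, int cap); /* -> sys_poll_session */

void driver_main(void)
{
    register_irq(TIMER_IRQ, OPCODE_TIMER_TICK);
    struct Session sessions[8];
    for (;;) {
        poll_sessions(sessions, 8);
        /* each pending IpcMsg carrying OPCODE_TIMER_TICK is one interrupt
           forwarded by send_irq_to_user(); mark it done to free the slot */
    }
}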
@ -33,8 +33,8 @@ Modification:
#include "syscall.h"
#include "task.h"

extern int task_exec(struct TaskMicroDescriptor* task, struct Session* session, int fd, ipc_read_fn ipc_read, char* name, char** argv);
int sys_spawn(struct KernReadTool* read_tool, char* name, char** argv)
extern int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, char** argv);
int sys_spawn(char* img_start, char* name, char** argv)
{
    // alloc a new pcb
    struct TaskMicroDescriptor* new_task_cb = xizi_task_manager.new_task_cb();

@ -44,16 +44,12 @@ int sys_spawn(struct KernReadTool* read_tool, char* name, char** argv)
    }
    // init trapframe
    arch_init_trapframe(new_task_cb->main_thread.trapframe, 0, 0);

    struct Session* session = read_tool->session;
    int fd = read_tool->fd;
    ipc_read_fn ipc_read = read_tool->ipc_read;
    if (UNLIKELY(task_exec(new_task_cb, session, fd, ipc_read, name, argv)) < 0) {
    if (UNLIKELY(task_exec(new_task_cb, img_start, name, argv)) < 0) {
        xizi_task_manager.free_pcb(new_task_cb);
        return -1;
    }
    // init pcb
    xizi_task_manager.task_set_default_schedule_attr(new_task_cb, RequireRootTag());
    xizi_task_manager.task_set_default_schedule_attr(new_task_cb);

    return 0;
}
@ -55,8 +55,17 @@ static inline void _padding(char* name)
void show_tasks(void)
{
    struct TaskMicroDescriptor* task = NULL;
    DEBUG_PRINTF("******************************************************\n");
    DEBUG_PRINTF("STAT ID TASK PRI LEFT_TICKS\n");
    LOG_PRINTF("******************************************************\n");
    for (int i = 0; i < NR_CPU; i++) {
        LOG_PRINTF("CPU %d: ", i);
        if (global_cpus[i].task != NULL) {
            LOG_PRINTF("%s\n", global_cpus[i].task->name);
        } else {
            LOG_PRINTF("NULL\n");
        }
    }
    LOG_PRINTF("******************************************************\n");
    LOG_PRINTF("STAT ID TASK PRI MEM(KB)\n");
    for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
        if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) {
            continue;

@ -64,19 +73,27 @@ void show_tasks(void)
        DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[i], node)
        {
            if (task->state == INIT)
                DEBUG_PRINTF(" INIT ");
                LOG_PRINTF(" INIT ");
            else if (task->state == READY)
                DEBUG_PRINTF(" READY ");
                LOG_PRINTF(" READY ");
            else if (task->state == RUNNING)
                DEBUG_PRINTF("RUNNING ");
                LOG_PRINTF("RUNNING ");
            else if (task->state == DEAD)
                DEBUG_PRINTF(" DEAD ");
                LOG_PRINTF(" DEAD ");

            _padding(task->name);
            DEBUG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->remain_tick);
            LOG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
        }
    }
    DEBUG_PRINTF("******************************************************\n");

    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
    {
        LOG_PRINTF(" BLOCK ");
        _padding(task->name);
        LOG_PRINTF(" %d %s %d %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
    }

    LOG_PRINTF("******************************************************\n");
    return;
}

@ -85,8 +102,8 @@ extern struct KBuddy kern_virtmem_buddy;
extern uint32_t kernel_data_end[];
void show_mem(void)
{
    DEBUG_PRINTF("*********************************************************\n");
    DEBUG_PRINTF(" TOTAL(KB) USED(KB) FREE(KB) \n");
    LOG_PRINTF("*********************************************************\n");
    LOG_PRINTF(" TOTAL(KB) USED(KB) FREE(KB) \n");

    uint32_t total = (PHY_MEM_STOP - V2P(kernel_data_end)) >> 10;
    uint32_t used = 0;

@ -96,14 +113,14 @@ void show_mem(void)
    }
    used = used >> 10;

    DEBUG_PRINTF(" %d %d %d\n", total, total - used, used);
    DEBUG_PRINTF("*********************************************************\n");
    LOG_PRINTF(" %d %d %d\n", total, total - used, used);
    LOG_PRINTF("*********************************************************\n");
    return;
}

void show_cpu(void)
{
    DEBUG_PRINTF("**********************************************************\n");
    LOG_PRINTF("**********************************************************\n");
#ifdef ARCH_SMP
    /// @todo support smp
    KPrintf(" cpu VALUE \n");

@ -116,10 +133,10 @@ void show_cpu(void)

    _padding(current_task->name);

    DEBUG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n");
    DEBUG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick);
    LOG_PRINTF(" ID COMMAND USED_TICKS FREE_TICKS \n");
    LOG_PRINTF(" %d %s %d %d\n", cpu_id, current_task->name, TASK_CLOCK_TICK - current_task->remain_tick, current_task->remain_tick);

    DEBUG_PRINTF("***********************************************************\n");
    LOG_PRINTF("***********************************************************\n");
    return;
}

@ -129,7 +146,7 @@ int sys_state(sys_state_option option, sys_state_info* info)
        info->memblock_info.memblock_start = (uintptr_t)V2P(_binary_fs_img_start);
        info->memblock_info.memblock_end = (uintptr_t)V2P(_binary_fs_img_end);
    } else if (option == SYS_STATE_GET_HEAP_BASE) {
        return cur_cpu()->task->mem_size;
        return cur_cpu()->task->heap_base;
    } else if (option == SYS_STATE_SET_TASK_PRIORITY) {
        xizi_task_manager.set_cur_task_priority(info->priority);
    } else if (option == SYS_STATE_SHOW_TASKS) {
@ -33,8 +33,18 @@ Modification:

#include "log.h"

int sys_yield()
int sys_yield(task_yield_reason reason)
{
    xizi_task_manager.cur_task_yield_noschedule();
    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    xizi_task_manager.task_yield_noschedule(cur_task, false);

    // handle ipc block
    if ((reason & SYS_TASK_YIELD_BLOCK_IPC) != 0) {
        if (cur_task->current_ipc_handled) {
            cur_task->current_ipc_handled = false;
        } else {
            xizi_task_manager.task_block(cur_task);
        }
    }
    return 0;
}
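The current_ipc_handled flag closes a race in the blocking yield: if the server finished the request between the client's send and its SYS_TASK_YIELD_BLOCK_IPC yield, blocking would strand the client, so a set flag turns the block into a one-shot no-op. Condensed:

#include <stdbool.h>

/* stand-in for the task fields used by sys_yield's IPC-block path */
struct task { bool current_ipc_handled; };

/* returns true if the caller should actually block */
static bool should_block_for_ipc(struct task* t)
{
    if (t->current_ipc_handled) {
        t->current_ipc_handled = false; /* reply already arrived: consume flag */
        return false;                   /* skip blocking this once */
    }
    return true; /* reply still pending: safe to block */
}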
@ -28,6 +28,7 @@ Modification:
1. first version
*************************************************/
#include "log.h"
#include "multicores.h"
#include "trap_common.h"

#include "syscall.h"

@ -41,13 +42,13 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
        ret = 0;
        break;
    case SYSCALL_SPAWN:
        ret = sys_spawn((struct KernReadTool*)param1, (char*)param2, (char**)param3);
        ret = sys_spawn((char*)param1, (char*)param2, (char**)param3);
        break;
    case SYSCALL_EXIT:
        ret = sys_exit();
        ret = sys_exit(cur_cpu()->task);
        break;
    case SYSCALL_YIELD:
        ret = sys_yield();
        ret = sys_yield((task_yield_reason)param1);
        break;
    case SYSCALL_SERVER:
        ret = sys_register_as_server((char*)param1);

@ -62,7 +63,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
        ret = sys_close_session((struct Session*)param1);
        break;
    case SYSCALL_EXEC:
        ret = sys_exec((struct KernReadTool*)param1, (char*)param2, (char**)param3);
        ret = sys_exec((char*)param1, (char*)param2, (char**)param3);
        break;
    case SYSCALL_SYS_STATE:
        ret = sys_state(param1, (sys_state_info*)param2);

@ -73,6 +74,9 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
    case SYSCALL_REGISTER_IRQ:
        ret = sys_register_irq((int)param1, (int)param2);
        break;
    case SYSCALL_KILL:
        ret = sys_kill((int)param1);
        break;

    default:
        ERROR("Unsupported syscall(%d) right now\n", sys_num);
@ -1,3 +1,3 @@
SRC_FILES := task.c scheduler.c spawn_default_task.c
SRC_FILES := task.c schedule.c

include $(KERNEL_ROOT)/compiler.mk
@ -38,20 +38,13 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void)

    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
    {
        spinlock_lock(&task->lock);
        if (task->state == READY) {
        if (task->state == READY && !task->dead) {
            // found a runnable task, stop this look up
            task->state = RUNNING;
            spinlock_unlock(&task->lock);
            return task;
        } else if (task->state == DEAD) {
            // found a killed task, stop this loop
            // change in pcb_list may break this loop, so find a runnable in next look up
            spinlock_unlock(&task->lock);
        } else if (task->dead && task->state != RUNNING) {
            xizi_task_manager.free_pcb(task);
            return NULL;
        }
        spinlock_unlock(&task->lock);
    }
    return NULL;
}

@ -62,21 +55,13 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)

    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
    {

        spinlock_lock(&task->lock);
        if (task->state == READY) {
        if (task->state == READY && !task->dead) {
            // found a runnable task, stop this look up
            spinlock_unlock(&task->lock);
            task->state = RUNNING;
            return task;
        } else if (task->state == DEAD) {
            // found a killed task, stop this loop
            // change in pcb_list may break this loop, so find a runnable in next look up
            spinlock_unlock(&task->lock);
        } else if (task->dead && task->state != RUNNING) {
            xizi_task_manager.free_pcb(task);
            return NULL;
        }
        spinlock_unlock(&task->lock);
    }

    return NULL;
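Both pickers scan a single priority list; which list to scan comes from the ready_task_priority bitmask maintained in task.c, where a set bit k means the priority-k list is non-empty. A sketch of the lowest-set-bit lookup that pairs with the mask updates visible in this diff (assuming lower index = higher priority, as the list indexing suggests):

#include <stdint.h>

/* bit k set <=> ready list for priority k is non-empty */
static uint32_t ready_task_priority;

/* index of the first non-empty list, or -1 if none are ready */
static int first_ready_priority(void)
{
    uint32_t mask = ready_task_priority;
    if (mask == 0)
        return -1;
    int prio = 0;
    while ((mask & 1u) == 0) { /* find lowest set bit */
        mask >>= 1;
        prio++;
    }
    return prio;
}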
@ -1,151 +0,0 @@
/*
 * Copyright (c) 2020 AIIT XUOS Lab
 * XiUOS is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 * http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
/**
 * @file spawn_default_task.c
 * @brief spawn task that is embedded in kernel image
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2023.08.25
 */

/*************************************************
File name: spawn_default_task.c
Description: spawn task that is embedded in kernel image
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "actracer.h"
#include "assert.h"
#include "kalloc.h"
#include "task.h"

#include "execelf.h"

int spawn_embedded_task(char* img_start, char* name, char** argv)
{
    struct TaskMicroDescriptor* new_task_cb = xizi_task_manager.new_task_cb();
    if (UNLIKELY(!new_task_cb)) {
        ERROR("Unable to new task control block.\n");
        return -1;
    }
    // init trapframe
    arch_init_trapframe(new_task_cb->main_thread.trapframe, 0, 0);

    /* load img to task */
    /* 1. load elf header */
    struct elfhdr* elf = (struct elfhdr*)img_start;
    // pgdir for new task
    struct TopLevelPageDirectory pgdir;
    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
        ERROR("create new pgdir failed.\n");
        goto error_exec;
    }
    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);

    /* 2. load elf content */
    uint32_t load_size = 0;
    struct proghdr ph;
    for (int sec_idx = 0, off = elf->phoff; sec_idx < elf->phnum; sec_idx++, off += sizeof(ph)) {
        // load proghdr
        memcpy((char*)&ph, img_start + off, sizeof(ph));

        if (ph.type != ELF_PROG_LOAD)
            continue;
        if (ph.memsz < ph.filesz) {
            ERROR("elf header mem size less than file size\n");
            goto error_exec;
        }

        // read section
        // 1. alloc space
        if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
            != ph.vaddr + ph.memsz) {
            goto error_exec;
        }
        // 2. copy inode to space
        assert(ph.vaddr % PAGE_SIZE == 0);
        for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
            if (page_paddr == 0) {
                panic("copy elf file to unmapped addr");
            }
            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
            memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);
        }
    }

    /// elf file content now in memory
    // alloc stack page and map to TOP of user vspace
    uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE);
    if (UNLIKELY(stack_bottom == NULL)) {
        ERROR("No memory.\n");
        goto error_exec;
    }
    xizi_pager.map_pages(pgdir.pd_addr, USER_MEM_TOP - USER_STACK_SIZE, V2P(stack_bottom), USER_STACK_SIZE, false);

    uintptr_t user_vspace_sp = USER_MEM_TOP;
    /// @todo change 32 to some macro
    uintptr_t user_stack_init[32];
    uintptr_t argc = 0;
    uintptr_t copy_len = 0;
    for (argc = 0; argv != NULL && argv[argc] != NULL; argc++) {
        /// @todo handle with large number of parameters

        // copy param to user stack
        copy_len = strlen(argv[argc]) + 1;
        user_vspace_sp = (user_vspace_sp - copy_len) & ~3;
        uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)argv[argc], copy_len);
        if (UNLIKELY(copied_len != copy_len)) {
            ERROR("Something went wrong when copying params.\n");
            goto error_exec;
        }
        user_stack_init[argc] = user_vspace_sp;
    }
    user_stack_init[argc] = 0;
    copy_len = (argc + 1) * sizeof(uintptr_t);
    user_vspace_sp -= copy_len;
    uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)user_stack_init, copy_len);
    if (UNLIKELY(copied_len != copy_len)) {
        ERROR("Something went wrong when copying params.\n");
        goto error_exec;
    }

    // init task trapframe, which stores in svc stack
    // do not go to error_exec once we change trapframe!
    assert(copied_len == (argc + 1) * sizeof(uintptr_t));
    arch_trapframe_set_sp_pc(new_task_cb->main_thread.trapframe, user_vspace_sp, elf->entry);
    arch_set_main_params(new_task_cb->main_thread.trapframe, argc, user_vspace_sp);

    // save program name
    strncpy(new_task_cb->name, name, sizeof(new_task_cb->name));

    struct TopLevelPageDirectory old_pgdir = new_task_cb->pgdir;
    new_task_cb->pgdir = pgdir;

    /// @todo record mem size used by task
    new_task_cb->mem_size = ALIGNUP(load_size, PAGE_SIZE);

    xizi_pager.free_user_pgdir(&old_pgdir);

    xizi_task_manager.task_set_default_schedule_attr(new_task_cb, RequireRootTag());
    return 0;

error_exec:
    if (pgdir.pd_addr != NULL) {
        xizi_pager.free_user_pgdir(&pgdir);
    }
    return -1;
}
@ -32,10 +32,11 @@ Modification:
|
|||
#include "core.h"
|
||||
|
||||
#include "assert.h"
|
||||
#include "kalloc.h"
|
||||
#include "log.h"
|
||||
#include "multicores.h"
|
||||
#include "kalloc.h"
|
||||
#include "scheduler.h"
|
||||
#include "syscall.h"
|
||||
#include "task.h"
|
||||
|
||||
struct CPU global_cpus[NR_CPU];
|
||||
|
@ -43,14 +44,14 @@ uint32_t ready_task_priority;
|
|||
|
||||
static void _task_manager_init()
|
||||
{
|
||||
// init lock for task list
|
||||
spinlock_init(&xizi_task_manager.lock, "proclist");
|
||||
// init task list to NULL
|
||||
for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
|
||||
doubleListNodeInit(&xizi_task_manager.task_list_head[i]);
|
||||
}
|
||||
doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
|
||||
// init task (slab) allocator
|
||||
slab_init(&xizi_task_manager.task_allocator, sizeof(struct TaskMicroDescriptor));
|
||||
slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy));
|
||||
|
||||
// pid pool
|
||||
xizi_task_manager.next_pid = 0;
|
||||
|
@ -62,24 +63,72 @@ static void _task_manager_init()
|
|||
/// @brief alloc a new task without init
|
||||
static struct TaskMicroDescriptor* _alloc_task_cb()
|
||||
{
|
||||
spinlock_lock(&xizi_task_manager.lock);
|
||||
// alloc task and add it to used task list
|
||||
struct TaskMicroDescriptor* task = (struct TaskMicroDescriptor*)slab_alloc(&xizi_task_manager.task_allocator);
|
||||
if (UNLIKELY(task == NULL)) {
|
||||
ERROR("Not enough memory\n");
|
||||
spinlock_unlock(&xizi_task_manager.lock);
|
||||
return NULL;
|
||||
}
|
||||
// set pid once task is allocated
|
||||
memset(task, 0, sizeof(*task));
|
||||
task->pid = xizi_task_manager.next_pid++;
|
||||
// update pcb used
|
||||
xizi_task_manager.nr_pcb_used += 1;
|
||||
|
||||
spinlock_unlock(&xizi_task_manager.lock);
|
||||
return task;
|
||||
}
|
||||
|
||||
int _task_retrieve_sys_resources(struct TaskMicroDescriptor* ptask)
|
||||
{
|
||||
assert(ptask != NULL);
|
||||
|
||||
/* handle sessions for condition 1, ref. delete_share_pages() */
|
||||
// close all server_sessions
|
||||
struct server_session* server_session = NULL;
|
||||
while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
|
||||
server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
|
||||
// cut the connection from task to session
|
||||
if (!server_session->closed) {
|
||||
xizi_share_page_manager.unmap_task_share_pages(ptask, server_session->buf_addr, CLIENT_SESSION_BACKEND(server_session)->nr_pages);
|
||||
server_session->closed = true;
|
||||
}
|
||||
doubleListDel(&server_session->node);
|
||||
SERVER_SESSION_BACKEND(server_session)->server = NULL;
|
||||
// delete session (also cut connection from session to task)
|
||||
if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
|
||||
xizi_share_page_manager.delete_share_pages(SERVER_SESSION_BACKEND(server_session));
|
||||
}
|
||||
}
|
||||
// close all client_sessions
|
||||
struct client_session* client_session = NULL;
|
||||
while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
|
||||
client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
|
||||
// cut the connection from task to session
|
||||
if (!client_session->closed) {
|
||||
xizi_share_page_manager.unmap_task_share_pages(ptask, client_session->buf_addr, CLIENT_SESSION_BACKEND(client_session)->nr_pages);
|
||||
client_session->closed = true;
|
||||
}
|
||||
doubleListDel(&client_session->node);
|
||||
CLIENT_SESSION_BACKEND(client_session)->client = NULL;
|
||||
// delete session (also cut connection from session to task)
|
||||
if (CLIENT_SESSION_BACKEND(client_session)->server_side.closed) {
|
||||
xizi_share_page_manager.delete_share_pages(CLIENT_SESSION_BACKEND(client_session));
|
||||
}
|
||||
}
|
||||
|
||||
if (ptask->server_identifier.meta != NULL) {
|
||||
struct TraceTag server_identifier_owner;
|
||||
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
|
||||
assert(server_identifier_owner.meta != NULL);
|
||||
DeleteResource(&ptask->server_identifier, &server_identifier_owner);
|
||||
}
|
||||
|
||||
// delete registered irq if there is one
|
||||
if (ptask->bind_irq) {
|
||||
sys_unbind_irq_all(ptask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// @brief this function changes task list without locking, so it must be called inside a lock critical area
|
||||
/// @param task
|
||||
static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
|
||||
|
@ -89,6 +138,8 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
|
|||
return;
|
||||
}
|
||||
|
||||
_task_retrieve_sys_resources(task);
|
||||
|
||||
// stack is mapped in vspace, so it should be free by pgdir
|
||||
if (task->pgdir.pd_addr) {
|
||||
xizi_pager.free_user_pgdir(&task->pgdir);
|
||||
|
@ -102,8 +153,11 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
|
|||
doubleListDel(cur_node);
|
||||
|
||||
// free task back to allocator
|
||||
if (task->massive_ipc_allocator != NULL) {
|
||||
KBuddyDestory(task->massive_ipc_allocator);
|
||||
slab_free(&xizi_task_manager.task_buddy_allocator, (void*)task->massive_ipc_allocator);
|
||||
}
|
||||
slab_free(&xizi_task_manager.task_allocator, (void*)task);
|
||||
xizi_task_manager.nr_pcb_used -= 1;
|
||||
|
||||
// remove priority
|
||||
if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
|
||||
|
@ -112,6 +166,13 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
|
|||
}
|
||||
|
||||
/* alloc a new task with init */
|
||||
extern void trap_return(void);
|
||||
void task_prepare_enter()
|
||||
{
|
||||
xizi_leave_kernel();
|
||||
trap_return();
|
||||
}
|
||||
|
||||
static struct TaskMicroDescriptor* _new_task_cb()
|
||||
{
|
||||
// alloc task space
|
||||
|
@ -120,10 +181,7 @@ static struct TaskMicroDescriptor* _new_task_cb()
|
|||
return NULL;
|
||||
}
|
||||
// init vm
|
||||
if (!xizi_pager.new_pgdir(&task->pgdir)) {
|
||||
_dealloc_task_cb(task);
|
||||
return NULL;
|
||||
}
|
||||
task->pgdir.pd_addr = NULL;
|
||||
/* init basic task member */
|
||||
doubleListNodeInit(&task->cli_sess_listhead);
|
||||
doubleListNodeInit(&task->svr_sess_listhead);
|
||||
|
@ -153,13 +211,11 @@ static struct TaskMicroDescriptor* _new_task_cb()
|
|||
return task;
|
||||
}
|
||||
|
||||
static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task, struct TraceTag* cwd)
|
||||
static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task)
|
||||
{
|
||||
spinlock_init(&task->lock, task->name);
|
||||
task->remain_tick = TASK_CLOCK_TICK;
|
||||
task->maxium_tick = TASK_CLOCK_TICK * 10;
|
||||
task->state = READY;
|
||||
task->cwd = *cwd;
|
||||
task->priority = TASK_DEFAULT_PRIORITY;
|
||||
doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
|
||||
ready_task_priority |= (1 << task->priority);
|
||||
|
@ -170,70 +226,81 @@ extern void context_switch(struct context**, struct context*);
|
|||
static void _scheduler(struct SchedulerRightGroup right_group)
|
||||
{
|
||||
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
|
||||
|
||||
struct TaskMicroDescriptor* next_task;
|
||||
|
||||
while (1) {
|
||||
spinlock_lock(&xizi_task_manager.lock);
|
||||
next_task = NULL;
|
||||
/* find next runnable task */
|
||||
assert(cur_cpu()->task == NULL);
|
||||
if (next_task_emergency != NULL) {
|
||||
if (next_task_emergency != NULL && next_task->state == READY) {
|
||||
next_task = next_task_emergency;
|
||||
spinlock_lock(&next_task->lock);
|
||||
next_task->state = RUNNING;
|
||||
spinlock_unlock(&next_task->lock);
|
||||
next_task_emergency = NULL;
|
||||
} else {
|
||||
next_task = xizi_task_manager.next_runnable_task();
|
||||
}
|
||||
spinlock_unlock(&xizi_task_manager.lock);
|
||||
next_task_emergency = NULL;
|
||||
if (next_task != NULL) {
|
||||
assert(next_task->state == READY);
|
||||
}
|
||||
spinlock_unlock(&whole_kernel_lock);
|
||||
|
||||
/* not a runnable task */
|
||||
if (UNLIKELY(next_task == NULL)) {
|
||||
spinlock_lock(&whole_kernel_lock);
|
||||
continue;
|
||||
}
|
||||
assert(next_task->state == RUNNING);
|
||||
// p_mmu_driver->LoadPgdirCrit((uintptr_t)V2P(next_task->pgdir.pd_addr), &right_group.intr_driver_tag);
|
||||
p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
|
||||
|
||||
/* a runnable task */
|
||||
spinlock_lock(&whole_kernel_lock);
|
||||
if (next_task->state == READY) {
|
||||
next_task->state = RUNNING;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
struct CPU* cpu = cur_cpu();
|
||||
cpu->task = next_task;
|
||||
p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
|
||||
context_switch(&cpu->scheduler, next_task->main_thread.context);
|
||||
assert(cur_cpu()->task == NULL);
|
||||
assert(next_task->state != RUNNING);
|
||||
}
|
||||
}
|
||||

static uint32_t yield_cnt = 0;
static void _cur_task_yield_noschedule(void)
static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
{
    yield_cnt++;

    spinlock_lock(&xizi_task_manager.lock);
    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
    assert(current_task != NULL);
    assert(task != NULL);

    // rearrange current task position
    doubleListDel(&current_task->node);
    // DEBUG("%s,%d\n", current_task->name, strcmp(current_task->name, name1));
    if (current_task->maxium_tick <= 0) {
        if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[current_task->priority])) {
            ready_task_priority &= ~(1 << current_task->priority);
        }
        current_task->priority += 1;
        current_task->maxium_tick = TASK_CLOCK_TICK * 10;
    doubleListDel(&task->node);
    if (task->state == RUNNING) {
        task->state = READY;
    }
    doubleListAddOnBack(&current_task->node, &xizi_task_manager.task_list_head[current_task->priority]);
    ready_task_priority |= (1 << current_task->priority);
    // set current task state
    spinlock_lock(&current_task->lock);
    current_task->state = READY;
    current_task->remain_tick = TASK_CLOCK_TICK;
    spinlock_unlock(&current_task->lock);
    cur_cpu()->task = NULL;
    if (yield_cnt == 50) {
        recover_priority();
        yield_cnt = 0;
    task->remain_tick = TASK_CLOCK_TICK;
    if (task == cur_cpu()->task) {
        cur_cpu()->task = NULL;
    }
    spinlock_unlock(&xizi_task_manager.lock);
    doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
}

static void _task_block(struct TaskMicroDescriptor* task)
{
    assert(task != NULL);
    assert(task->state != RUNNING);
    doubleListDel(&task->node);
    if (xizi_task_manager.task_list_head[task->priority].next == &xizi_task_manager.task_list_head[task->priority]) {
        ready_task_priority &= ~(1 << task->priority);
    }
    task->state = BLOCKED;
    doubleListAddOnHead(&task->node, &xizi_task_manager.task_blocked_list_head);
}

static void _task_unblock(struct TaskMicroDescriptor* task)
{
    assert(task != NULL);
    assert(task->state == BLOCKED);
    doubleListDel(&task->node);
    task->state = READY;
    doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
    ready_task_priority |= (1 << task->priority);
}

static void _set_cur_task_priority(int priority)
@ -264,7 +331,10 @@ struct XiziTaskManager xizi_task_manager = {

    .next_runnable_task = max_priority_runnable_task,
    .task_scheduler = _scheduler,
    .cur_task_yield_noschedule = _cur_task_yield_noschedule,

    .task_block = _task_block,
    .task_unblock = _task_unblock,
    .task_yield_noschedule = _task_yield_noschedule,
    .set_cur_task_priority = _set_cur_task_priority
};
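A note on the bookkeeping used throughout this file: ready_task_priority is a bitmap mirroring which per-priority ready lists are nonempty. max_priority_runnable_task is not shown in this commit; assuming lower indices mean higher priority (consistent with the demotion via priority += 1 above), a selection over that bitmap can be sketched as:

    /* Sketch only: lowest set bit = best nonempty ready queue. */
    static inline int best_ready_priority(uint32_t ready_mask)
    {
        if (ready_mask == 0) {
            return -1;                    /* no runnable task at any priority */
        }
        return __builtin_ctz(ready_mask); /* index of the lowest set bit */
    }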

@ -1,6 +1,7 @@
SRC_FILES := default_irq_handler.c \
             clock_irq_handler.c \
             software_irq_handler.c
             software_irq_handler.c \
             abort_handler.c

include $(KERNEL_ROOT)/compiler.mk
@ -0,0 +1,90 @@
/* Copyright (c) 2006-2018 Frans Kaashoek, Robert Morris, Russ Cox,
 * Massachusetts Institute of Technology
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * @file abort_handler.c
 * @brief handle program abort
 * @version 3.0
 * @author AIIT XUOS Lab
 * @date 2023.11.23
 */

/*************************************************
File name: abort_handler.c
Description: handle program abort
Others:
History:
1. Date: 2023-11-23
Author: AIIT XUOS Lab
Modification:
1. Add the iabort and dabort handlers (dabort_handler() and iabort_handler())
*************************************************/
#include "core.h"
#include "memlayout.h"
#include "spinlock.h"
#include "trap_common.h"

#include "assert.h"
#include "multicores.h"
#include "syscall.h"
#include "task.h"

extern void context_switch(struct context**, struct context*);
void dabort_handler(struct trapframe* r)
{
    if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
        assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
        ERROR("dabort in kernel, current task: %s\n", cur_cpu()->task == NULL ? "NULL" : cur_cpu()->task->name);
        dabort_reason(r);
        panic("data abort exception\n");
    }

    xizi_enter_kernel();

    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    ERROR("dabort in user space: %s\n", cur_task->name);
    dabort_reason(r);
    sys_exit(cur_task);
    assert(cur_cpu()->task == NULL);
    context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
    panic("dabort end should never be reached.\n");
}

void iabort_handler(struct trapframe* r)
{
    if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
        assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
        ERROR("iabort in kernel, current task: %s\n", cur_cpu()->task == NULL ? "NULL" : cur_cpu()->task->name);
        iabort_reason(r);
        panic("kernel prefetch abort exception\n");
    }

    xizi_enter_kernel();

    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    ERROR("iabort in user space: %s\n", cur_task->name);
    iabort_reason(r);
    sys_exit(cur_task);
    assert(cur_cpu()->task == NULL);
    context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
    panic("iabort end should never be reached.\n");
}
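dabort_reason() and iabort_reason() are defined elsewhere in the tree; on ARMv7-A (the i.MX6Q's Cortex-A9) such probes conventionally read the fault status and fault address registers. A hedged sketch of what the data-abort probe likely wraps (standard CP15 encodings from the ARM ARM, not this commit's code):

    /* Sketch: ARMv7-A data-abort probes (DFSR/DFAR). */
    static inline uint32_t read_dfsr(void)
    {
        uint32_t v;
        __asm__ volatile("mrc p15, 0, %0, c5, c0, 0" : "=r"(v)); /* Data Fault Status  */
        return v;
    }

    static inline uint32_t read_dfar(void)
    {
        uint32_t v;
        __asm__ volatile("mrc p15, 0, %0, c6, c0, 0" : "=r"(v)); /* Data Fault Address */
        return v;
    }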

@ -58,7 +58,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
        current_task->remain_tick--;
        current_task->maxium_tick--;
        if (current_task->remain_tick == 0) {
            xizi_task_manager.cur_task_yield_noschedule();
            xizi_task_manager.task_yield_noschedule(current_task, false);
        }
    }
}
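The handler above now decrements two per-task counters: remain_tick, the current time slice, and maxium_tick, the longer aging budget consumed by the task manager. How the slice check drives the new yield API, as a sketch (TASK_CLOCK_TICK's value is assumed, and the wrapper function is illustrative):

    /* Sketch: slice accounting implied by the hunk above. */
    #define TASK_CLOCK_TICK 100 /* assumed; the kernel defines this elsewhere */

    void on_clock_tick(struct TaskMicroDescriptor* t)
    {
        t->remain_tick--;
        t->maxium_tick--;
        if (t->remain_tick == 0) {
            /* requeue at the tail of its priority list; the actual switch
               happens on the way out of the interrupt in intr_irq_dispatch() */
            xizi_task_manager.task_yield_noschedule(t, false);
        }
    }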
@ -56,19 +56,18 @@ void default_interrupt_routine(void)
extern void context_switch(struct context**, struct context*);
void intr_irq_dispatch(struct trapframe* tf)
{
    assert(p_intr_driver != NULL);
    xizi_enter_kernel();

    p_intr_driver->cpu_irq_disable();
    // enter irq
    assert(p_intr_driver != NULL);
    uintptr_t int_info = 0;
    if ((int_info = p_intr_driver->hw_before_irq()) == 0) {
        return;
        goto intr_leave_interrupt;
    }

    struct TaskMicroDescriptor* current_task = cur_cpu()->task;
    if (LIKELY(current_task != NULL)) {
        current_task->main_thread.trapframe = tf;
    }
    assert(current_task != NULL);
    current_task->main_thread.trapframe = tf;

    unsigned cpu = p_intr_driver->hw_cur_int_cpu(int_info);
    unsigned irq = p_intr_driver->hw_cur_int_num(int_info);
@ -76,7 +75,7 @@ void intr_irq_dispatch(struct trapframe* tf)

    // distribute irq
    irq_handler_t isr = p_intr_driver->sw_irqtbl[irq].handler;
    if (isr) {
    if (isr != NULL) {
        isr(irq, tf, NULL);
    } else {
        default_interrupt_routine();
@ -86,10 +85,34 @@ void intr_irq_dispatch(struct trapframe* tf)
    p_intr_driver->curr_int[cpu] = 0;
    p_intr_driver->hw_after_irq(int_info);

    if (UNLIKELY(cur_cpu()->task == NULL && current_task != NULL)) {
    if (cur_cpu()->task == NULL || current_task->state != RUNNING) {
        cur_cpu()->task = NULL;
        context_switch(&current_task->main_thread.context, cur_cpu()->scheduler);
    }
    assert(current_task == cur_cpu()->task);

    p_intr_driver->cpu_irq_enable();
intr_leave_interrupt:
    xizi_leave_kernel();
}

void xizi_enter_kernel()
{
    /// @warning the trampoline is responsible for disabling interrupts
    spinlock_lock(&whole_kernel_lock);
}

bool xizi_try_enter_kernel()
{
    /// @warning the trampoline is responsible for disabling interrupts
    if (spinlock_try_lock(&whole_kernel_lock)) {
        return true;
    }

    return false;
}

void xizi_leave_kernel()
{
    /// @warning the trampoline is responsible for re-enabling interrupts using the user's saved state register
    spinlock_unlock(&whole_kernel_lock);
}
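The three helpers above make whole_kernel_lock the single bracket around every trap path, with the trampoline handling interrupt masking on entry and exit. Intended usage, sketched (the handler name is illustrative, not from the commit):

    /* Sketch: the bracket every trap entry is expected to follow. */
    void example_trap_entry(struct trapframe* tf)
    {
        xizi_enter_kernel();  /* IRQs already masked; take the big kernel lock */
        /* ... dispatch; may context_switch() away and come back ... */
        xizi_leave_kernel();  /* drop the lock; trap exit restores the user CPSR */
    }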

@ -48,9 +48,9 @@ bool swi_distributer_init(struct SwiDispatcherRightGroup* _right_group)
extern void context_switch(struct context**, struct context*);
void software_irq_dispatch(struct trapframe* tf)
{
    xizi_enter_kernel();
    assert(p_intr_driver != NULL);

    p_intr_driver->cpu_irq_disable();
    // get current task
    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
    /// @todo: Handle dead task
@ -59,21 +59,22 @@ void software_irq_dispatch(struct trapframe* tf)
    if (cur_task && cur_task->state != DEAD) {
        cur_task->main_thread.trapframe = tf;
        // call syscall

        int ret = arch_syscall(cur_task->main_thread.trapframe, &syscall_num);

        if (syscall_num != SYSCALL_EXEC) {
            arch_set_return(tf, ret);
        }
    } else {
        ERROR("syscall by killed task.\n");
    }

    if (cur_cpu()->task == NULL && cur_task != NULL) {
    if ((cur_cpu()->task == NULL && cur_task != NULL) || cur_task->state != RUNNING) {
        cur_cpu()->task = NULL;
        context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
    }
    assert(cur_task == cur_cpu()->task);
    if (syscall_num == SYSCALL_EXIT) {
        ERROR("Exit reaches");
        panic("Exit reaches");
    }
    p_intr_driver->cpu_irq_enable();

    assert(cur_task == cur_cpu()->task);
    xizi_leave_kernel();
}