Support whole kernel lock. TODO: fix spawn.

This commit is contained in:
TXuian 2024-03-14 14:28:11 +08:00
parent 01f4d45e0c
commit 3dc6d58bdb
11 changed files with 43 additions and 28 deletions

View File

@ -74,7 +74,7 @@ Modification:
#include "cortex_a9.h"
#define NR_CPU 2
#define NR_CPU 4
__attribute__((always_inline)) static inline uint32_t user_mode()
{
@ -107,11 +107,11 @@ struct context {
/// @brief init task context, set return address to trap return
/// @param
extern void trap_return(void);
extern void task_prepare_enter();
/* Initialize a fresh task context so the first context_switch into it
 * "returns" into kernel task-entry code rather than garbage.
 * NOTE(review): this span is diff residue — both the old lr target
 * (trap_return) and the new one (task_prepare_enter + 4) appear below;
 * the second assignment is the one that takes effect. */
__attribute__((__always_inline__)) static inline void arch_init_context(struct context* ctx)
{
/* Zero all saved registers; only lr is given a meaningful value. */
memset(ctx, 0, sizeof(*ctx));
ctx->lr = (uint32_t)(trap_return);
/* NOTE(review): the +4 offset presumably skips the first instruction of
 * task_prepare_enter's prologue (e.g. a push that context_switch's epilogue
 * already performed) — confirm against the generated assembly. */
ctx->lr = (uint32_t)(task_prepare_enter + 4);
}
struct trapframe {

View File

@ -81,7 +81,7 @@ MEMORY
{
ocram (rwx) : ORIGIN = 0x00900000, LENGTH = 256K
ddr3 (rwx) : ORIGIN = 0x10000000, LENGTH = 1024M
virt_ddr3 (WRX) : ORIGIN = 0x90010000, LENGTH = 1024M
virt_ddr3 (WRX) : ORIGIN = 0x90011000, LENGTH = 1024M
}
SECTIONS
@ -155,7 +155,7 @@ SECTIONS
} > ddr3
/* Other Kernel code is placed over 0x80000000 + 128KB. */
.text : AT(0x10010000) {
.text : AT(0x10011000) {
*(.vectors)
. = ALIGN(0x1000);
*(.text .text.* .gnu.linkonce.t.*)

View File

@ -131,7 +131,6 @@ static bool xizi_gpt_init()
struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&intr_driver_tag);
p_intr_driver->bind_irq_handler(p_clock_driver->get_clock_int(), xizi_clock_handler);
p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), 0, 0);
return true;
}

View File

@ -89,12 +89,16 @@ static void load_boot_pgdir()
}
extern void main(void);
static bool _bss_inited = false;
/* Early boot entry run by every CPU after low-level startup:
 * builds/loads the boot page directory, relocates the stack pointer into
 * the kernel's virtual address window, zeroes .bss exactly once, then
 * enters main(). */
void bootmain()
{
build_boot_pgdir();
load_boot_pgdir();
/* Rebase sp from its physical address to the equivalent kernel virtual
 * address now that paging is on (KERN_MEM_BASE - PHY_MEM_BASE is the
 * fixed phys->virt offset). */
__asm__ __volatile__("add sp, sp, %0" ::"r"(KERN_MEM_BASE - PHY_MEM_BASE));
// memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
/* Clear kernel data/bss only on the first CPU through here; secondary
 * cores re-enter bootmain and must not wipe already-initialized state.
 * NOTE(review): _bss_inited is read/written without synchronization —
 * presumably safe because core 0 finishes boot before secondaries start;
 * confirm against the secondary-CPU bring-up path. */
if (!_bss_inited) {
memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
_bss_inited = true;
}
main();
}

View File

@ -50,7 +50,8 @@ int main(void)
shellInit(&shell, shellBuffer, 512);
connect_session(&session_fs, "MemFS", 8092);
while (connect_session(&session_fs, "MemFS", 8092) < 0)
;
if (!session_fs.buf) {
printf("session connect faield\n");
return -1;

View File

@ -35,7 +35,7 @@ user_apps:
.section .rawdata_init
.globl initapp
initapp:
.incbin "../services/app/bin/init"
.incbin "../services/app/bin/shell"
.section .rawdata_memfs
.globl memfs

View File

@ -94,21 +94,18 @@ void cpu_start_secondary(uint8_t coreNumber, cpu_entry_point_t entryPoint, void*
switch (coreNumber) {
case 1:
HW_SRC_GPR3_WR((uint32_t)&_boot_start);
// HW_SRC_GPR4_WR((uint32_t)common_cpu_entry);
HW_SRC_SCR.B.CORE1_ENABLE = 1;
break;
case 2:
HW_SRC_GPR5_WR((uint32_t)&_boot_start);
// HW_SRC_GPR6_WR((uint32_t)common_cpu_entry);
HW_SRC_SCR.B.CORE2_ENABLE = 1;
break;
case 3:
HW_SRC_GPR7_WR((uint32_t)&_boot_start);
// HW_SRC_GPR8_WR((uint32_t)common_cpu_entry);
HW_SRC_SCR.B.CORE3_ENABLE = 1;
break;
@ -148,9 +145,11 @@ int main(void)
DEBUG_PRINTF("CPU %d started done.\n", cur_cpuid());
}
// struct TraceTag main_intr_tag;
// AchieveResourceTag(&main_intr_tag, &hardkernel_tag, "intr-ac-resource");
// struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&main_intr_tag);
struct TraceTag main_intr_tag;
AchieveResourceTag(&main_intr_tag, &hardkernel_tag, "intr-ac-resource");
struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&main_intr_tag);
p_intr_driver->cpu_irq_disable();
if (cpu_id == 0) {
/* init softkernel */
if (!softkernel_init(&hardkernel_tag, &softkernel_tag)) {
@ -174,6 +173,7 @@ int main(void)
init = true;
}
// p_intr_driver->cpu_irq_disable();
while (!init)
;

View File

@ -293,8 +293,8 @@ void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driv
// dev mem
_map_pages((uintptr_t*)kern_pgdir.pd_addr, DEV_VRTMEM_BASE, DEV_PHYMEM_BASE, DEV_MEM_SZ, dev_attr);
// _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
_p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
_p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
// _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
}
void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)

View File

@ -112,6 +112,13 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
}
/* alloc a new task with init */
extern void trap_return(void);
/* First-entry trampoline for a newly created task: the scheduler switches
 * to the task while holding whole_kernel_lock, so the lock must be released
 * here before dropping to user mode via trap_return().
 * NOTE(review): arch_init_context points the new context's lr at this
 * function, so it runs exactly once per task, on its first schedule —
 * confirm no other caller exists. */
void task_prepare_enter()
{
spinlock_unlock(&whole_kernel_lock);
trap_return();
}
static struct TaskMicroDescriptor* _new_task_cb()
{
// alloc task space
@ -170,10 +177,11 @@ extern void context_switch(struct context**, struct context*);
static void _scheduler(struct SchedulerRightGroup right_group)
{
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
struct TaskMicroDescriptor* next_task;
spinlock_lock(&whole_kernel_lock);
while (1) {
spinlock_unlock(&whole_kernel_lock);
spinlock_lock(&xizi_task_manager.lock);
next_task = NULL;
/* find next runnable task */
@ -192,12 +200,15 @@ static void _scheduler(struct SchedulerRightGroup right_group)
if (UNLIKELY(next_task == NULL)) {
continue;
}
spinlock_lock(&whole_kernel_lock);
assert(next_task->state == RUNNING);
// p_mmu_driver->LoadPgdirCrit((uintptr_t)V2P(next_task->pgdir.pd_addr), &right_group.intr_driver_tag);
p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
struct CPU* cpu = cur_cpu();
cpu->task = next_task;
// DEBUG_PRINTF("CPU %d switch to task %s\n", cur_cpuid(), next_task->name);
context_switch(&cpu->scheduler, next_task->main_thread.context);
}
}

View File

@ -59,11 +59,14 @@ void intr_irq_dispatch(struct trapframe* tf)
assert(p_intr_driver != NULL);
p_intr_driver->cpu_irq_disable();
// enter irq
uintptr_t int_info = 0;
if ((int_info = p_intr_driver->hw_before_irq()) == 0) {
return;
}
spinlock_lock(&whole_kernel_lock);
// DEBUG("CPU %d in kernel %s %d\n", cur_cpuid(), __func__, __LINE__);
struct TaskMicroDescriptor* current_task = cur_cpu()->task;
if (LIKELY(current_task != NULL)) {
@ -77,9 +80,7 @@ void intr_irq_dispatch(struct trapframe* tf)
// distribute irq
irq_handler_t isr = p_intr_driver->sw_irqtbl[irq].handler;
if (isr) {
// spinlock_lock(&whole_kernel_lock);
isr(irq, tf, NULL);
// spinlock_unlock(&whole_kernel_lock);
} else {
default_interrupt_routine();
}
@ -93,5 +94,7 @@ void intr_irq_dispatch(struct trapframe* tf)
}
assert(current_task == cur_cpu()->task);
// DEBUG("CPU %d out kernel %s %d\n", cur_cpuid(), __func__, __LINE__);
spinlock_unlock(&whole_kernel_lock);
p_intr_driver->cpu_irq_enable();
}

View File

@ -48,14 +48,12 @@ bool swi_distributer_init(struct SwiDispatcherRightGroup* _right_group)
extern void context_switch(struct context**, struct context*);
void software_irq_dispatch(struct trapframe* tf)
{
bool is_my_lock = false;
if (whole_kernel_lock.owner_cpu != cur_cpuid()) {
spinlock_lock(&whole_kernel_lock);
is_my_lock = true;
}
assert(p_intr_driver != NULL);
p_intr_driver->cpu_irq_disable();
spinlock_lock(&whole_kernel_lock);
// DEBUG("CPU %d in kernel %s %d\n", cur_cpuid(), __func__, __LINE__);
// get current task
struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
/// @todo: Handle dead task
@ -82,8 +80,7 @@ void software_irq_dispatch(struct trapframe* tf)
ERROR("Exit reaches");
}
if (is_my_lock) {
spinlock_unlock(&whole_kernel_lock);
}
// DEBUG("CPU %d out kernel %s %d\n", cur_cpuid(), __func__, __LINE__);
spinlock_unlock(&whole_kernel_lock);
p_intr_driver->cpu_irq_enable();
}