forked from xuos/xiuos
Start multi cores in imx6q
This commit is contained in:
parent 63de0f983c
commit 01f4d45e0c
@@ -50,7 +50,7 @@ Modification:
 #define CPSR_MODE (0x1f) //!< Current processor mode
 //@}
 
-#define MODE_STACK_SIZE 0x4000
+#define MODE_STACK_SIZE 0x1000
 
 //! @name Interrupt enable bits in CPSR
 //@{
@@ -74,7 +74,7 @@ Modification:
 #include "cortex_a9.h"
 
-#define NR_CPU 4
+#define NR_CPU 2
 
 __attribute__((always_inline)) static inline uint32_t user_mode()
 {
@@ -71,7 +71,7 @@ arm_set_interrupt_state:
 cpu_get_current:
     mrc p15, 0, r0, c0, c0, 5
     and r0, r0, #3
     BX lr
 .endfunc @cpu_get_current()@
 
 .global enable_neon_fpu
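For reference, cpu_get_current reads the MPIDR (CP15 c0, c0, 5) and keeps the low two bits as the core index; this is the value the C side later consumes as cur_cpuid(). A C-level sketch of the same read, assuming a GCC-style inline-asm toolchain for ARMv7-A (the helper name is illustrative, not from the repo):

#include <stdint.h>

/* Sketch: read the current core index from MPIDR, CP15 c0, c0, 5.
 * Mirrors the assembly above: mrc p15, 0, r0, c0, c0, 5; and r0, r0, #3. */
static inline uint32_t mpidr_core_id(void)
{
    uint32_t mpidr;
    __asm__ volatile("mrc p15, 0, %0, c0, c0, 5" : "=r"(mpidr));
    return mpidr & 0x3; /* Cortex-A9 MP: core number is in bits [1:0] */
}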
@@ -128,8 +128,8 @@ disable_L1_cache:
 get_arm_private_peripheral_base:
 
     @ Get base address of private perpherial space
-    mrc p15, 4, r0, c15, c0, 0  @ Read periph base address
-    @ mov r0, #0x00A00000
+    @ mrc p15, 4, r0, c15, c0, 0  @ Read periph base address
+    mov r0, #0x00A00000
 
     bx lr
 .endfunc @get_arm_private_peripheral_base()@
@@ -213,7 +213,8 @@ arm_branch_target_cache_invalidate_is:
 scu_enable:
 
     @ mrc p15, 4, r0, c15, c0, 0  @ Read periph base address
     mov r0, #0x00A00000
+    add r0, #0x80000000
 
     ldr r1, [r0, #0x0]  @ Read the SCU Control Register
     orr r1, r1, #0x1    @ Set bit 0 (The Enable bit)
@@ -268,7 +269,8 @@ scu_leave_smp:
 scu_get_cpus_in_smp:
 
     @ mrc p15, 4, r0, c15, c0, 0  @ Read periph base address
     mov r0, #0x00A00000
+    add r0, #0x80000000
 
     ldr r0, [r0, #0x004]  @ Read SCU Configuration register
     mov r0, r0, lsr #4    @ Bits 7:4 gives the cores in SMP mode, shift then mask
@@ -327,6 +329,7 @@ scu_secure_invalidate:
     mov r1, r1, lsl r0  @ Shift ways into the correct CPU field
 
     mrc p15, 4, r2, c15, c0, 0  @ Read periph base address
+    add r2, #0x80000000
 
     str r1, [r2, #0x0C]  @ Write to SCU Invalidate All in Secure State
 
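The pattern across these SCU hunks: the Cortex-A9 private peripheral block (SCU, GIC CPU interface, private timers) sits at physical 0x00A00000 on i.MX6Q, and the new "add r0, #0x80000000" rebases it into the kernel's device window because these routines now run with the MMU on (compare DEV_VRTMEM_BASE/DEV_PHYMEM_BASE in the page-table hunk further down). A hedged C sketch of the same address math; the macro and function names here are illustrative, not the repo's:

#include <stdint.h>

/* Assumptions (not verbatim from the repo): i.MX6Q private-peripheral physical
 * base and the kernel's device virtual offset used by this commit. */
#define PRIVATE_PERIPH_PHYS_BASE 0x00A00000u
#define DEV_VIRT_OFFSET          0x80000000u
#define SCU_CTRL_OFFSET          0x00u /* SCU Control Register */
#define SCU_CONFIG_OFFSET        0x04u /* SCU Configuration Register */

static inline volatile uint32_t* scu_reg(uint32_t offset)
{
    return (volatile uint32_t*)(PRIVATE_PERIPH_PHYS_BASE + DEV_VIRT_OFFSET + offset);
}

/* What scu_get_cpus_in_smp computes: SCU Configuration bits [7:4] flag the
 * cores currently taking part in coherency (SMP mode). */
static inline uint32_t scu_cpus_in_smp(void)
{
    return (*scu_reg(SCU_CONFIG_OFFSET) >> 4) & 0xFu;
}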
(File diff suppressed because it is too large)
@@ -36,6 +36,7 @@ Modification:
 #include "uart_common_ope.h"
 
 #include "assert.h"
+#include "pagetable.h"
 
 #define KERN_BOOT_DRIVER(n, bi, f) \
     { \
@@ -196,4 +197,31 @@ bool hardkernel_init(struct TraceTag* _hardkernel_tag)
         LOG_PRINTF("\n");
     }
     return true;
 }
+
+bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag)
+{
+    struct TraceTag init_intr_tag, init_icache_tag, init_dcache_tag, init_clock_tag, init_mmu_tag;
+    AchieveResourceTag(&init_intr_tag, _hardkernel_tag, "intr-ac-resource");
+    AchieveResourceTag(&init_icache_tag, _hardkernel_tag, "icache-ac-resource");
+    AchieveResourceTag(&init_dcache_tag, _hardkernel_tag, "dcache-ac-resource");
+    AchieveResourceTag(&init_clock_tag, _hardkernel_tag, "clock-ac-resource");
+    AchieveResourceTag(&init_mmu_tag, _hardkernel_tag, "mmu-ac-resource");
+    struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&init_intr_tag);
+    struct ICacheDone* p_icache_driver = (struct ICacheDone*)AchieveResource(&init_icache_tag);
+    struct DCacheDone* p_dcache_driver = (struct DCacheDone*)AchieveResource(&init_dcache_tag);
+    struct XiziClockDriver* p_clock_driver = (struct XiziClockDriver*)AchieveResource(&init_clock_tag);
+
+    // secondary cpu init hardwares
+    // intr
+    p_intr_driver->sys_irq_init(cpu_id);
+    // cache
+    p_icache_driver->enable();
+    p_dcache_driver->enable();
+    // clock
+    p_clock_driver->sys_clock_init();
+    p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0);
+    // mmu
+    secondary_cpu_load_kern_pgdir(&init_mmu_tag, &init_intr_tag);
+    return true;
+}
@@ -82,22 +82,22 @@ void handle_fiq(void)
     panic("");
 }
 
-static void _sys_irq_init()
+static void _sys_irq_init(int cpu_id)
 {
-    /* load exception vectors */
-    volatile uint32_t* vector_base = &_vector_start;
-
-    // Set Interrupt handler start address
-    vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
-    vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
-    vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
-    vector_base[4] = (uint32_t)trap_dabort; // Data Abort
-    vector_base[5] = (uint32_t)handle_reserved; // Reserved
-    vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
-    vector_base[7] = (uint32_t)handle_fiq; // FIQ
-
-    init_cpu_mode_stacks(0);
+    init_cpu_mode_stacks(cpu_id);
+    if (cpu_id == 0) {
+        volatile uint32_t* vector_base = &_vector_start;
+
+        // Set Interrupt handler start address
+        vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
+        vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
+        vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
+        vector_base[4] = (uint32_t)trap_dabort; // Data Abort
+        vector_base[5] = (uint32_t)handle_reserved; // Reserved
+        vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
+        vector_base[7] = (uint32_t)handle_fiq; // FIQ
+    }
 
     /* active hardware irq responser */
     gic_init();
     xizi_trap_driver.switch_hw_irqtbl((uint32_t*)&_vector_jumper);
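The reshaped _sys_irq_init does two things per core: every CPU builds its own banked-mode stacks with init_cpu_mode_stacks(cpu_id), while the exception vector table at _vector_start is written only by CPU 0, since it is a single shared table. A rough sketch of what a per-CPU mode-stack layout can look like under the smaller MODE_STACK_SIZE of 0x1000; the array and helper are illustrative, not the repo's init_cpu_mode_stacks:

#include <stdint.h>

#define NR_CPU 2
#define MODE_STACK_SIZE 0x1000
#define NR_BANKED_MODES 3 /* e.g. IRQ, ABT, UND -- illustrative count */

/* One stack per (core, mode); 2 cores x 3 modes x 4 KiB = 24 KiB total. */
static uint8_t mode_stacks[NR_CPU][NR_BANKED_MODES][MODE_STACK_SIZE] __attribute__((aligned(8)));

static inline uintptr_t mode_stack_top(int cpu_id, int mode_idx)
{
    /* ARM stacks are full-descending, so the initial SP is one past the end. */
    return (uintptr_t)&mode_stacks[cpu_id][mode_idx][MODE_STACK_SIZE];
}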
@@ -240,7 +240,7 @@ static struct XiziTrapDriver xizi_trap_driver = {
 
 struct XiziTrapDriver* hardkernel_intr_init(struct TraceTag* hardkernel_tag)
 {
-    xizi_trap_driver.sys_irq_init();
+    xizi_trap_driver.sys_irq_init(0);
     xizi_trap_driver.cpu_irq_disable();
     return &xizi_trap_driver;
 }
@@ -21,6 +21,7 @@
 
 #include "assert.h"
 #include "spinlock.h"
 #include "trap_common.h"
 
 bool module_spinlock_use_intr_init(void)
 {
@@ -43,11 +44,11 @@ void spinlock_init(struct spinlock* lock, char* name)
 extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
 void spinlock_lock(struct spinlock* lock)
 {
-    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK) {
+    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpuid()) {
         ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
         panic("");
     }
-    assert(_spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER) == 0);
+    _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
 }
 
 void _spinlock_unlock(struct spinlock* lock);
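The reworked check only treats a lock as double-locked when the owner is the calling core itself; another core holding it is now just ordinary contention for _spinlock_lock to spin on, and the assert goes away because a wait-forever acquire has no failure path worth asserting. Note the owner_cpu read happens before the lock is taken, so the diagnostic is best-effort. In sketch form, with the names used in the hunk:

/* Sketch of the intended semantics of the new guard in spinlock_lock(). */
void spinlock_lock_sketch(struct spinlock* lock)
{
    if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpuid()) {
        /* Same core acquiring twice without unlocking: a genuine recursion bug. */
        panic("recursive spinlock");
    }
    /* A different core holding the lock is normal under SMP: just spin. */
    _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
}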
@@ -85,21 +85,23 @@ void handle_fiq(void)
     panic("");
 }
 
-static void _sys_irq_init()
+static void _sys_irq_init(int cpu_id)
 {
-    /* load exception vectors */
-    volatile uint32_t* vector_base = &_vector_start;
-
-    // Set Interrupt handler start address
-    vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
-    vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
-    vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
-    vector_base[4] = (uint32_t)trap_dabort; // Data Abort
-    vector_base[5] = (uint32_t)handle_reserved; // Reserved
-    vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
-    vector_base[7] = (uint32_t)handle_fiq; // FIQ
+    if (cpu_id == 0) {
+        /* load exception vectors */
+        volatile uint32_t* vector_base = &_vector_start;
+
+        // Set Interrupt handler start address
+        vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
+        vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
+        vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
+        vector_base[4] = (uint32_t)trap_dabort; // Data Abort
+        vector_base[5] = (uint32_t)handle_reserved; // Reserved
+        vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
+        vector_base[7] = (uint32_t)handle_fiq; // FIQ
+    }
 
-    init_cpu_mode_stacks(0);
+    init_cpu_mode_stacks(cpu_id);
 
     /* active hardware irq responser */
     XScuGic_Config* gic_config = XScuGic_LookupConfig(XPAR_PS7_SCUGIC_0_DEVICE_ID);
@@ -59,7 +59,7 @@ struct XiziTrapDriver {
     /* current irq number happening in cpu*/
     uint32_t curr_int[NR_CPU];
 
-    void (*sys_irq_init)();
+    void (*sys_irq_init)(int);
     int (*cur_cpu_id)();
 
     void (*cpu_irq_enable)();
@@ -94,6 +94,7 @@ void bootmain()
     build_boot_pgdir();
     load_boot_pgdir();
     __asm__ __volatile__("add sp, sp, %0" ::"r"(KERN_MEM_BASE - PHY_MEM_BASE));
-    memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
+    // memset(&kernel_data_begin, 0x00, (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
+
     main();
 }
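Commenting out the memset is presumably tied to the new boot flow: secondary cores are released into _boot_start as well, and a late-arriving core re-zeroing the kernel data region would wipe state CPU 0 has already built. A hedged sketch of the usual alternative, clearing once on the primary core only (mpidr_core_id is the illustrative helper sketched earlier; kernel_data_begin/kernel_data_end are the repo's linker symbols, declared here under an assumption about their type):

#include <stdint.h>
#include <string.h>

extern uint32_t kernel_data_begin[], kernel_data_end[]; /* linker symbols; declaration assumed */

/* Sketch: keep the clear, but run it exactly once, on the boot core. */
static void clear_kernel_data_once(void)
{
    if (mpidr_core_id() == 0) { /* illustrative helper from the earlier sketch */
        memset(&kernel_data_begin, 0x00,
            (uint32_t)kernel_data_end - (uint32_t)kernel_data_begin);
    }
}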
@@ -447,9 +447,11 @@ bool tracer_delete_trace(struct TraceTag* target, struct TraceTag* owner)
     return true;
 }
 
+static struct spinlock ac_tracer_lock;
 void tracer_init(void)
 {
     /* init sys_tracer, the manager */
+    spinlock_init(&ac_tracer_lock, "tracerlock");
     spinlock_init(&sys_tracer.mem_chunk_bitmap_lock, "tracer_mem_chunk_bitmap");
     spinlock_init(&sys_tracer.trace_meta_bitmap_lock, "tracer_meta_bitmap");
     memset(sys_tracer.mem_chunks_bit_map, 0, sizeof(sys_tracer.mem_chunk_bitmap_lock));
@@ -494,26 +496,31 @@ void tracer_find_tag(struct TraceTag* target, struct TraceTag* const source, cha
 
 bool AchieveResourceTag(struct TraceTag* target, struct TraceTag* owner, char* name)
 {
+    spinlock_lock(&ac_tracer_lock);
     tracer_find_tag(target, owner, name);
     if (target->meta == NULL) {
         return false;
     }
+    spinlock_unlock(&ac_tracer_lock);
     return true;
 }
 
 void* AchieveResource(struct TraceTag* target)
 {
+    spinlock_lock(&ac_tracer_lock);
     if (target->type == TRACER_OWNER) {
         return NULL;
     }
     void* p_resource = NULL;
     tracer_read_trace(target, (char*)&p_resource, 0, sizeof(void*));
     assert(p_resource != NULL);
+    spinlock_unlock(&ac_tracer_lock);
     return p_resource;
 }
 
 bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
 {
+    // spinlock_lock(&ac_tracer_lock);
     new_tag->type = type;
     if (type == TRACER_OWNER) {
         return tracer_create_trace(new_tag, owner, name, type);
@@ -527,10 +534,14 @@ bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* n
     if (!tracer_create_trace(new_tag, owner, name, type)) {
         return false;
     }
-    return tracer_write_trace(new_tag, (char*)&p_resource, 0, sizeof(void*)) == sizeof(void*);
+    bool ret = tracer_write_trace(new_tag, (char*)&p_resource, 0, sizeof(void*)) == sizeof(void*);
+    // spinlock_unlock(&ac_tracer_lock);
+    return ret;
 }
 
 bool DeleteResource(struct TraceTag* target, struct TraceTag* owner)
 {
+    spinlock_lock(&ac_tracer_lock);
     return tracer_delete_trace(target, owner);
+    spinlock_unlock(&ac_tracer_lock);
 }
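As written in these hunks, a few paths take ac_tracer_lock and never release it: AchieveResourceTag returns false before the unlock when the tag is not found, AchieveResource returns NULL for TRACER_OWNER targets with the lock still held, and DeleteResource's spinlock_unlock sits after a return, so it can never run. A hedged sketch of two of these functions with the unlock kept on every exit path (names exactly as in the hunks):

/* Sketch: same logic, but ac_tracer_lock is released on every path. */
bool AchieveResourceTag(struct TraceTag* target, struct TraceTag* owner, char* name)
{
    spinlock_lock(&ac_tracer_lock);
    tracer_find_tag(target, owner, name);
    bool found = (target->meta != NULL);
    spinlock_unlock(&ac_tracer_lock);
    return found;
}

bool DeleteResource(struct TraceTag* target, struct TraceTag* owner)
{
    spinlock_lock(&ac_tracer_lock);
    bool ret = tracer_delete_trace(target, owner);
    spinlock_unlock(&ac_tracer_lock);
    return ret;
}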
@@ -19,7 +19,11 @@
 int main(int argc, char* argv[])
 {
     struct Session session;
-    connect_session(&session, "MemFS", 8092);
+    printf("connecting MemFS\n");
+    while (connect_session(&session, "MemFS", 8092) < 0) {
+        printf("connecting MemFS\n");
+    }
+    printf("connect MemFS success\n");
 
     int fd;
     char* shell_task_param[2] = { "/shell", 0 };
@@ -66,7 +66,7 @@ int IPC_DO_SERVE_FUNC(Ipc_ls)(char* path)
         printf("ls: find target Inode failed, ip: %x(%d), dp: %x(%d)\n", ip, ip->inum, dp, dp->inum);
         return -1;
     }
-    if (ip->type != T_DIR) {
+    if (ip->type != FS_DIRECTORY) {
         printf("ls: not a dir\n");
         return -1;
     }
@@ -37,5 +37,6 @@ struct XiziBootNode {
 };
 
 bool hardkernel_init(struct TraceTag*);
+bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag);
 bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softkernel_tag);
 void show_xizi_bar(void);
@@ -30,6 +30,7 @@ Modification:
 #pragma once
 
 #include "core.h"
+#include "spinlock.h"
 #include "trap_common.h"
 
 struct CPU {
@@ -44,4 +45,6 @@ extern struct CPU global_cpus[NR_CPU];
 static inline struct CPU* cur_cpu(void)
 {
     return &global_cpus[cur_cpuid()];
 }
+
+struct spinlock whole_kernel_lock;
@@ -72,6 +72,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc);
 
 extern struct TopLevelPageDirectory kern_pgdir;
 void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag);
+void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag);
 
 extern struct XiziPageManager xizi_pager;
 bool module_pager_init(struct PagerRightGroup*);
@@ -27,37 +27,130 @@ Author: AIIT XUOS Lab
 Modification:
 1. first version
 *************************************************/
+/// @todo use hardkernel
+#include "cortex_a9.h"
+#include "regssrc.h"
 
 #include "kern_init.h"
 #include "multicores.h"
 
 #include "assert.h"
 #include "task.h"
 
 #include "trap_common.h"
 
+void configure_cpu(uint32_t cpu)
+{
+    const unsigned int all_ways = 0xf;
+
+    disable_strict_align_check();
+
+    // Enable branch prediction
+    arm_branch_target_cache_invalidate();
+    arm_branch_prediction_enable();
+
+    // Enable L1 caches
+    // arm_dcache_enable();
+    // arm_dcache_invalidate();
+    // arm_icache_enable();
+    // arm_icache_invalidate();
+
+    // Invalidate SCU copy of TAG RAMs
+    scu_secure_invalidate(cpu, all_ways);
+
+    // Join SMP
+    scu_join_smp();
+    scu_enable_maintenance_broadcast();
+}
+
+typedef void (*cpu_entry_point_t)(void* arg);
+typedef struct _core_startup_info {
+    cpu_entry_point_t entry; //!< Function to call after starting a core.
+    void* arg; //!< Argument to pass core entry point.
+} core_startup_info_t;
+static core_startup_info_t s_core_info[NR_CPU] = { { 0 } };
+
+static void common_cpu_entry(void)
+{
+    uint32_t myCoreNumber = cpu_get_current();
+    core_startup_info_t* info = &s_core_info[myCoreNumber];
+
+    // Call the requested entry point for this CPU number.
+    if (info->entry) {
+        info->entry(info->arg);
+    }
+}
+
+extern void _boot_start();
+void cpu_start_secondary(uint8_t coreNumber, cpu_entry_point_t entryPoint, void* arg)
+{
+    // Save entry point and arg.
+    s_core_info[coreNumber].entry = entryPoint;
+    s_core_info[coreNumber].arg = arg;
+
+    // Prepare pointers for ROM code. The entry point is always _start, which does some
+    // basic preparatory work and then calls the common_cpu_entry function, which itself
+    // calls the entry point saved in s_core_info.
+    switch (coreNumber) {
+    case 1:
+        HW_SRC_GPR3_WR((uint32_t)&_boot_start);
+        // HW_SRC_GPR4_WR((uint32_t)common_cpu_entry);
+
+        HW_SRC_SCR.B.CORE1_ENABLE = 1;
+        break;
+
+    case 2:
+        HW_SRC_GPR5_WR((uint32_t)&_boot_start);
+        // HW_SRC_GPR6_WR((uint32_t)common_cpu_entry);
+
+        HW_SRC_SCR.B.CORE2_ENABLE = 1;
+        break;
+
+    case 3:
+        HW_SRC_GPR7_WR((uint32_t)&_boot_start);
+        // HW_SRC_GPR8_WR((uint32_t)common_cpu_entry);
+
+        HW_SRC_SCR.B.CORE3_ENABLE = 1;
+        break;
+    }
+}
+
 extern uint32_t _binary_init_start[], _binary_default_fs_start[];
 extern int sys_spawn(char* path, char** argv);
+static struct TraceTag hardkernel_tag, softkernel_tag;
 
+static bool init = false;
 int main(void)
 {
-    /* init tracer */
-    // clang-format off
-    tracer_init(); // init tracer system
-    struct TraceTag hardkernel_tag, softkernel_tag;
-    if (!CreateResourceTag(&hardkernel_tag, RequireRootTag(), "hardkernel", TRACER_OWNER, NULL) ||
-        !CreateResourceTag(&softkernel_tag, RequireRootTag(), "softkernel", TRACER_OWNER, NULL)) {
-        ERROR("Failed to create hardkernel owner and softkernel owner.\n");
-        return -1;
-    }
-    // clang-format on
+    uint32_t cpu_id = cur_cpuid();
 
-    /* init hardkernel */
-    if (!hardkernel_init(&hardkernel_tag)) {
-        return -1;
+    if (cpu_id == 0) {
+        tracer_init(); // init tracer system
+        // clang-format off
+        if (!CreateResourceTag(&hardkernel_tag, RequireRootTag(), "hardkernel", TRACER_OWNER, NULL) ||
+            !CreateResourceTag(&softkernel_tag, RequireRootTag(), "softkernel", TRACER_OWNER, NULL)) {
+            ERROR("Failed to create hardkernel owner and softkernel owner.\n");
+            return -1;
+        }
+        // clang-format on
+        /* init hardkernel */
+        if (!hardkernel_init(&hardkernel_tag)) {
+            return -1;
+        }
+
+        spinlock_init(&whole_kernel_lock, "wklock");
+    } else {
+        configure_cpu(cpu_id);
+        DEBUG_PRINTF("CPU %d started init: %d(at %x).\n", cur_cpuid(), init, &init);
+        spinlock_lock(&whole_kernel_lock);
+        secondary_cpu_hardkernel_init(cpu_id, &hardkernel_tag);
+        spinlock_unlock(&whole_kernel_lock);
+        DEBUG_PRINTF("CPU %d started done.\n", cur_cpuid());
     }
 
-    struct TraceTag main_intr_tag;
-    AchieveResourceTag(&main_intr_tag, &hardkernel_tag, "intr-ac-resource");
-    struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&main_intr_tag);
-    int cpu_id = p_intr_driver->cur_cpu_id();
+    // struct TraceTag main_intr_tag;
+    // AchieveResourceTag(&main_intr_tag, &hardkernel_tag, "intr-ac-resource");
+    // struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&main_intr_tag);
     if (cpu_id == 0) {
         /* init softkernel */
         if (!softkernel_init(&hardkernel_tag, &softkernel_tag)) {
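cpu_start_secondary leans on the i.MX6Q boot ROM: a core held in reset polls its SRC general-purpose register pair, so writing the entry address into SRC_GPR3/5/7 and then setting CORE1/2/3_ENABLE in SRC_SCR releases core 1/2/3 straight into _boot_start (the commented GPR4/6/8 writes would pass a per-core argument the ROM also latches). A register-level sketch of the core-1 case; the SRC base, offsets and bit position are stated from memory of the i.MX6Q reference manual, so treat them as assumptions and prefer the regssrc.h accessors the repo already uses:

#include <stdint.h>

/* Assumed i.MX6Q SRC layout: block at 0x020D8000, SCR at +0x00, GPR3 at +0x28,
 * CORE1_ENABLE at SCR bit 22 -- verify against the reference manual. */
#define SRC_BASE          0x020D8000u
#define SRC_SCR           (*(volatile uint32_t*)(SRC_BASE + 0x00u))
#define SRC_GPR3          (*(volatile uint32_t*)(SRC_BASE + 0x28u))
#define SCR_CORE1_ENABLE  (1u << 22)

extern void _boot_start(void);

static void release_core1(void)
{
    SRC_GPR3 = (uint32_t)&_boot_start; /* ROM jumps here when core 1 wakes up */
    SRC_SCR |= SCR_CORE1_ENABLE;       /* take core 1 out of reset */
}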
@@ -66,18 +159,30 @@ int main(void)
         show_xizi_bar();
 
+        int cpu_count = NR_CPU;
+        scu_enable();
+        configure_cpu(cpu_id);
+        for (int i = 1; i < cpu_count; i++) {
+            // start secondary cpus
+            cpu_start_secondary(i, NULL, 0);
+        }
+
+        /* start first task */
+        char* init_task_param[2] = { "/app/init", 0 };
+        spawn_embedded_task((char*)_binary_init_start, "init", init_task_param);
+        char* fs_server_task_param[2] = { "/app/fs_server", 0 };
+        spawn_embedded_task((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
+
+        init = true;
     }
 
-    /* start first task */
-    char* init_task_param[2] = { "/app/init", 0 };
-    spawn_embedded_task((char*)_binary_init_start, "init", init_task_param);
-    char* fs_server_task_param[2] = { "/app/fs_server", 0 };
-    spawn_embedded_task((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
+    while (!init)
+        ;
 
     /* start scheduler */
     struct SchedulerRightGroup scheduler_rights;
     assert(AchieveResourceTag(&scheduler_rights.mmu_driver_tag, &hardkernel_tag, "mmu-ac-resource"));
     assert(AchieveResourceTag(&scheduler_rights.intr_driver_tag, &hardkernel_tag, "intr-ac-resource"));
+    // while (true) { }
     xizi_task_manager.task_scheduler(scheduler_rights);
 
     // never reached
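One detail worth flagging in the reshaped main(): secondary cores spin on "while (!init) ;" until CPU 0 has started them and spawned the first tasks, but init is a plain static bool written by one core and polled by the others. A sketch of the same handshake with volatile plus explicit ordering, so the spin loop is guaranteed to keep re-reading memory; whether the current code gets this for free from the surrounding lock operations is not visible in the diff:

#include <stdbool.h>

/* Sketch: cross-core "CPU 0 finished bring-up" flag. */
static volatile bool init = false;

static inline void wait_for_primary(void) /* run by secondary cores */
{
    while (!init) {
        __asm__ volatile("" ::: "memory"); /* compiler barrier inside the spin */
    }
}

static inline void signal_secondaries(void) /* run by CPU 0 after spawning tasks */
{
    __asm__ volatile("dmb" ::: "memory"); /* publish earlier writes before the flag */
    init = true;
}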
@@ -293,6 +293,12 @@ void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driv
     // dev mem
     _map_pages((uintptr_t*)kern_pgdir.pd_addr, DEV_VRTMEM_BASE, DEV_PHYMEM_BASE, DEV_MEM_SZ, dev_attr);
 
     // _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
     _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
 }
+
+void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)
+{
+    // _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
+    _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
+}
@@ -49,6 +49,9 @@ bool clock_intr_handler_init(struct TraceTag* p_clock_driver_tag)
 uint64_t global_tick = 0;
 int xizi_clock_handler(int irq, void* tf, void* arg)
 {
+    // spinlock_lock(&whole_kernel_lock);
+    // DEBUG_PRINTF("CPU %d\n", cpu_get_current());
+    // spinlock_unlock(&whole_kernel_lock);
     /* handle clock interrupt using driver */
     if (p_clock_driver->is_timer_expired()) {
         p_clock_driver->clear_clock_intr();
@@ -77,7 +77,9 @@ void intr_irq_dispatch(struct trapframe* tf)
     // distribute irq
     irq_handler_t isr = p_intr_driver->sw_irqtbl[irq].handler;
     if (isr) {
+        // spinlock_lock(&whole_kernel_lock);
         isr(irq, tf, NULL);
+        // spinlock_unlock(&whole_kernel_lock);
     } else {
         default_interrupt_routine();
     }
@@ -48,6 +48,11 @@ bool swi_distributer_init(struct SwiDispatcherRightGroup* _right_group)
 extern void context_switch(struct context**, struct context*);
 void software_irq_dispatch(struct trapframe* tf)
 {
+    bool is_my_lock = false;
+    if (whole_kernel_lock.owner_cpu != cur_cpuid()) {
+        spinlock_lock(&whole_kernel_lock);
+        is_my_lock = true;
+    }
     assert(p_intr_driver != NULL);
 
     p_intr_driver->cpu_irq_disable();

@@ -59,6 +64,7 @@ void software_irq_dispatch(struct trapframe* tf)
     if (cur_task && cur_task->state != DEAD) {
         cur_task->main_thread.trapframe = tf;
         // call syscall
 
         int ret = arch_syscall(cur_task->main_thread.trapframe, &syscall_num);
 
         if (syscall_num != SYSCALL_EXEC) {

@@ -75,5 +81,9 @@ void software_irq_dispatch(struct trapframe* tf)
     if (syscall_num == SYSCALL_EXIT) {
         ERROR("Exit reaches");
     }
+
+    if (is_my_lock) {
+        spinlock_unlock(&whole_kernel_lock);
+    }
     p_intr_driver->cpu_irq_enable();
 }
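The syscall path now takes whole_kernel_lock only when this core does not already own it, and releases it only if it was taken in this frame, so re-entry on a core that already holds the lock cannot self-deadlock. The owner_cpu comparison is the recursion guard; it assumes owner_cpu never transiently equals this core's id while some other core holds the lock. A compact sketch of the acquire/release pairing used here:

/* Sketch of the conditional big-kernel-lock pattern in software_irq_dispatch(). */
static void bkl_guard_sketch(void)
{
    bool is_my_lock = false;
    if (whole_kernel_lock.owner_cpu != cur_cpuid()) {
        spinlock_lock(&whole_kernel_lock);
        is_my_lock = true; /* remember that this frame took the lock */
    }

    /* ... dispatch the syscall, possibly switch tasks ... */

    if (is_my_lock) {
        spinlock_unlock(&whole_kernel_lock);
    }
}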