Add wklock to dabort and iabort.

TXuian 2024-03-15 18:29:57 +08:00
parent b939557317
commit c60f29277a
4 changed files with 5 additions and 12 deletions

View File

@@ -74,7 +74,7 @@ Modification:
#include "cortex_a9.h"
-#define NR_CPU 1
+#define NR_CPU 4
__attribute__((always_inline)) static inline uint32_t user_mode()
{
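Raising NR_CPU from 1 to 4 is what makes the rest of this commit necessary: with four Cortex-A9 cores running, the abort handlers can race on shared kernel state. As a rough illustration only (struct cpu, cpus[], struct task and the MPIDR-based cur_cpu() below are assumptions, not code from this repository), NR_CPU typically sizes the per-CPU bookkeeping that cur_cpu() indexes:

#include <stdint.h>

#define NR_CPU 4 /* four Cortex-A9 cores instead of one */

struct context; /* opaque: saved register area */
struct task;    /* opaque: task control block */

struct cpu {
    struct task* task;         /* task currently running on this core */
    struct context* scheduler; /* scheduler context to switch back into */
};

static struct cpu cpus[NR_CPU];

/* Resolve the current core by reading MPIDR (CP15 c0,c0,5); on a Cortex-A9
 * MPCore the low bits carry the CPU number. */
static inline struct cpu* cur_cpu(void)
{
    uint32_t mpidr;
    __asm__ __volatile__("mrc p15, 0, %0, c0, c0, 5" : "=r"(mpidr));
    return &cpus[mpidr & 0x3];
}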

View File

@@ -44,6 +44,7 @@ Modification:
#include "log.h"
#include "multicores.h"
#include "spinlock.h"
#include "syscall.h"
__attribute__((always_inline)) static inline void _abort_reason(uint32_t fault_status)
@@ -94,6 +95,7 @@ void handle_undefined_instruction(struct trapframe* tf)
extern void context_switch(struct context**, struct context*);
void dabort_handler(struct trapframe* r)
{
+spinlock_lock(&whole_kernel_lock);
uint32_t dfs, dfa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
@@ -105,8 +107,6 @@ void dabort_handler(struct trapframe* r)
LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
_abort_reason(dfs);
dump_tf(r);
}
if (cur_cpu()->task != NULL) {
sys_exit();
context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
} else { // Exception occurred in Kernel space: panic
@@ -120,6 +120,7 @@ void dabort_handler(struct trapframe* r)
void iabort_handler(struct trapframe* r)
{
+spinlock_lock(&whole_kernel_lock);
uint32_t ifs, ifa;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifs)::);
@@ -131,8 +132,6 @@ void iabort_handler(struct trapframe* r)
LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
_abort_reason(ifs);
dump_tf(r);
}
if (cur_cpu()->task != NULL) {
sys_exit();
context_switch(&cur_cpu()->task->main_thread.context, cur_cpu()->scheduler);
} else { // Exception occurred in Kernel space: panic
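Both abort hunks make the same change: the whole-kernel lock ("wklock") is taken at the very top of the handler, before the fault registers are read or anything is logged. The sketch below reconstructs the resulting dabort_handler flow under stated assumptions; the DFAR read, the simplified cur_cpu()/task view, the helper declarations and the panic message are illustrative, not code from this commit (the iabort path is identical except that it reads IFSR/IFAR):

#include <stdint.h>

/* Assumed declarations so the sketch stands alone; in the kernel these come
 * from spinlock.h, syscall.h, log.h and the trap headers. */
struct spinlock;
struct trapframe;
struct task;                       /* opaque task control block */
struct cpu { struct task* task; }; /* simplified view of the per-CPU struct */
extern struct spinlock whole_kernel_lock;
extern void spinlock_lock(struct spinlock*);
extern struct cpu* cur_cpu(void);
extern void LOG(const char* fmt, ...);
extern void _abort_reason(uint32_t fault_status);
extern void dump_tf(struct trapframe*);
extern void sys_exit(void);
extern void panic(const char*);

void dabort_handler_sketch(struct trapframe* r)
{
    /* Serialize fault handling first: with NR_CPU now 4, several cores can
     * abort at the same time while touching shared kernel state. */
    spinlock_lock(&whole_kernel_lock);

    uint32_t dfs, dfa;
    /* DFSR holds the fault status, DFAR the faulting address (ARMv7 CP15). */
    __asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfs)::);
    __asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfa)::);

    LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
    _abort_reason(dfs);
    dump_tf(r);

    if (cur_cpu()->task != NULL) {
        /* A user task faulted: terminate it; the real handler then
         * context-switches back into this core's scheduler. */
        sys_exit();
    } else {
        /* The kernel itself faulted: nothing to recover. */
        panic("data abort in kernel space\n");
    }
}

One visible consequence of locking this early is that even the DFSR/DFAR read and the LOG output are serialized across cores, so abort diagnostics from different CPUs cannot interleave.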

View File

@@ -85,8 +85,6 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
b->flag |= TRACER_MEM_CHUNK_BUSY;
return b;
}
ERROR("tracer mem_chunk syner is locked\n");
panic("");
}
}
@@ -126,7 +124,7 @@ void tracer_mem_chunk_write(struct tracer_mem_chunk* b)
void tracer_mem_chunk_release(struct tracer_mem_chunk* b)
{
if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
panic("tracer mem_chunk release but it's busy occupied");
panic("tracer mem_chunk release but it's not busy occupied");
}
// move mem_chunk that just used to the head of cache list
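The second tracer hunk only rewords the panic message, but the new wording states the actual invariant: the check fires when the BUSY flag is not set, i.e. a chunk is released that was never acquired. A small sketch of that invariant (the flag value, the clearing step and the _sketch suffix are assumptions, not taken from the repository):

#include <stdint.h>

#define TRACER_MEM_CHUNK_BUSY 0x1 /* assumed bit value, for illustration */

extern void panic(const char*);

struct tracer_mem_chunk {
    uint32_t flag;
    /* data payload and cache-list links omitted */
};

/* Releasing a chunk is only legal if a prior tracer_get_mem_chunk_cache()
 * marked it busy, hence the corrected panic message above. */
void tracer_mem_chunk_release_sketch(struct tracer_mem_chunk* b)
{
    if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
        panic("tracer mem_chunk release but it's not busy occupied");
    }
    b->flag &= ~TRACER_MEM_CHUNK_BUSY; /* assumed: drop the busy mark */
    /* the real function then moves the chunk to the head of the cache list */
}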

View File

@@ -122,13 +122,9 @@ int main(void)
return -1;
}
scu_enable();
configure_cpu(cpu_id);
spinlock_init(&whole_kernel_lock, "wklock");
} else {
spinlock_lock(&whole_kernel_lock);
configure_cpu(cpu_id);
secondary_cpu_hardkernel_init(cpu_id, &hardkernel_tag);
spinlock_unlock(&whole_kernel_lock);
}
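The final hunk reshapes the SMP bring-up in main(): the primary core enables the SCU and creates the "wklock" before any secondary core can reach it, and the secondaries bracket their own setup with lock/unlock. Because the add/remove markers for this hunk did not survive extraction, the sketch below simply follows the lines as shown and collapses the per-core init calls into a single hypothetical configure_cpu(); treat it as an outline, not the file's final state:

#include <stdint.h>

/* Assumed declarations mirroring the names in the hunk above. */
struct spinlock;
extern struct spinlock whole_kernel_lock;
extern void spinlock_init(struct spinlock*, const char* name);
extern void spinlock_lock(struct spinlock*);
extern void spinlock_unlock(struct spinlock*);
extern void scu_enable(void);
extern void configure_cpu(uint32_t cpu_id);

/* Hypothetical per-core boot split: CPU 0 publishes the lock, the other
 * cores serialize their setup behind it. */
void boot_cpu_sketch(uint32_t cpu_id)
{
    if (cpu_id == 0) {
        scu_enable();                                /* cache coherency for SMP */
        configure_cpu(cpu_id);
        spinlock_init(&whole_kernel_lock, "wklock"); /* lock exists from here on */
    } else {
        spinlock_lock(&whole_kernel_lock);           /* one secondary at a time */
        configure_cpu(cpu_id);
        spinlock_unlock(&whole_kernel_lock);
    }
}

The ordering is the point of this arrangement: the lock must be initialized on the primary core before any secondary starts spinning on it, otherwise the secondaries would be acquiring an uninitialized lock.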