forked from xuos/xiuos

Support O2 optimization

commit 077dcd66ac (parent 88ded7ca16)
@@ -31,32 +31,14 @@ Modification:

 context_switch:
     # store original context to stack
-    str lr, [r13, #-4]!
-    str r12, [r13, #-4]!
-    str r11, [r13, #-4]!
-    str r10, [r13, #-4]!
-    str r9, [r13, #-4]!
-    str r8, [r13, #-4]!
-    str r7, [r13, #-4]!
-    str r6, [r13, #-4]!
-    str r5, [r13, #-4]!
-    str r4, [r13, #-4]!
+    stmfd r13!, {r4-r12, lr}

     # switch the stack
     str r13, [r0] // save current sp to the old PCB (**old)
     mov r13, r1 // load the next stack

     # restore context from stack
-    ldr r4, [r13], #4
-    ldr r5, [r13], #4
-    ldr r6, [r13], #4
-    ldr r7, [r13], #4
-    ldr r8, [r13], #4
-    ldr r9, [r13], #4
-    ldr r10, [r13], #4
-    ldr r11, [r13], #4
-    ldr r12, [r13], #4
-    ldr lr, [r13], #4
+    ldmfd r13!, {r4-r12, lr}

     # return to the caller
     bx lr
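Note: the ten single-register pushes and pops collapse into one STMFD/LDMFD pair. STMFD stores the lowest-numbered register at the lowest address, so the frame layout is unchanged: r4 ends up at the final sp, lr at the top. That layout has to keep matching the C-side view of a saved context; a sketch of the assumed mapping (only r11, r12 and lr are visible in this diff's `struct context`, the r4-r10 fields are an assumption):

    /* Frame written by `stmfd r13!, {r4-r12, lr}`, lowest address first.
     * context_switch saves sp into the old PCB, so this struct overlays
     * the top of the saved stack. */
    struct context {
        uint32_t r4, r5, r6, r7, r8, r9, r10; /* assumed fields */
        uint32_t r11;
        uint32_t r12;
        uint32_t lr; /* execution resumes here after the switch */
    } __attribute__((packed));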
@@ -76,7 +76,7 @@ Modification:

 #define NR_CPU 4

-__attribute__((always_inline)) static inline uint32_t user_mode()
+__attribute__((always_inline, optimize("O0"))) static inline uint32_t user_mode()
 {
     uint32_t val;
@@ -103,12 +103,12 @@ struct context {
     uint32_t r11;
     uint32_t r12;
     uint32_t lr;
-};
+} __attribute__((packed));

 /// @brief init task context, set return address to trap return
 /// @param
 extern void task_prepare_enter();
-__attribute__((__always_inline__)) static inline void arch_init_context(struct context* ctx)
+__attribute__((always_inline, optimize("O0"))) static inline void arch_init_context(struct context* ctx)
 {
     memset(ctx, 0, sizeof(*ctx));
     ctx->lr = (uint32_t)(task_prepare_enter + 4);
@@ -133,13 +133,13 @@ struct trapframe {
     uint32_t r11;
     uint32_t r12;
     uint32_t pc;
-};
+} __attribute__((packed));

 /// @brief init task trapframe (*especially the user mode cpsr)
 /// @param tf
 /// @param sp
 /// @param pc
-__attribute__((__always_inline__)) static inline void arch_init_trapframe(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
+__attribute__((always_inline, optimize("O0"))) static inline void arch_init_trapframe(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
 {
     memset(tf, 0, sizeof(*tf));
     tf->spsr = user_mode();
@@ -153,7 +153,7 @@ __attribute__((__always_inline__)) static inline void arch_init_trapframe(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
 /// @param tf
 /// @param sp
 /// @param pc
-__attribute__((__always_inline__)) static inline void arch_trapframe_set_sp_pc(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
+__attribute__((always_inline, optimize("O0"))) static inline void arch_trapframe_set_sp_pc(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
 {
     tf->sp_usr = sp;
     tf->pc = pc;
@@ -163,7 +163,7 @@ __attribute__((__always_inline__)) static inline void arch_trapframe_set_sp_pc(struct trapframe* tf, uintptr_t sp, uintptr_t pc)
 /// @param tf
 /// @param argc
 /// @param argv
-__attribute__((__always_inline__)) static inline void arch_set_main_params(struct trapframe* tf, int argc, uintptr_t argv)
+__attribute__((always_inline, optimize("O0"))) static inline void arch_set_main_params(struct trapframe* tf, int argc, uintptr_t argv)
 {
     tf->r0 = (uint32_t)argc;
     tf->r1 = (uint32_t)argv;
@@ -178,7 +178,7 @@ __attribute__((__always_inline__)) static inline void arch_set_main_params(struct trapframe* tf, int argc, uintptr_t argv)
 /// @param param5
 /// @return
 extern int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4);
-__attribute__((__always_inline__)) static inline int arch_syscall(struct trapframe* tf, int* syscall_num)
+__attribute__((always_inline, optimize("O0"))) static inline int arch_syscall(struct trapframe* tf, int* syscall_num)
 {
     // call syscall
     *syscall_num = tf->r0;
@@ -188,7 +188,7 @@ __attribute__((__always_inline__)) static inline int arch_syscall(struct trapframe* tf, int* syscall_num)
 /// @brief set return reg to trapframe
 /// @param tf
 /// @param ret
-__attribute__((__always_inline__)) static inline void arch_set_return(struct trapframe* tf, int ret)
+__attribute__((always_inline, optimize("O0"))) static inline void arch_set_return(struct trapframe* tf, int ret)
 {
     tf->r0 = (uint32_t)ret;
 }
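Note: two recurring changes in this header. The register-frame structs gain `__attribute__((packed))` so the C layout is guaranteed to match the word-by-word frames that the assembly builds, and the always-inline helpers gain GCC's per-function `optimize("O0")` so -O2 cannot reorder or merge the trapframe stores. A minimal sketch of the combined pattern (the function and fields here are illustrative, not from this commit):

    #include <stdint.h>

    struct frame {
        uint32_t r0;
        uint32_t r1;
    } __attribute__((packed)); /* no padding: layout matches the asm frame */

    /* optimize("O0") is a GCC extension: this one function is compiled
     * without optimization, keeping every store in program order. */
    __attribute__((always_inline, optimize("O0")))
    static inline void frame_set(struct frame* f, uint32_t a, uint32_t b)
    {
        f->r0 = a; /* not coalesced, reordered, or dead-store-eliminated */
        f->r1 = b;
    }

Whether `optimize("O0")` is the right tool is debatable (the GCC manual says the attribute is intended for debugging, not production); `volatile` accesses or explicit compiler barriers are the more portable alternative.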
@@ -45,6 +45,8 @@ Author: AIIT XUOS Lab
 Modification:
 1. take only gicd part of functions
 *************************************************/

+#include "string.h"
+
 #include "gicv2_common_opa.h"
 #include "gicv2_registers.h"
@@ -139,7 +141,7 @@ void gic_send_sgi(uint32_t irqID, uint32_t target_list, uint32_t filter_list)

 void gic_init(void)
 {
-    gicd_t* gicd = gic_get_gicd();
+    volatile gicd_t* gicd = gic_get_gicd();

     // First disable the distributor.
     gic_enable(false);
@@ -150,7 +152,9 @@ void gic_init(void)

     for (uint32_t i = 0; i < 255; i++) {
         *(uint32_t*)(&gicd->IPRIORITYRn[i * sizeof(uint32_t)]) = (uint32_t)0x80808080;
+        // memset((void*)&gicd->IPRIORITYRn[i * sizeof(uint32_t)], 0x80, sizeof(uint32_t));
         *(uint32_t*)(&gicd->ITARGETSRn[i * sizeof(uint32_t)]) = (uint32_t)0x01010101;
+        // memset((void*)&gicd->IPRIORITYRn[i * sizeof(uint32_t)], 0x01, sizeof(uint32_t));
     }

     // Init the GIC CPU interface.
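Note: making `gicd` a pointer-to-volatile is the substantive fix in this file. Without it, -O2 may hoist, merge, or reorder the plain stores into the distributor's register file. The commented-out `memset` lines record the alternative that was apparently rejected, presumably because a library `memset` gives no ordering or access-width guarantee for device memory. The idiom, reduced to a sketch (0x400 is the architectural GICD_IPRIORITYR offset in GICv2; the function name is hypothetical):

    #include <stdint.h>

    /* Every access through a volatile lvalue is emitted exactly as
     * written, in order, with the stated width. */
    #define REG32(addr) (*(volatile uint32_t*)(uintptr_t)(addr))

    static void gicd_set_default_priorities(uintptr_t gicd_base)
    {
        for (uint32_t i = 0; i < 255; i++)
            REG32(gicd_base + 0x400 + 4 * i) = 0x80808080u; /* GICD_IPRIORITYRn */
    }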
@@ -52,7 +52,7 @@ enum {
     SPINLOCK_LOCK_WAITFOREVER = 0xFFFFFFFF,
 };

-void spinlock_init(struct spinlock* lock, char* name)
+__attribute__((optimize("O0"))) void spinlock_init(struct spinlock* lock, char* name)
 {
     lock->owner_cpu = SPINLOCK_STATE_UNLOCK;
     strncpy(lock->name, name, 24);
@@ -61,7 +61,7 @@ void spinlock_init(struct spinlock* lock, char* name)
 extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
 void _spinlock_unlock(struct spinlock* lock);

-void spinlock_lock(struct spinlock* lock)
+__attribute__((optimize("O0"))) void spinlock_lock(struct spinlock* lock)
 {
     int cur_cpu_id = cur_cpuid();
     if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
@@ -80,7 +80,7 @@ void spinlock_lock(struct spinlock* lock)
     _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
 }

-void spinlock_unlock(struct spinlock* lock)
+__attribute__((optimize("O0"))) void spinlock_unlock(struct spinlock* lock)
 {
     struct double_list_node* p_lock_node = &core_lock_request[cur_cpuid()].node;
     assert(lock_request_guard.next == p_lock_node);
|
||||||
_spinlock_unlock(lock);
|
_spinlock_unlock(lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool spinlock_try_lock(struct spinlock* lock)
|
__attribute__((optimize("O0"))) bool spinlock_try_lock(struct spinlock* lock)
|
||||||
{
|
{
|
||||||
int cur_cpu_id = cur_cpuid();
|
int cur_cpu_id = cur_cpuid();
|
||||||
if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
|
if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpu_id) {
|
||||||
|
|
|
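Note: pinning every spinlock entry point to O0 keeps GCC from caching `lock->owner_cpu` in a register or moving the recursion check relative to the actual acquire inside the assembly-level `_spinlock_lock`. A lighter-weight alternative (my suggestion, not what the commit does) would be a compiler barrier at the acquire/release boundaries:

    #include <stdint.h>

    /* Compiler-only fence: emits no instruction, but GCC may not cache
     * memory values across it or reorder accesses around it. */
    static inline void compiler_barrier(void)
    {
        __asm__ volatile("" ::: "memory");
    }

    /* hypothetical acquire wrapper, names are illustrative */
    extern int _spinlock_lock_impl(volatile uint32_t* owner, uint32_t timeout);

    void lock_sketch(volatile uint32_t* owner)
    {
        _spinlock_lock_impl(owner, 0xFFFFFFFFu);
        compiler_barrier(); /* critical-section accesses stay after the acquire */
    }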
@@ -101,7 +101,7 @@ static bool dealloc_trace_meta(struct TraceMeta* meta)

 static tracer_mem_chunk_idx_t trace_meta_map_mem_chunk(struct TraceMeta* const p_trace_meta, tracer_mem_chunk_idx_t mem_chunk_num)
 {
-    tracer_mem_chunk_idx_t addr;
+    tracer_mem_chunk_idx_t addr = 0;
     /* direct mapping */
     if (mem_chunk_num < TRACEMETA_NR_DIRECT) {
         if ((addr = p_trace_meta->addr[mem_chunk_num]) == 0) {
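Note: the `addr = 0` initializer is an O2 artifact of a different kind. GCC's `-Wmaybe-uninitialized` analysis rides on the optimizer's data-flow passes, so it only fires when optimization is enabled; code that built quietly at -O0 starts warning (or, with -Werror, failing) at -O2. Sketch of the pattern (hypothetical names):

    #include <stdint.h>

    extern uint32_t table_lookup(uint32_t n);

    uint32_t map_chunk(uint32_t n, int is_direct)
    {
        uint32_t addr = 0; /* without '= 0', -O2 may warn: 'addr' may be
                              used uninitialized in this function */
        if (is_direct)
            addr = table_lookup(n);
        return addr; /* every path now yields a defined value */
    }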
@@ -24,9 +24,9 @@ INC_DIR = -I$(KERNEL_ROOT)/services/shell/letter-shell \
 	-I$(KERNEL_ROOT)/services/app

 ifeq ($(BOARD), imx6q-sabrelite)
 all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr test_irq_send readme.txt | bin
 else
-all: init test_fs simple_client simple_server shell fs_server test_priority test_irq_hdlr readme.txt | bin
+all: init test_fs simple_client simple_server shell fs_server test_irq_hdlr readme.txt | bin
 endif
 	../tools/mkfs/mkfs ./fs.img $^
 	@mv $(filter-out readme.txt, $^) bin
@@ -88,7 +88,8 @@ int main()
     printf("%s: Mapping GIC\n", prog_name);
     mmap(ARM_PERIPHERAL_VIRT_BASE, ARM_PERIPHERAL_BASE, 0x2000, true);

-    int send_time = 1000;
+    // int send_time = 1000;
+    int send_time = 1;
     printf("%s: Sending soft interrupt for %d times\n", prog_name, send_time);
     for (int i = 0; i < send_time; i++) {
         gic_send_sgi(SW_INTERRUPT_3, 0xF, kGicSgiFilter_UseTargetList);
@@ -47,7 +47,7 @@ static inline struct CPU* cur_cpu(void)
     return &global_cpus[cur_cpuid()];
 }

-struct spinlock whole_kernel_lock;
+extern struct spinlock whole_kernel_lock;

 void xizi_enter_kernel();
 bool xizi_try_enter_kernel();
@@ -92,7 +92,7 @@ int sys_kill(int id);
 int sys_register_as_server(char* name);
 int sys_connect_session(char* path, int capacity, struct Session* user_session);
 int sys_poll_session(struct Session* userland_session_arr, int arr_capacity);
-int sys_close_session(struct Session* session);
+int sys_close_session(struct TaskMicroDescriptor* task, struct Session* session);

 int sys_exec(char* img_start, char* name, char** argv);
 int sys_state(sys_state_option option, sys_state_info* info);
@@ -35,12 +35,14 @@ Modification:
 #include "assert.h"
 #include "task.h"

+struct spinlock whole_kernel_lock;
+
 extern uint32_t _binary_init_start[], _binary_default_fs_start[];
 extern int sys_spawn(char* img_start, char* name, char** argv);

 static struct TraceTag hardkernel_tag, softkernel_tag;
-static int core_init_done = 0;
-int main(void)
+static volatile int core_init_done = 0;
+__attribute__((optimize("O0"))) int main(void)
 {
     /* init tracer */
     uint32_t cpu_id = cur_cpuid();
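Note: `core_init_done` is the classic cross-core flag: presumably the boot core sets it once initialization is finished while secondary cores spin on it. Without `volatile`, -O2 is entitled to read the flag once and spin on a register forever. A minimal sketch of the handshake (the function names are assumptions; only the flag appears in this diff):

    static volatile int core_init_done = 0;

    /* boot core: publish completion */
    void boot_core_finish_init(void)
    {
        core_init_done = 1; /* store cannot be elided or deferred */
    }

    /* secondary cores: wait for the boot core */
    void secondary_core_wait(void)
    {
        while (!core_init_done) {
            /* volatile forces a fresh load on every iteration */
        }
    }

Keep in mind `volatile` constrains the compiler only; ordering the data published before the flag against the flag itself would additionally need a hardware barrier such as ARM's `dmb`.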
@@ -294,11 +294,9 @@ void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)
     _map_pages((uintptr_t*)kern_pgdir.pd_addr, DEV_VRTMEM_BASE, DEV_PHYMEM_BASE, DEV_MEM_SZ, dev_attr);

     _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
-    // _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
 }

 void secondary_cpu_load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)
 {
     _p_pgtbl_mmu_access->LoadPgdir((uintptr_t)V2P(kern_pgdir.pd_addr));
-    // _p_pgtbl_mmu_access->LoadPgdirCrit((uintptr_t)V2P(kern_pgdir.pd_addr), intr_driver_tag);
 }
@@ -33,9 +33,8 @@ Modification:
 #include "syscall.h"
 #include "task.h"

-int sys_close_session(struct Session* session)
+int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* session)
 {
-    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
     assert(cur_task != NULL);
     /* check if session is available */
     if (session->buf == NULL || (uintptr_t)session->buf < USER_IPC_SPACE_BASE || (uintptr_t)session->buf > USER_IPC_SPACE_TOP) {
@@ -35,7 +35,6 @@ Modification:
 #include "assert.h"
 #include "ipc.h"
 #include "kalloc.h"
-#include "mmu_common.h"
 #include "multicores.h"
 #include "share_page.h"
 #include "syscall.h"
@@ -51,33 +50,35 @@ static struct {

 static void send_irq_to_user(int irq_num)
 {
-    struct Session* session = &irq_forward_table[irq_num].session;
-    int len = IPC_ARG_INFO_BASE_OFFSET;
-    len += sizeof(struct IpcArgInfo);
+    if (irq_forward_table[irq_num].handle_task != NULL) {
+        struct Session* session = &irq_forward_table[irq_num].session;
+        int len = IPC_ARG_INFO_BASE_OFFSET;
+        len += sizeof(struct IpcArgInfo);

         /* get message space and add session tail */
         void* session_kern_vaddr = P2V(xizi_pager.address_translate(&kernel_irq_proxy->pgdir, (uintptr_t)session->buf));
         struct IpcMsg* buf = session_kern_vaddr + session->tail;

         /* check if server session is full */
         if (buf->header.magic == IPC_MSG_MAGIC && buf->header.done == 0) {
             DEBUG("irq server cannot handle new interrupt by now.\n");
             return;
+        }
+        memset((void*)buf, 0, len);
+        session->tail = (session->tail + len) % session->capacity;
+
+        /* construct message */
+        buf->header.len = len;
+        buf->header.nr_args = 1;
+        buf->header.init = 1;
+        buf->header.opcode = irq_forward_table[irq_num].opcode;
+        buf->header.done = 0;
+        buf->header.magic = IPC_MSG_MAGIC;
+        buf->header.valid = 1;
+
+        /* add session head */
+        session->head = (session->head + len) % session->capacity;
     }
-    memset((void*)buf, 0, len);
-    session->tail = (session->tail + len) % session->capacity;
-
-    /* construct message */
-    buf->header.len = len;
-    buf->header.nr_args = 1;
-    buf->header.init = 1;
-    buf->header.opcode = irq_forward_table[irq_num].opcode;
-    buf->header.done = 0;
-    buf->header.magic = IPC_MSG_MAGIC;
-    buf->header.valid = 1;
-
-    /* add session head */
-    session->head = (session->head + len) % session->capacity;
 }

 int user_irq_handler(int irq, void* tf, void* arg)
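Note: the body of `send_irq_to_user` is now guarded by `handle_task != NULL`, so an interrupt that fires after `sys_unbind_irq` has cleared the forward-table entry no longer walks a closed session. The shape of the fix, reduced to its essentials (names follow the diff; an early return is equivalent to the commit's whole-body if):

    static void send_irq_to_user(int irq_num)
    {
        /* bail out early when no server task is bound to this irq */
        if (irq_forward_table[irq_num].handle_task == NULL)
            return;
        /* ... construct and enqueue the IPC message as before ... */
    }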
@@ -118,7 +119,7 @@ int sys_register_irq(int irq_num, int irq_opcode)
     }

     // bind irq to session
-    if (p_intr_driver->sw_irqtbl[irq_num].handler != NULL) {
+    if (irq_forward_table[irq_num].handle_task != NULL) {
         ERROR("irq %d is occupied.\n", irq_num);
         return -1;
     }
@@ -139,7 +140,7 @@ int sys_unbind_irq(struct TaskMicroDescriptor* task, int irq_num)
     }

     irq_forward_table[irq_num].handle_task = NULL;
-    sys_close_session(&irq_forward_table[irq_num].session);
+    sys_close_session(kernel_irq_proxy, &irq_forward_table[irq_num].session);
     DEBUG("Unbind: %s to irq %d", task->name, irq_num);
     return 0;
 }
@@ -60,7 +60,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4)
         ret = sys_poll_session((struct Session*)param1, (int)param2);
         break;
     case SYSCALL_CLOSE_SESSION:
-        ret = sys_close_session((struct Session*)param1);
+        ret = sys_close_session(cur_cpu()->task, (struct Session*)param1);
         break;
     case SYSCALL_EXEC:
         ret = sys_exec((char*)param1, (char*)param2, (char**)param3);
@@ -167,7 +167,7 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)

 /* alloc a new task with init */
 extern void trap_return(void);
-void task_prepare_enter()
+__attribute__((optimize("O0"))) void task_prepare_enter()
 {
     xizi_leave_kernel();
     trap_return();
@@ -227,6 +227,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
 {
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
     struct TaskMicroDescriptor* next_task;
+    struct CPU* cpu = cur_cpu();

     while (1) {
         next_task = NULL;
@@ -238,29 +239,21 @@ static void _scheduler(struct SchedulerRightGroup right_group)
             next_task = xizi_task_manager.next_runnable_task();
         }
         next_task_emergency = NULL;
-        if (next_task != NULL) {
-            assert(next_task->state == READY);
-        }
-        spinlock_unlock(&whole_kernel_lock);

-        /* not a runnable task */
-        if (UNLIKELY(next_task == NULL)) {
-            spinlock_lock(&whole_kernel_lock);
+        /* if there's not a runnable task, wait for one */
+        if (next_task == NULL) {
+            xizi_leave_kernel();
+            /* leave kernel for other cores, so they may create a runnable task */
+            xizi_enter_kernel();
             continue;
         }

-        /* a runnable task */
-        spinlock_lock(&whole_kernel_lock);
-        if (next_task->state == READY) {
-            next_task->state = RUNNING;
-        } else {
-            continue;
-        }
-        struct CPU* cpu = cur_cpu();
+        /* run the chosen task */
+        assert(next_task->state == READY);
+        next_task->state = RUNNING;
         cpu->task = next_task;
         p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
         context_switch(&cpu->scheduler, next_task->main_thread.context);
-        assert(cur_cpu()->task == NULL);
         assert(next_task->state != RUNNING);
     }
 }
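Note: the idle branch's `xizi_leave_kernel(); xizi_enter_kernel();` pair looks like a no-op but is the release window that lets another core take `whole_kernel_lock` and make a task runnable; an idle core that never dropped the big kernel lock would deadlock the system. A minimal user-space model of the pattern (hypothetical names, ordinary pthreads used purely for illustration, not kernel code):

    #include <pthread.h>

    pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    int runnable = 0; /* set by another thread, under big_lock */

    void idle_loop(void)
    {
        pthread_mutex_lock(&big_lock);
        while (!runnable) {
            pthread_mutex_unlock(&big_lock); /* window: others may enter */
            pthread_mutex_lock(&big_lock);   /* re-enter and re-check */
        }
        pthread_mutex_unlock(&big_lock);
    }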
@@ -49,7 +49,7 @@ Modification:
 #include "task.h"

 extern void context_switch(struct context**, struct context*);
-void dabort_handler(struct trapframe* r)
+__attribute__((optimize("O0"))) void dabort_handler(struct trapframe* r)
 {
     if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
         assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
@@ -69,7 +69,7 @@ void dabort_handler(struct trapframe* r)
     panic("dabort end should never be reashed.\n");
 }

-void iabort_handler(struct trapframe* r)
+__attribute__((optimize("O0"))) void iabort_handler(struct trapframe* r)
 {
     if (r->pc >= DEV_VRTMEM_BASE && is_spinlock_hold_by_current_cpu(&whole_kernel_lock)) {
         assert(is_spinlock_hold_by_current_cpu(&whole_kernel_lock));
@@ -54,7 +54,7 @@ void default_interrupt_routine(void)
 }

 extern void context_switch(struct context**, struct context*);
-void intr_irq_dispatch(struct trapframe* tf)
+__attribute__((optimize("O0"))) void intr_irq_dispatch(struct trapframe* tf)
 {
     xizi_enter_kernel();

@@ -101,7 +101,7 @@ void xizi_enter_kernel()
     spinlock_lock(&whole_kernel_lock);
 }

-bool xizi_try_enter_kernel()
+inline bool xizi_try_enter_kernel()
 {
     /// @warning trampoline is responsible for closing interrupt
     if (spinlock_try_lock(&whole_kernel_lock)) {
@@ -111,7 +111,7 @@ bool xizi_try_enter_kernel()
     return false;
 }

-void xizi_leave_kernel()
+inline void xizi_leave_kernel()
 {
     /// @warning trampoline is responsible for eabling interrupt by using user's state register
     spinlock_unlock(&whole_kernel_lock);
@@ -46,7 +46,7 @@ bool swi_distributer_init(struct SwiDispatcherRightGroup* _right_group)
 }

 extern void context_switch(struct context**, struct context*);
-void software_irq_dispatch(struct trapframe* tf)
+__attribute__((optimize("O0"))) void software_irq_dispatch(struct trapframe* tf)
 {
     xizi_enter_kernel();
     assert(p_intr_driver != NULL);