diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/core.h b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/core.h index 11043cb7c..f8af8ccc9 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/core.h +++ b/Ubiquitous/XiZi_AIoT/hardkernel/arch/arm/armv7-a/cortex-a9/core.h @@ -74,7 +74,7 @@ Modification: #include "cortex_a9.h" -#define NR_CPU 4 +#define NR_CPU 3 __attribute__((always_inline)) static inline uint32_t user_mode() { diff --git a/Ubiquitous/XiZi_AIoT/hardkernel/hardkernel_init.c b/Ubiquitous/XiZi_AIoT/hardkernel/hardkernel_init.c index 660b883f9..351a1266f 100644 --- a/Ubiquitous/XiZi_AIoT/hardkernel/hardkernel_init.c +++ b/Ubiquitous/XiZi_AIoT/hardkernel/hardkernel_init.c @@ -218,10 +218,7 @@ bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag) // cache p_icache_driver->enable(); p_dcache_driver->enable(); - // p_icache_driver->disable(); - // p_dcache_driver->disable(); // clock - // p_clock_driver->sys_clock_init(); p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0); // mmu secondary_cpu_load_kern_pgdir(&init_mmu_tag, &init_intr_tag); diff --git a/Ubiquitous/XiZi_AIoT/services/boards/imx6q-sabrelite/test_irq_sender.c b/Ubiquitous/XiZi_AIoT/services/boards/imx6q-sabrelite/test_irq_sender.c index cd0cc4262..223b1f4d8 100644 --- a/Ubiquitous/XiZi_AIoT/services/boards/imx6q-sabrelite/test_irq_sender.c +++ b/Ubiquitous/XiZi_AIoT/services/boards/imx6q-sabrelite/test_irq_sender.c @@ -24,6 +24,19 @@ enum { ARM_PERIPHERAL_VIRT_BASE = 0x50000000, }; +enum _gicd_sgi_filter { + //! Forward the interrupt to the CPU interfaces specified in the @a target_list parameter. + kGicSgiFilter_UseTargetList = 0, + + //! Forward the interrupt to all CPU interfaces except that of the processor that requested + //! the interrupt. + kGicSgiFilter_AllOtherCPUs = 1, + + //! Forward the interrupt only to the CPU interface of the processor that requested the + //! interrupt. 
+ kGicSgiFilter_OnlyThisCPU = 2 +}; + struct _gicd_registers { uint32_t CTLR; //!< Distributor Control Register. uint32_t TYPER; //!< Interrupt Controller Type Register. @@ -76,7 +89,7 @@ int main() mmap(ARM_PERIPHERAL_VIRT_BASE, ARM_PERIPHERAL_BASE, 0x2000, true); printf("%s: Sending soft interrupt\n", prog_name); - gic_send_sgi(SW_INTERRUPT_3, 0, 2); + gic_send_sgi(SW_INTERRUPT_3, 0, kGicSgiFilter_OnlyThisCPU); printf("%s: Soft interrupt send done\n", prog_name); exit(); } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/services/lib/ipc/libipc.c b/Ubiquitous/XiZi_AIoT/services/lib/ipc/libipc.c index 0d9582aae..410048c31 100644 --- a/Ubiquitous/XiZi_AIoT/services/lib/ipc/libipc.c +++ b/Ubiquitous/XiZi_AIoT/services/lib/ipc/libipc.c @@ -178,24 +178,29 @@ void ipc_server_loop(struct IpcNode* ipc_node) a session could be delay in case one of its message(current message) needs to wait for an interrupt message's arrival interfaces[opcode] should explicitly call delay_session() and return to delay this session */ - while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1 && msg->header.done != 1) { + while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1) { // printf("session %d [%d, %d]\n", session_list[i].id, session_list[i].head, session_list[i].tail); if (session_used_size(&session_list[i]) == 0 && session_forward_tail(&session_list[i], msg->header.len) < 0) { break; } - if (ipc_node->interfaces[msg->header.opcode]) { - ipc_node->interfaces[msg->header.opcode](msg); - // check if this session is delayed by op handler, all messages after the delayed message in current session is blocked. 
- if (session_delayed) { - session_delayed = false; - break; + + // this is a message needs to handle + if (msg->header.done != 1) { + if (ipc_node->interfaces[msg->header.opcode]) { + ipc_node->interfaces[msg->header.opcode](msg); + // check if this session is delayed by op handler, all messages after the delayed message in current session is blocked. + if (session_delayed) { + session_delayed = false; + break; + } + } else { + printf("Unsupport opcode(%d) for server: %s\n", msg->header.opcode, ipc_node->name); } - } else { - printf("Unsupport opcode(%d) for server: %s\n", msg->header.opcode, ipc_node->name); } + // current msg is a message that needs to ignore // finish this message in server's perspective - while (session_forward_head(&session_list[i], msg->header.len) < 0) { - yield(); + if (session_forward_head(&session_list[i], msg->header.len) < 0) { + break; } msg = IPCSESSION_MSG(&session_list[i]); } diff --git a/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.c b/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.c index 27e88c706..a399dec18 100644 --- a/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.c +++ b/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.c @@ -61,6 +61,6 @@ bool session_free_buf(struct Session* session, int len) if (len > session_used_size(session)) { return false; } - assert(session_forward_head(session, len) != 1); + assert(session_forward_head(session, len) != -1); return true; } diff --git a/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.h b/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.h index 24952db0c..4979f74ca 100644 --- a/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.h +++ b/Ubiquitous/XiZi_AIoT/services/lib/ipc/session.h @@ -55,7 +55,7 @@ __attribute__((__always_inline__)) static inline int session_remain_capacity(str __attribute__((__always_inline__)) static inline int session_forward_head(struct Session* session, int len) { - if (((session->head + len) % session->capacity) > session->tail) { + if (len > session_used_size(session)) { 
printf("forward head with too much size\n"); return -1; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h index fa9263196..726e4f518 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/include/task.h +++ b/Ubiquitous/XiZi_AIoT/softkernel/include/task.h @@ -48,6 +48,7 @@ enum ProcState { READY, RUNNING, DEAD, + BLOCKED, NEVER_RUN, }; @@ -118,7 +119,7 @@ struct XiziTaskManager { /* function that's runing by kernel thread context, schedule use tasks */ void (*task_scheduler)(struct SchedulerRightGroup); /* call to yield current use task */ - void (*cur_task_yield_noschedule)(void); + void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking); /* set task priority */ void (*set_cur_task_priority)(int priority); }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/main.c b/Ubiquitous/XiZi_AIoT/softkernel/main.c index a3de7b8aa..e79beecaf 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/main.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/main.c @@ -90,13 +90,14 @@ int main(void) struct SchedulerRightGroup scheduler_rights; assert(AchieveResourceTag(&scheduler_rights.mmu_driver_tag, &hardkernel_tag, "mmu-ac-resource")); assert(AchieveResourceTag(&scheduler_rights.intr_driver_tag, &hardkernel_tag, "intr-ac-resource")); - core_init_done |= (1 << cpu_id); LOG_PRINTF("CPU %d init done\n", cpu_id); spinlock_unlock(&whole_kernel_lock); while (core_init_done != (1 << NR_CPU) - 1) ; + + xizi_enter_kernel(); start_smp_cache_broadcast(cpu_id); xizi_task_manager.task_scheduler(scheduler_rights); diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c index bd5a99612..25ebadc11 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_exit.c @@ -82,7 +82,7 @@ int sys_exit(struct TaskMicroDescriptor* ptask) } // delete task for pcb_list - xizi_task_manager.cur_task_yield_noschedule(); + 
xizi_task_manager.task_yield_noschedule(ptask, true); ptask->state = DEAD; return 0; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c index 0a953892a..5b0722070 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_register_irq.c @@ -84,7 +84,7 @@ int user_irq_handler(int irq, void* tf, void* arg) p_mmu_driver->LoadPgdir((uintptr_t)V2P(cur_cpu()->task->pgdir.pd_addr)); next_task_emergency = irq_forward_table[irq].handle_task; - xizi_task_manager.cur_task_yield_noschedule(); + xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); return 0; } diff --git a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c index 6aa426047..f4dc7b868 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/syscall/sys_yield.c @@ -35,6 +35,6 @@ Modification: int sys_yield() { - xizi_task_manager.cur_task_yield_noschedule(); + xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); return 0; } \ No newline at end of file diff --git a/Ubiquitous/XiZi_AIoT/softkernel/task/scheduler.c b/Ubiquitous/XiZi_AIoT/softkernel/task/scheduler.c index a8d8048dd..f693ab871 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/scheduler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/scheduler.c @@ -40,7 +40,6 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void) { if (task->state == READY) { // found a runnable task, stop this look up - task->state = RUNNING; return task; } else if (task->state == DEAD) { // found a killed task, stop this loop @@ -61,7 +60,6 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority) if (task->state == READY) { // found a runnable task, stop this look up - task->state = RUNNING; return task; } else if (task->state == DEAD) { // found a killed task, stop this loop diff --git 
a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c index 941f19d63..8e8898c3b 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/task/task.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/task/task.c @@ -171,7 +171,6 @@ struct TaskMicroDescriptor* next_task_emergency = NULL; extern void context_switch(struct context**, struct context*); static void _scheduler(struct SchedulerRightGroup right_group) { - xizi_enter_kernel(); struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag); struct TaskMicroDescriptor* next_task; @@ -182,20 +181,25 @@ static void _scheduler(struct SchedulerRightGroup right_group) if (next_task_emergency != NULL && next_task->state == READY) { next_task = next_task_emergency; next_task->state = RUNNING; - next_task_emergency = NULL; } else { next_task = xizi_task_manager.next_runnable_task(); } + next_task_emergency = NULL; + if (next_task != NULL) { + assert(next_task->state == READY); + next_task->state = RUNNING; + } spinlock_unlock(&whole_kernel_lock); /* not a runnable task */ - if (UNLIKELY(next_task == NULL) || UNLIKELY(next_task->state != RUNNING)) { + if (UNLIKELY(next_task == NULL)) { spinlock_lock(&whole_kernel_lock); continue; } /* a runnable task */ spinlock_lock(&whole_kernel_lock); + assert(next_task->state == RUNNING); struct CPU* cpu = cur_cpu(); cpu->task = next_task; p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr)); @@ -203,34 +207,24 @@ static void _scheduler(struct SchedulerRightGroup right_group) } } -static uint32_t yield_cnt = 0; -static void _cur_task_yield_noschedule(void) +static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking) { - yield_cnt++; - - struct TaskMicroDescriptor* current_task = cur_cpu()->task; - assert(current_task != NULL); + assert(task != NULL); // rearrage current task position - doubleListDel(&current_task->node); - // DEBUG("%s,%d\n", current_task->name, strcmp(current_task->name, name1)); - if
(current_task->maxium_tick <= 0) { - if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[current_task->priority])) { - ready_task_priority &= ~(1 << current_task->priority); + doubleListDel(&task->node); + if (task->state == RUNNING) { + if (!blocking) { + task->state = READY; + } else { + task->state = BLOCKED; } - current_task->priority += 1; - current_task->maxium_tick = TASK_CLOCK_TICK * 10; } - doubleListAddOnBack(&current_task->node, &xizi_task_manager.task_list_head[current_task->priority]); - ready_task_priority |= (1 << current_task->priority); - // set current task state - current_task->state = READY; - current_task->remain_tick = TASK_CLOCK_TICK; - cur_cpu()->task = NULL; - if (yield_cnt == 50) { - recover_priority(); - yield_cnt = 0; + task->remain_tick = TASK_CLOCK_TICK; + if (task == cur_cpu()->task) { + cur_cpu()->task = NULL; + } + doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]); } static void _set_cur_task_priority(int priority) @@ -261,7 +255,7 @@ struct XiziTaskManager xizi_task_manager = { .next_runnable_task = max_priority_runnable_task, .task_scheduler = _scheduler, - .cur_task_yield_noschedule = _cur_task_yield_noschedule, + .task_yield_noschedule = _task_yield_noschedule, .set_cur_task_priority = _set_cur_task_priority }; diff --git a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c index b3453dd2c..6c4c92915 100644 --- a/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c +++ b/Ubiquitous/XiZi_AIoT/softkernel/trap/clock_irq_handler.c @@ -58,7 +58,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg) current_task->remain_tick--; current_task->maxium_tick--; if (current_task->remain_tick == 0) { - xizi_task_manager.cur_task_yield_noschedule(); + xizi_task_manager.task_yield_noschedule(current_task, false); } } }