fix sys_kill.

TXuian 2024-04-26 11:05:10 +08:00
parent 10cc7cc270
commit 03039cbdab
14 changed files with 61 additions and 52 deletions

View File

@@ -74,7 +74,7 @@ Modification:
#include "cortex_a9.h"
#define NR_CPU 4
#define NR_CPU 3
__attribute__((always_inline)) static inline uint32_t user_mode()
{

View File

@@ -218,10 +218,7 @@ bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag)
// cache
p_icache_driver->enable();
p_dcache_driver->enable();
// p_icache_driver->disable();
// p_dcache_driver->disable();
// clock
// p_clock_driver->sys_clock_init();
p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0);
// mmu
secondary_cpu_load_kern_pgdir(&init_mmu_tag, &init_intr_tag);

View File

@@ -24,6 +24,19 @@ enum {
ARM_PERIPHERAL_VIRT_BASE = 0x50000000,
};
enum _gicd_sgi_filter {
//! Forward the interrupt to the CPU interfaces specified in the @a target_list parameter.
kGicSgiFilter_UseTargetList = 0,
//! Forward the interrupt to all CPU interfaces except that of the processor that requested
//! the interrupt.
kGicSgiFilter_AllOtherCPUs = 1,
//! Forward the interrupt only to the CPU interface of the processor that requested the
//! interrupt.
kGicSgiFilter_OnlyThisCPU = 2
};
struct _gicd_registers {
uint32_t CTLR; //!< Distributor Control Register.
uint32_t TYPER; //!< Interrupt Controller Type Register.
@@ -76,7 +89,7 @@ int main()
mmap(ARM_PERIPHERAL_VIRT_BASE, ARM_PERIPHERAL_BASE, 0x2000, true);
printf("%s: Sending soft interrupt\n", prog_name);
gic_send_sgi(SW_INTERRUPT_3, 0, 2);
gic_send_sgi(SW_INTERRUPT_3, 0, kGicSgiFilter_OnlyThisCPU);
printf("%s: Soft interrupt send done\n", prog_name);
exit();
}
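With the named filter values, the other delivery modes can be selected the same way. A minimal sketch, assuming the second argument of gic_send_sgi is the per-CPU target-list bitmask (only the filter argument is exercised in this commit):
/* Sketch only: gic_send_sgi() and SW_INTERRUPT_3 come from the code above;
 * treating the second argument as a per-CPU target bitmask is an assumption. */
gic_send_sgi(SW_INTERRUPT_3, (1 << 0) | (1 << 2), kGicSgiFilter_UseTargetList); /* only CPUs 0 and 2 */
gic_send_sgi(SW_INTERRUPT_3, 0, kGicSgiFilter_AllOtherCPUs);                    /* every CPU but the sender */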

View File

@@ -178,24 +178,29 @@ void ipc_server_loop(struct IpcNode* ipc_node)
a session could be delayed in case one of its messages (the current message) needs to wait for an interrupt message's arrival
interfaces[opcode] should explicitly call delay_session() and return to delay this session
*/
while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1 && msg->header.done != 1) {
while (msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1) {
// printf("session %d [%d, %d]\n", session_list[i].id, session_list[i].head, session_list[i].tail);
if (session_used_size(&session_list[i]) == 0 && session_forward_tail(&session_list[i], msg->header.len) < 0) {
break;
}
if (ipc_node->interfaces[msg->header.opcode]) {
ipc_node->interfaces[msg->header.opcode](msg);
// check if this session is delayed by the op handler; all messages after the delayed message in the current session are blocked.
if (session_delayed) {
session_delayed = false;
break;
// this is a message that needs to be handled
if (msg->header.done != 1) {
if (ipc_node->interfaces[msg->header.opcode]) {
ipc_node->interfaces[msg->header.opcode](msg);
// check if this session is delayed by the op handler; all messages after the delayed message in the current session are blocked.
if (session_delayed) {
session_delayed = false;
break;
}
} else {
printf("Unsupport opcode(%d) for server: %s\n", msg->header.opcode, ipc_node->name);
}
} else {
printf("Unsupport opcode(%d) for server: %s\n", msg->header.opcode, ipc_node->name);
}
// current msg is a message that needs to be ignored
// finish this message from the server's perspective
while (session_forward_head(&session_list[i], msg->header.len) < 0) {
yield();
if (session_forward_head(&session_list[i], msg->header.len) < 0) {
break;
}
msg = IPCSESSION_MSG(&session_list[i]);
}
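The comment at the top of this hunk says a handler that must wait for an interrupt message should call delay_session() and return; the loop then sees session_delayed, clears it, and breaks so the message can be retried on a later pass. A minimal sketch of such a handler, assuming a message type of struct IpcMsg, the void (*)(struct IpcMsg*) signature implied by ipc_node->interfaces, and a zero-argument delay_session():
/* Hedged sketch; irq_msg_ready() and do_real_work() are hypothetical helpers,
 * and the IpcMsg type name is assumed. */
void example_irq_dependent_op(struct IpcMsg* msg)
{
    if (!irq_msg_ready()) {
        /* Delay the whole session; ipc_server_loop breaks out of its message
         * loop and will revisit this message later. */
        delay_session();
        return;
    }
    do_real_work(msg); /* the awaited interrupt message has arrived */
}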

View File

@@ -61,6 +61,6 @@ bool session_free_buf(struct Session* session, int len)
if (len > session_used_size(session)) {
return false;
}
assert(session_forward_head(session, len) != 1);
assert(session_forward_head(session, len) != -1);
return true;
}

View File

@@ -55,7 +55,7 @@ __attribute__((__always_inline__)) static inline int session_remain_capacity(str
__attribute__((__always_inline__)) static inline int session_forward_head(struct Session* session, int len)
{
if (((session->head + len) % session->capacity) > session->tail) {
if (len > session_used_size(session)) {
printf("forward head with too much size\n");
return -1;
}
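The new guard refuses to move the head past the bytes currently held in the session ring buffer. A minimal sketch of the bookkeeping session_used_size() is assumed to perform (the head/tail wrap-around convention is an assumption; only the field names appear in this commit):
/* Hedged sketch: head = next byte to consume, tail = next byte to produce,
 * both kept modulo capacity. */
static inline int sketch_session_used_size(struct Session* session)
{
    int used = session->tail - session->head;
    if (used < 0) {
        used += session->capacity; /* tail has wrapped around the buffer end */
    }
    return used;
}
Under that convention, forwarding the head by more than the used size would expose unwritten data, which is exactly what the rewritten check rejects with -1.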

View File

@@ -48,6 +48,7 @@ enum ProcState {
READY,
RUNNING,
DEAD,
BLOCKED,
NEVER_RUN,
};
@@ -118,7 +119,7 @@ struct XiziTaskManager {
/* function that runs in the kernel thread context and schedules user tasks */
void (*task_scheduler)(struct SchedulerRightGroup);
/* call to yield the current user task */
void (*cur_task_yield_noschedule)(void);
void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking);
/* set task priority */
void (*set_cur_task_priority)(int priority);
};
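The renamed hook now receives the task to yield plus a blocking flag instead of implicitly acting on the current task. Condensed from the call sites changed later in this commit (no new code paths are introduced here):
xizi_task_manager.task_yield_noschedule(ptask, true);             /* sys_exit: yield as blocking before marking the task DEAD */
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);  /* sys_yield, clock tick and user IRQ path: re-queue the task as READY */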

View File

@@ -90,13 +90,14 @@ int main(void)
struct SchedulerRightGroup scheduler_rights;
assert(AchieveResourceTag(&scheduler_rights.mmu_driver_tag, &hardkernel_tag, "mmu-ac-resource"));
assert(AchieveResourceTag(&scheduler_rights.intr_driver_tag, &hardkernel_tag, "intr-ac-resource"));
core_init_done |= (1 << cpu_id);
LOG_PRINTF("CPU %d init done\n", cpu_id);
spinlock_unlock(&whole_kernel_lock);
while (core_init_done != (1 << NR_CPU) - 1)
;
xizi_enter_kernel();
start_smp_cache_broadcast(cpu_id);
xizi_task_manager.task_scheduler(scheduler_rights);
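Each core publishes its bit in core_init_done and spins until every core has checked in; with NR_CPU lowered to 3 earlier in this commit, the barrier releases once the mask reaches (1 << 3) - 1 = 0b111:
/* Worked example of the barrier condition with NR_CPU == 3. */
core_init_done |= (1 << 0); /* CPU 0 done -> 0b001 */
core_init_done |= (1 << 1); /* CPU 1 done -> 0b011 */
core_init_done |= (1 << 2); /* CPU 2 done -> 0b111 == (1 << NR_CPU) - 1, spin ends */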

View File

@@ -82,7 +82,7 @@ int sys_exit(struct TaskMicroDescriptor* ptask)
}
// delete task from pcb_list
xizi_task_manager.cur_task_yield_noschedule();
xizi_task_manager.task_yield_noschedule(ptask, true);
ptask->state = DEAD;
return 0;

View File

@@ -84,7 +84,7 @@ int user_irq_handler(int irq, void* tf, void* arg)
p_mmu_driver->LoadPgdir((uintptr_t)V2P(cur_cpu()->task->pgdir.pd_addr));
next_task_emergency = irq_forward_table[irq].handle_task;
xizi_task_manager.cur_task_yield_noschedule();
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
return 0;
}

View File

@@ -35,6 +35,6 @@ Modification:
int sys_yield()
{
xizi_task_manager.cur_task_yield_noschedule();
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
return 0;
}

View File

@@ -40,7 +40,6 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void)
{
if (task->state == READY) {
// found a runnable task, stop this lookup
task->state = RUNNING;
return task;
} else if (task->state == DEAD) {
// found a killed task, stop this loop
@@ -61,7 +60,6 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)
if (task->state == READY) {
// found a runnable task, stop this lookup
task->state = RUNNING;
return task;
} else if (task->state == DEAD) {
// found a killed task, stop this loop

View File

@@ -171,7 +171,6 @@ struct TaskMicroDescriptor* next_task_emergency = NULL;
extern void context_switch(struct context**, struct context*);
static void _scheduler(struct SchedulerRightGroup right_group)
{
xizi_enter_kernel();
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
struct TaskMicroDescriptor* next_task;
@@ -182,20 +181,25 @@ static void _scheduler(struct SchedulerRightGroup right_group)
if (next_task_emergency != NULL && next_task->state == READY) {
next_task = next_task_emergency;
next_task->state = RUNNING;
next_task_emergency = NULL;
} else {
next_task = xizi_task_manager.next_runnable_task();
}
next_task_emergency = NULL;
if (next_task != NULL) {
assert(next_task->state == READY);
next_task->state = RUNNING;
}
spinlock_unlock(&whole_kernel_lock);
/* not a runnable task */
if (UNLIKELY(next_task == NULL) || UNLIKELY(next_task->state != RUNNING)) {
if (UNLIKELY(next_task == NULL)) {
spinlock_lock(&whole_kernel_lock);
continue;
}
/* a runnable task */
spinlock_lock(&whole_kernel_lock);
assert(next_task->state == RUNNING);
struct CPU* cpu = cur_cpu();
cpu->task = next_task;
p_mmu_driver->LoadPgdir((uintptr_t)V2P(next_task->pgdir.pd_addr));
@@ -203,34 +207,24 @@ static void _scheduler(struct SchedulerRightGroup right_group)
}
}
static uint32_t yield_cnt = 0;
static void _cur_task_yield_noschedule(void)
static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
{
yield_cnt++;
struct TaskMicroDescriptor* current_task = cur_cpu()->task;
assert(current_task != NULL);
assert(task != NULL);
// rearrange current task position
doubleListDel(&current_task->node);
// DEBUG("%s,%d\n", current_task->name, strcmp(current_task->name, name1));
if (current_task->maxium_tick <= 0) {
if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[current_task->priority])) {
ready_task_priority &= ~(1 << current_task->priority);
doubleListDel(&task->node);
if (task->state == RUNNING) {
if (!blocking) {
task->state = READY;
} else {
task->state = BLOCKED;
}
current_task->priority += 1;
current_task->maxium_tick = TASK_CLOCK_TICK * 10;
}
doubleListAddOnBack(&current_task->node, &xizi_task_manager.task_list_head[current_task->priority]);
ready_task_priority |= (1 << current_task->priority);
// set current task state
current_task->state = READY;
current_task->remain_tick = TASK_CLOCK_TICK;
cur_cpu()->task = NULL;
if (yield_cnt == 50) {
recover_priority();
yield_cnt = 0;
task->remain_tick = TASK_CLOCK_TICK;
if (task == cur_cpu()->task) {
cur_cpu()->task = NULL;
}
doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
}
static void _set_cur_task_priority(int priority)
@@ -261,7 +255,7 @@ struct XiziTaskManager xizi_task_manager = {
.next_runnable_task = max_priority_runnable_task,
.task_scheduler = _scheduler,
.cur_task_yield_noschedule = _cur_task_yield_noschedule,
.task_yield_noschedule = _task_yield_noschedule,
.set_cur_task_priority = _set_cur_task_priority
};

View File

@@ -58,7 +58,7 @@ int xizi_clock_handler(int irq, void* tf, void* arg)
current_task->remain_tick--;
current_task->maxium_tick--;
if (current_task->remain_tick == 0) {
xizi_task_manager.cur_task_yield_noschedule();
xizi_task_manager.task_yield_noschedule(current_task, false);
}
}
}