forked from xuos/xiuos
Update spinlock to avoid starvation.
This commit is contained in:
parent 50dab7b553
commit 08c8f0b952
@@ -74,7 +74,7 @@ Modification:
 #include "cortex_a9.h"

-#define NR_CPU 1
+#define NR_CPU 4

 __attribute__((always_inline)) static inline uint32_t user_mode()
 {
@@ -233,7 +233,6 @@ struct ICacheDone* hardkernel_icache_init(struct TraceTag* hardkernel_tag)
 {
     /* init icache */
     icache_done.enable();
-    // icache_done.disable();
     return &icache_done;
 }

@@ -241,6 +240,5 @@ struct DCacheDone* hardkernel_dcache_init(struct TraceTag* hardkernel_tag)
 {
     /* init dcache */
     dcache_done.enable();
-    // dcache_done.disable();
     return &dcache_done;
 }
@@ -216,10 +216,10 @@ bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag)
     p_intr_driver->sys_irq_init(cpu_id);
     p_intr_driver->cpu_irq_disable();
     // cache
-    // p_icache_driver->enable();
-    // p_dcache_driver->enable();
-    p_icache_driver->disable();
-    p_dcache_driver->disable();
+    p_icache_driver->enable();
+    p_dcache_driver->enable();
+    // p_icache_driver->disable();
+    // p_dcache_driver->disable();
     // clock
-    // p_clock_driver->sys_clock_init();
+    p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0);
@@ -25,8 +25,24 @@
 #include "task.h"
 #include "trap_common.h"

+#include "list.h"
+
+struct lock_node {
+    int cpu_id;
+    struct double_list_node node;
+};
+
+static struct double_list_node lock_request_guard;
+static struct lock_node core_lock_request[NR_CPU];
+static struct spinlock request_lock;
 bool module_spinlock_use_intr_init(void)
 {
+    for (int i = 0; i < NR_CPU; i++) {
+        core_lock_request[i].cpu_id = i;
+        doubleListNodeInit(&core_lock_request[i].node);
+    }
+    doubleListNodeInit(&lock_request_guard);
+    spinlock_init(&request_lock, "requestlock");
     return true;
 }
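The structures added above sketch a FIFO hand-off between cores: each CPU owns one lock_node, appends it to the list headed by lock_request_guard when it wants the lock, and spins until its node reaches the head, so requests are served in arrival order and no core can starve. A minimal sketch of that discipline follows; the helper names lock_request_enqueue/lock_request_dequeue are hypothetical, and the list semantics (doubleListAddOnBack appends at the tail, the guard's next pointer is the oldest waiter) are inferred from this diff, not confirmed by it.

// Sketch only: the FIFO queue discipline implied by the structures above.
// Assumes doubleListAddOnBack() appends at the tail and that
// lock_request_guard.next points at the oldest waiter (the queue head).
static void lock_request_enqueue(int cpu_id) // hypothetical helper
{
    _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
    doubleListAddOnBack(&core_lock_request[cpu_id].node, &lock_request_guard);
    _spinlock_unlock(&request_lock);

    // Spin until this core's request is the oldest outstanding one;
    // arrival order is service order, which rules out starvation.
    while (lock_request_guard.next != &core_lock_request[cpu_id].node)
        ;
}

static void lock_request_dequeue(int cpu_id) // hypothetical helper
{
    _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
    _double_list_del(core_lock_request[cpu_id].node.prev,
        core_lock_request[cpu_id].node.next);
    _spinlock_unlock(&request_lock);
}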
@@ -44,18 +60,31 @@ void spinlock_init(struct spinlock* lock, char* name)
 }

+extern int _spinlock_lock(struct spinlock* lock, uint32_t timeout);
+void _spinlock_unlock(struct spinlock* lock);
+
 void spinlock_lock(struct spinlock* lock)
 {
     if (lock->owner_cpu != SPINLOCK_STATE_UNLOCK && lock->owner_cpu == cur_cpuid()) {
         ERROR("spinlock %s lock double locked by core %d\n", lock->name, lock->owner_cpu);
         panic("");
     }
+    // _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
+    // doubleListAddOnBack(&core_lock_request[cur_cpuid()].node, &lock_request_guard);
+    // _spinlock_unlock(&request_lock);
+
+    // while (lock_request_guard.next != &core_lock_request[cur_cpuid()].node)
+    //     ;
+
     _spinlock_lock(lock, SPINLOCK_LOCK_WAITFOREVER);
 }

+void _spinlock_unlock(struct spinlock* lock);
 void spinlock_unlock(struct spinlock* lock)
 {
+    // assert(lock_request_guard.next == &core_lock_request[cur_cpuid()].node);
+    // _spinlock_lock(&request_lock, SPINLOCK_LOCK_WAITFOREVER);
+    // _double_list_del(core_lock_request[cur_cpuid()].node.prev, core_lock_request[cur_cpuid()].node.next);
+    // _spinlock_unlock(&request_lock);
+
     _spinlock_unlock(lock);
 }
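Note that the queue path in spinlock_lock/spinlock_unlock is left commented out, so in this commit fairness still rests on whatever _spinlock_lock itself implements. A classical alternative with the same first-come-first-served guarantee is a ticket lock. The sketch below is illustrative and self-contained, using GCC/Clang __atomic builtins rather than XiZi's lock API:

#include <stdint.h>

// Minimal ticket lock: starvation-free because cores are served in
// strict ticket order. Illustrative only; not part of this commit.
struct ticket_lock {
    volatile uint32_t next;  // next ticket to hand out
    volatile uint32_t owner; // ticket currently being served
};

static void ticket_lock_acquire(struct ticket_lock* lk)
{
    // Atomically take a ticket, then wait to be served.
    uint32_t my = __atomic_fetch_add(&lk->next, 1, __ATOMIC_ACQUIRE);
    while (__atomic_load_n(&lk->owner, __ATOMIC_ACQUIRE) != my)
        ; // on ARM, a WFE/SEV pair here would reduce bus traffic
}

static void ticket_lock_release(struct ticket_lock* lk)
{
    // Only the holder writes owner, so a plain increment is safe.
    __atomic_store_n(&lk->owner, lk->owner + 1, __ATOMIC_RELEASE);
}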
@@ -55,7 +55,6 @@ void GetUsrDevPteAttr(uintptr_t* attr)
     init = 1;
-
     usr_pte_attr.entry = 0;
     usr_pte_attr.S = 1;
     usr_pte_attr.desc_type = PAGE_4K;
     usr_pte_attr.AP1_0 = AccessPermission_KernelUser;
 }

@@ -70,7 +69,6 @@ void GetDevPteAttr(uintptr_t* attr)
     init = 1;
-
     dev_pte_attr.entry = 0;
     dev_pte_attr.S = 1;
     dev_pte_attr.desc_type = PAGE_4K;
     dev_pte_attr.AP1_0 = AccessPermission_KernelOnly;
 }
@@ -42,30 +42,6 @@ Modification:
 extern uint32_t _binary_init_start[], _binary_default_fs_start[];
 static struct TraceTag hardkernel_tag, softkernel_tag;

-void configure_cpu(uint32_t cpu)
-{
-    const unsigned int all_ways = 0xf;
-
-    disable_strict_align_check();
-
-    // Enable branch prediction
-    arm_branch_target_cache_invalidate();
-    arm_branch_prediction_enable();
-
-    // Enable L1 caches
-    // arm_dcache_enable();
-    // arm_dcache_invalidate();
-    // arm_icache_enable();
-    // arm_icache_invalidate();
-
-    // Invalidate SCU copy of TAG RAMs
-    scu_secure_invalidate(cpu, all_ways);
-
-    // Join SMP
-    scu_join_smp();
-    scu_enable_maintenance_broadcast();
-}
-
 extern void _boot_start();
 void cpu_start_secondary(uint8_t coreNumber)
 {

@@ -87,6 +63,8 @@ void cpu_start_secondary(uint8_t coreNumber)
+        HW_SRC_GPR7_WR((uint32_t)&_boot_start);
+        HW_SRC_SCR.B.CORE3_ENABLE = 1;
         break;
     default:
         break;
     }
 }
@@ -125,8 +103,7 @@ int main(void)
     }
     show_xizi_bar();

-    int cpu_count = NR_CPU;
-    for (int i = 1; i < cpu_count; i++) {
+    for (int i = 1; i < NR_CPU; i++) {
         // start secondary cpus
         cpu_start_secondary(i);
     }
@@ -143,38 +120,12 @@ int main(void)
     assert(AchieveResourceTag(&scheduler_rights.mmu_driver_tag, &hardkernel_tag, "mmu-ac-resource"));
     assert(AchieveResourceTag(&scheduler_rights.intr_driver_tag, &hardkernel_tag, "intr-ac-resource"));

-    struct TraceTag main_icache_tag, main_dcache_tag;
-    AchieveResourceTag(&main_icache_tag, &hardkernel_tag, "icache-ac-resource");
-    AchieveResourceTag(&main_dcache_tag, &hardkernel_tag, "dcache-ac-resource");
-    struct ICacheDone* p_icache_driver = AchieveResource(&main_icache_tag);
-    struct DCacheDone* p_dcache_driver = AchieveResource(&main_dcache_tag);
-
     core_init_done |= (1 << cpu_id);
     LOG_PRINTF("CPU %d init done\n", cpu_id);
     spinlock_unlock(&whole_kernel_lock);

     while (core_init_done != (1 << NR_CPU) - 1)
         ;
-    DEBUG_PRINTF("%d", cpu_id);
-
-    // scu_enable();
-    // configure_cpu(cpu_id);
-    // p_dcache_driver->enable();
-    // p_icache_driver->enable();
-
-    // spinlock_lock(&whole_kernel_lock);
-    // p_dcache_driver->flushall();
-    // spinlock_unlock(&whole_kernel_lock);
-
-    // while (true) {
-    //     spinlock_lock(&whole_kernel_lock);
-    //     DEBUG("CPU: %d\n", cpu_id);
-    //     secondary_cpu_load_kern_pgdir(&scheduler_rights.mmu_driver_tag, NULL);
-    //     CLEARTLB(0);
-    //     p_dcache_driver->flushall();
-    //     spinlock_unlock(&whole_kernel_lock);
-    // }
-
     xizi_task_manager.task_scheduler(scheduler_rights);

     // never reached
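The retained core_init_done lines above form a bitmask spin barrier for CPU bring-up: each core ORs in its own bit, then spins until every bit is set. The same pattern in standalone form, using C11 atomics in place of the kernel's volatile word and whole_kernel_lock (names here are hypothetical):

#include <stdatomic.h>

// Bitmask spin barrier: core i publishes "ready" as bit i, then waits
// until all ncpu bits are set. Hypothetical standalone illustration.
static atomic_uint cores_ready;

static void cpu_barrier_wait(int cpu_id, int ncpu)
{
    atomic_fetch_or_explicit(&cores_ready, 1u << cpu_id, memory_order_release);
    while (atomic_load_explicit(&cores_ready, memory_order_acquire)
        != (1u << ncpu) - 1)
        ; // spin until every core has checked in
}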
@@ -106,7 +106,7 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
         uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
         if (page_paddr == 0) {
-            ERROR("copy elf file to unmapped addr");
+            ERROR("copy elf file to unmapped addr\n");
             goto error_exec;
         }
         uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);