forked from xuos/xiuos
Use only whole kernel lock
This commit is contained in:
parent d987bf0357
commit b939557317
@@ -74,7 +74,7 @@ Modification:
 #include "cortex_a9.h"
 
-#define NR_CPU 4
+#define NR_CPU 1
 
 __attribute__((always_inline)) static inline uint32_t user_mode()
 {

@@ -221,7 +221,7 @@ bool secondary_cpu_hardkernel_init(int cpu_id, struct TraceTag* _hardkernel_tag)
     p_icache_driver->disable();
     p_dcache_driver->disable();
     // clock
-    p_clock_driver->sys_clock_init();
+    // p_clock_driver->sys_clock_init();
     p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), cpu_id, 0);
     // mmu
     secondary_cpu_load_kern_pgdir(&init_mmu_tag, &init_intr_tag);

@@ -34,6 +34,7 @@ Modification:
 #include "trap_common.h"
 
 #include "log.h"
+#include "multicores.h"
 
 extern void init_stack(uint32_t, uint32_t);
 extern void user_trap_swi_enter(void);

@@ -47,6 +48,7 @@ static struct XiziTrapDriver xizi_trap_driver;
 void panic(char* s)
 {
     xizi_trap_driver.cpu_irq_disable();
+    spinlock_unlock(&whole_kernel_lock);
     KPrintf("panic: %s\n", s);
     for (;;)
         ;

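Note: this hunk is the heart of the change. The per-object spinlocks removed elsewhere in this diff become unnecessary because every CPU now serializes on a single whole_kernel_lock for the whole of its stay in the kernel, and panic() releases that lock before spinning so the other cores are not wedged on their next kernel entry. A minimal sketch of the pattern, assuming entry/exit helpers that do not appear in this diff:

#include <stdint.h>

/* Sketch of a big-kernel-lock, not the kernel's own lock implementation. */
typedef struct {
    volatile uint32_t locked;
} spinlock_t;

static spinlock_t whole_kernel_lock = { 0 };

static inline void spinlock_lock(spinlock_t* l)
{
    /* spin until the previous holder releases the lock */
    while (__atomic_exchange_n(&l->locked, 1, __ATOMIC_ACQUIRE) != 0)
        ;
}

static inline void spinlock_unlock(spinlock_t* l)
{
    __atomic_store_n(&l->locked, 0, __ATOMIC_RELEASE);
}

/* Every kernel entry (trap, syscall, IRQ) takes the one lock... */
static void kernel_enter(void) { spinlock_lock(&whole_kernel_lock); }

/* ...and drops it just before returning to user mode. */
static void kernel_exit(void) { spinlock_unlock(&whole_kernel_lock); }

/* panic() drops the lock before spinning forever, as in the hunk above,
 * so the surviving cores can still enter the kernel and report state. */
static void panic_sketch(void)
{
    spinlock_unlock(&whole_kernel_lock);
    for (;;)
        ;
}
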
@@ -60,8 +60,6 @@ static inline int namecmp(const char* s, const char* t)
 static struct TraceMeta* alloc_trace_meta()
 {
     int index = -1;
-
-    spinlock_lock(&sys_tracer.trace_meta_bitmap_lock);
     for (uint32_t idx = 0; idx < BITS_TRACEMETA_BITMAP; idx++) {
         if (sys_tracer.trace_meta_bit_map[idx] == 0xFFFFFFFF) {
             continue;

@@ -74,7 +72,6 @@ static struct TraceMeta* alloc_trace_meta()
             break;
         }
     }
-    spinlock_unlock(&sys_tracer.trace_meta_bitmap_lock);
 
     if (index == -1) {
         panic("Tracer no enough TracerMeta.");

@@ -87,15 +84,12 @@ static struct TraceMeta* alloc_trace_meta()
 static bool dealloc_trace_meta(struct TraceMeta* meta)
 {
     int index = meta->index;
-
-    spinlock_lock(&sys_tracer.trace_meta_bitmap_lock);
     // clear bitmap
     uint32_t outer_index = index / 32;
     uint32_t inner_index = index % 32;
     sys_tracer.trace_meta_bit_map[outer_index] &= (uint32_t)(~(1 << inner_index));
     // clear meta
     sys_tracer.trace_meta_poll[index].type = TRACER_INVALID;
-    spinlock_unlock(&sys_tracer.trace_meta_bitmap_lock);
 
     if (index == -1) {
         panic("Tracer no enough TracerMeta.");

@@ -337,7 +331,7 @@ static struct TraceMeta* tracer_find_meta(struct TraceMeta* const p_owner, char*
             return p_owner_inside;
         }
         if ((vnp = tracer_find_meta_onestep(p_owner_inside, name, NULL)) == 0) {
-            ERROR("Not such object: %s\n", path);
+            DEBUG("Not such object: %s\n", path);
             return NULL;
         }
         p_owner_inside = vnp;

@@ -76,8 +76,6 @@ void mem_chunk_synchronizer_init(uintptr_t mem_chunk_base, uint32_t mem_chunk_si
 
 static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
 {
-    spinlock_lock(&tracer_mem_chunk_syner.lock);
-
     // cached mem_chunk cache
     struct tracer_mem_chunk* b;
     DOUBLE_LIST_FOR_EACH_ENTRY(b, &tracer_mem_chunk_syner.head, list_node)

@@ -85,7 +83,6 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
         if (b->chunk_id == chunk_id) {
             if (!(b->flag & TRACER_MEM_CHUNK_BUSY)) {
                 b->flag |= TRACER_MEM_CHUNK_BUSY;
-                spinlock_unlock(&tracer_mem_chunk_syner.lock);
                 return b;
             }
             ERROR("tracer mem_chunk syner is locked\n");

@@ -99,7 +96,6 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
         if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
             b->chunk_id = chunk_id;
             b->flag = TRACER_MEM_CHUNK_BUSY;
-            spinlock_unlock(&tracer_mem_chunk_syner.lock);
             return b;
         }
     }

@@ -134,13 +130,9 @@ void tracer_mem_chunk_release(struct tracer_mem_chunk* b)
     }
 
     // move mem_chunk that just used to the head of cache list
-    spinlock_lock(&tracer_mem_chunk_syner.lock);
-
     doubleListDel(&b->list_node);
     doubleListAddOnHead(&b->list_node, &tracer_mem_chunk_syner.head);
     b->flag &= ~TRACER_MEM_CHUNK_BUSY;
-
-    spinlock_unlock(&tracer_mem_chunk_syner.lock);
 }
 
 static void tracer_mem_chunk_zero(uint32_t chunk_id)

@@ -157,7 +149,6 @@ static void tracer_mem_chunk_zero(uint32_t chunk_id)
 static uint32_t find_first_free_mem_chunk()
 {
     /// @todo another mem_chunk
-    spinlock_lock(&sys_tracer.mem_chunk_bitmap_lock);
     for (uint32_t idx = 0; idx < BITS_MEM_CHUNK_BITMAP; idx++) {
         if (sys_tracer.mem_chunks_bit_map[idx] == 0xFFFFFFFF) {
             continue;

@@ -165,11 +156,9 @@ static uint32_t find_first_free_mem_chunk()
         uint32_t position = __builtin_ffs(~sys_tracer.mem_chunks_bit_map[idx]);
         if (position != 32) {
             sys_tracer.mem_chunks_bit_map[idx] |= (1 << (position - 1));
-            spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
             return idx * 32 + position;
         }
     }
-    spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
     panic("Tracer no enough space.");
     return 0;
 }

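Note: find_first_free_mem_chunk() scans the bitmap one 32-bit word at a time and applies __builtin_ffs to the inverted word, so the first set bit of the inversion is the first free chunk. The kernel returns idx * 32 + position with the 1-based position from ffs; the standalone sketch below uses 0-based bit indices but is otherwise the same idea (all names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Find the first zero bit in a bitmap of nwords 32-bit words, mark it used,
 * and return its 0-based index; -1 means every bit is already set. */
static int bitmap_alloc_first_free(uint32_t* map, uint32_t nwords)
{
    for (uint32_t idx = 0; idx < nwords; idx++) {
        if (map[idx] == 0xFFFFFFFF) {
            continue; /* this word is full */
        }
        int position = __builtin_ffs((int)~map[idx]); /* 1-based, 0 if none */
        if (position != 0) {
            map[idx] |= (1u << (position - 1)); /* mark the slot allocated */
            return (int)(idx * 32) + (position - 1);
        }
    }
    return -1;
}

int main(void)
{
    uint32_t map[2] = { 0xFFFFFFFF, 0x00000007 }; /* word 0 full, bits 0-2 of word 1 used */
    printf("allocated bit %d\n", bitmap_alloc_first_free(map, 2)); /* prints 35 */
    return 0;
}
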
@@ -184,11 +173,9 @@ uint32_t tracer_mem_chunk_alloc()
 void tracer_mem_chunk_free(uint32_t chunk_id)
 {
     assert(chunk_id >= 0 && chunk_id < NR_TRACER_MEM_CHUNKS);
-    spinlock_lock(&sys_tracer.mem_chunk_bitmap_lock);
     uint32_t idx = chunk_id % 32;
     uint32_t inner_mem_chunk_bit = chunk_id / 32;
     // assert mem_chunk is allocated
     assert((sys_tracer.mem_chunks_bit_map[idx] & (1 << inner_mem_chunk_bit)) != 0);
     sys_tracer.mem_chunks_bit_map[idx] &= (uint32_t)(~(1 << inner_mem_chunk_bit));
-    spinlock_unlock(&sys_tracer.mem_chunk_bitmap_lock);
 }

@@ -75,8 +75,6 @@ static struct KPage* KBuddyPagesAlloc(struct KBuddy* pbuddy, int nPages)
     struct KFreeList* list = NULL;
     int i = 0, order = 0;
 
-    spinlock_lock(&pbuddy->lock);
-
     // find order
     for (order = 0; (FREE_LIST_INDEX(order)) < nPages; order++)
         ;

@@ -99,12 +97,10 @@ static struct KPage* KBuddyPagesAlloc(struct KBuddy* pbuddy, int nPages)
         // set the pages' order
         _buddy_set_pages_order(page, order);
 
-        spinlock_unlock(&pbuddy->lock);
         return page;
     }
 
     // there is no enough free page to satisfy the nPages
-    spinlock_unlock(&pbuddy->lock);
     return NULL;
 }
 

@@ -116,8 +112,6 @@ static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
     uint32_t buddy_idx = 0, new_buddy_idx = 0;
     uint32_t page_idx = page - pbuddy->pages;
 
-    spinlock_lock(&pbuddy->lock);
-
     for (; order < MAX_BUDDY_ORDER - 1; order++) {
         // find and delete buddy to combine
         buddy_idx = BUDDY_PAGE_INDEX(page_idx, order);

@@ -141,7 +135,6 @@ static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
     doubleListAddOnHead(&page->node, &pbuddy->free_list[order].list_head);
     pbuddy->free_list[order].n_free_pages++;
 
-    spinlock_unlock(&pbuddy->lock);
     return;
 }
 

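Note: KBuddyPagesFree() walks up the orders and merges the freed block with its buddy whenever that buddy is also free. The diff does not show how BUDDY_PAGE_INDEX is defined; the sketch below assumes the conventional buddy relation, where the buddy of the block at page index idx and order k is found by flipping bit k of the index, and the merged block starts at the lower of the two indices:

#include <stdint.h>
#include <stdio.h>

/* Conventional buddy arithmetic (assumed, not taken from the diff). */
static inline uint32_t buddy_page_index(uint32_t idx, uint32_t order)
{
    return idx ^ (1u << order); /* flip bit `order` to reach the buddy */
}

static inline uint32_t merged_page_index(uint32_t idx, uint32_t order)
{
    return idx & ~(1u << order); /* combined block starts at the lower index */
}

int main(void)
{
    /* order 0 pairs 4<->5, order 1 pairs 4<->6, order 2 pairs 4<->0 */
    printf("%u %u %u\n", buddy_page_index(4, 0), buddy_page_index(4, 1), buddy_page_index(4, 2));
    printf("merged start: %u\n", merged_page_index(5, 0)); /* prints 4 */
    return 0;
}
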
@@ -91,23 +91,18 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
 
-    spinlock_lock(&task->lock);
     // map double vaddr page to support uniform ring buffer r/w
     uintptr_t vaddr = alloc_share_page_addr(task, nr_pages * 2);
     if (UNLIKELY(vaddr == 0)) {
-        spinlock_unlock(&task->lock);
         return (uintptr_t)NULL;
     }
     if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false)) {
-        spinlock_unlock(&task->lock);
         return (uintptr_t)NULL;
     }
     if (!xizi_pager.map_pages(task->pgdir.pd_addr, vaddr + (nr_pages * PAGE_SIZE), paddr, nr_pages * PAGE_SIZE, false)) {
         xizi_pager.unmap_pages(task->pgdir.pd_addr, vaddr, nr_pages * PAGE_SIZE);
-        spinlock_unlock(&task->lock);
         return (uintptr_t)NULL;
     }
-    spinlock_unlock(&task->lock);
     if (task == cur_cpu()->task) {
         p_mmu_driver->TlbFlush(vaddr, 2 * nr_pages * PAGE_SIZE);
         /// @todo clean range rather than all

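Note: the comment in this hunk ("map double vaddr page to support uniform ring buffer r/w") refers to mapping the same physical pages twice, back to back in the task's address space, so a record that wraps past the end of the ring buffer can still be read or written with one contiguous memcpy. A userspace illustration of the same trick using POSIX mmap (a generic sketch, not the kernel's pager API):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t size = (size_t)sysconf(_SC_PAGESIZE); /* a one-page ring buffer */
    int fd = memfd_create("ring", 0);
    if (fd < 0 || ftruncate(fd, (off_t)size) < 0)
        return 1;

    /* reserve 2*size of address space, then map the same pages twice */
    char* base = mmap(NULL, 2 * size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    mmap(base, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
    mmap(base + size, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);

    /* a single memcpy that runs past `size` lands at the start of the ring */
    memcpy(base + size - 4, "wrapped", 7);
    printf("%.3s\n", base); /* prints "ped": bytes 0..2 of the ring */
    return 0;
}
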
@@ -123,14 +118,12 @@ uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
 
-    spinlock_lock(&task->lock);
     bool ret = false;
     if (is_dev) {
         ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, true);
     } else {
         ret = xizi_pager.map_pages(task->pgdir.pd_addr, vaddr, paddr, nr_pages * PAGE_SIZE, false);
     }
-    spinlock_unlock(&task->lock);
     if (!ret) {
         return (uintptr_t)NULL;
     }

@@ -150,10 +143,8 @@ void unmap_task_share_pages(struct TaskMicroDescriptor* task, const uintptr_t ta
     struct DCacheDone* p_dcache_done = AchieveResource(&right_group.dcache_driver_tag);
     struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
 
-    spinlock_lock(&task->lock);
     xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr, nr_pages * PAGE_SIZE);
     xizi_pager.unmap_pages(task->pgdir.pd_addr, task_vaddr + (nr_pages * PAGE_SIZE), nr_pages * PAGE_SIZE);
-    spinlock_unlock(&task->lock);
     if (task == cur_cpu()->task) {
         p_mmu_driver->TlbFlush(task_vaddr, 2 * nr_pages * PAGE_SIZE);
         /// @todo clean range rather than all

@@ -200,20 +191,16 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
     session_backend->client_side.buf_addr = client_vaddr;
     session_backend->client_side.capacity = true_capacity;
     session_backend->client_side.closed = false;
-    spinlock_lock(&client->lock);
     doubleListNodeInit(&session_backend->client_side.node);
     doubleListAddOnBack(&session_backend->client_side.node, &client->cli_sess_listhead);
-    spinlock_unlock(&client->lock);
     // init server side session struct
     session_backend->server_side.buf_addr = server_vaddr;
     session_backend->server_side.capacity = true_capacity;
     session_backend->server_side.head = 0;
     session_backend->server_side.tail = 0;
     session_backend->server_side.closed = false;
-    spinlock_lock(&server->lock);
     doubleListNodeInit(&session_backend->server_side.node);
     doubleListAddOnBack(&session_backend->server_side.node, &server->svr_sess_listhead);
-    spinlock_unlock(&server->lock);
 
     return session_backend;
 }

@@ -232,15 +219,11 @@ int delete_share_pages(struct session_backend* session_backend)
 
     /* unmap share pages */
     if (session_backend->client) {
-        spinlock_lock(&session_backend->client->lock);
         doubleListDel(&session_backend->client_side.node);
-        spinlock_unlock(&session_backend->client->lock);
     }
 
     if (session_backend->server) {
-        spinlock_lock(&session_backend->server->lock);
         doubleListDel(&session_backend->server_side.node);
-        spinlock_unlock(&session_backend->server->lock);
     }
 
     /* free seesion backend */

@@ -70,7 +70,7 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
 
     struct TraceTag server_tag;
     if (!AchieveResourceTag(&server_tag, &server_identifier_owner, path)) {
-        ERROR("Not server: %s\n", path);
+        DEBUG("Not server: %s\n", path);
         return -1;
     }
 

@@ -106,7 +106,8 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
         uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
         if (page_paddr == 0) {
-            panic("copy elf file to unmapped addr");
+            ERROR("copy elf file to unmapped addr");
+            goto error_exec;
         }
         uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
         memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);

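Note: the surrounding loop copies an ELF segment into the new address space one page at a time: each destination page is translated to its physical frame, and read_size clamps the final copy to whatever remains of ph.filesz (the usual min(remaining, PAGE_SIZE) idiom). A self-contained sketch of that loop follows; lookup_page() stands in for address_translate() plus P2V() and is an assumption of the sketch, not the kernel's API:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096u

static char fake_phys[8 * PAGE_SIZE]; /* stand-in "physical" memory */

/* Stand-in for address_translate() + P2V(): pretend the first eight pages
 * of the destination are mapped 1:1 onto fake_phys. */
static void* lookup_page(uintptr_t vaddr)
{
    if (vaddr >= sizeof(fake_phys))
        return NULL;
    return &fake_phys[vaddr & ~(uintptr_t)(PAGE_SIZE - 1)];
}

/* Copy filesz bytes into a destination that is only reachable one mapped
 * page at a time, clamping the last chunk like read_size does above. */
static int copy_segment(uintptr_t dst_vaddr, const char* src, uint32_t filesz)
{
    for (uint32_t off = 0; off < filesz; off += PAGE_SIZE) {
        void* dst_page = lookup_page(dst_vaddr + off);
        if (dst_page == NULL) {
            return -1; /* copying to an unmapped address is an error */
        }
        uint32_t read_size = (filesz - off < PAGE_SIZE) ? (filesz - off) : PAGE_SIZE;
        memcpy(dst_page, src + off, read_size);
    }
    return 0;
}

int main(void)
{
    static const char img[5000] = { 'E' }; /* 5000 bytes: one full page plus a 904-byte tail */
    return copy_segment(0, img, sizeof(img));
}
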
@@ -85,9 +85,7 @@ int sys_exit()
 
     // delete task for pcb_list
     xizi_task_manager.cur_task_yield_noschedule();
-    spinlock_lock(&cur_task->lock);
     cur_task->state = DEAD;
-    spinlock_unlock(&cur_task->lock);
 
     return 0;
 }

@@ -41,8 +41,6 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         return -1;
     }
 
-    spinlock_lock(&cur_task->lock);
-
     struct double_list_node* cur_node = NULL;
     struct server_session* server_session = NULL;
     /* update old sessions */

@@ -54,7 +52,6 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         server_session = CONTAINER_OF(cur_node, struct server_session, node);
         if (UNLIKELY(server_session->buf_addr != (uintptr_t)userland_session_arr[i].buf)) {
             ERROR("mismatched old session addr, user buf: %x, server buf: %x\n", userland_session_arr[i].buf, server_session->buf_addr);
-            spinlock_unlock(&cur_task->lock);
             return -1;
         }
         // update session_backend

@@ -63,21 +60,18 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         doubleListDel(cur_node);
         doubleListAddOnBack(cur_node, &cur_task->svr_sess_listhead);
     }
-    spinlock_unlock(&cur_task->lock);
 
     /* handle sessions for condition 2, ref. delete_share_pages() */
     bool has_delete = true;
     while (has_delete) {
         has_delete = false;
 
-        spinlock_lock(&cur_task->lock);
         DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
         {
             if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
                 // client had closed it, then server will close it too
                 struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);
 
-                spinlock_unlock(&cur_task->lock);
                 if (!session_backend->server_side.closed) {
                     session_backend->server_side.closed = true;
                     xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);

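Note: the has_delete loop in this function deletes closed sessions while iterating their list, so after each unmap it breaks out of the walk and starts over, since the list just changed underneath the iterator. A generic restart-the-scan sketch of that pattern over a plain singly linked list (not the kernel's double_list type):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node* next;
    bool closed;
};

/* Remove every closed node.  After each removal the walk restarts from the
 * head, mirroring the has_delete loop above: the list was just mutated, so
 * the saved iterator can no longer be trusted. */
static void reap_closed(struct node** head)
{
    bool has_delete = true;
    while (has_delete) {
        has_delete = false;
        for (struct node** pp = head; *pp != NULL; pp = &(*pp)->next) {
            if ((*pp)->closed) {
                struct node* victim = *pp;
                *pp = victim->next;
                free(victim);
                has_delete = true;
                break; /* list changed: restart the scan */
            }
        }
    }
}

int main(void)
{
    struct node* head = NULL;
    for (int i = 0; i < 4; i++) {
        struct node* n = malloc(sizeof(*n));
        n->closed = (i % 2 == 0); /* close every other session */
        n->next = head;
        head = n;
    }
    reap_closed(&head);
    int left = 0;
    for (struct node* n = head; n != NULL; n = n->next)
        left++;
    printf("%d nodes left\n", left); /* prints 2 */
    return 0;
}
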
@@ -87,13 +81,9 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
                 break;
             }
         }
-        if (!has_delete) {
-            spinlock_unlock(&cur_task->lock);
-        }
     }
 
     /* poll with new sessions */
-    spinlock_lock(&cur_task->lock);
     int i = 0;
     DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
     {

@@ -111,7 +101,6 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
     if (LIKELY(i < arr_capacity)) {
         userland_session_arr[i].buf = 0;
     }
-    spinlock_unlock(&cur_task->lock);
 
     return 0;
 }

@@ -38,20 +38,16 @@ struct TaskMicroDescriptor* max_priority_runnable_task(void)
 
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
     {
-        spinlock_lock(&task->lock);
         if (task->state == READY) {
             // found a runnable task, stop this look up
             task->state = RUNNING;
-            spinlock_unlock(&task->lock);
             return task;
         } else if (task->state == DEAD) {
             // found a killed task, stop this loop
             // change in pcb_list may break this loop, so find a runnable in next look up
-            spinlock_unlock(&task->lock);
             xizi_task_manager.free_pcb(task);
             return NULL;
         }
-        spinlock_unlock(&task->lock);
     }
     return NULL;
 }

@@ -63,20 +59,16 @@ struct TaskMicroDescriptor* round_robin_runnable_task(uint32_t priority)
     DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_list_head[priority], node)
     {
 
-        spinlock_lock(&task->lock);
         if (task->state == READY) {
             // found a runnable task, stop this look up
-            spinlock_unlock(&task->lock);
             task->state = RUNNING;
             return task;
         } else if (task->state == DEAD) {
             // found a killed task, stop this loop
             // change in pcb_list may break this loop, so find a runnable in next look up
-            spinlock_unlock(&task->lock);
             xizi_task_manager.free_pcb(task);
             return NULL;
         }
-        spinlock_unlock(&task->lock);
     }
 
     return NULL;

@@ -62,12 +62,10 @@ static void _task_manager_init()
 /// @brief alloc a new task without init
 static struct TaskMicroDescriptor* _alloc_task_cb()
 {
-    spinlock_lock(&xizi_task_manager.lock);
     // alloc task and add it to used task list
     struct TaskMicroDescriptor* task = (struct TaskMicroDescriptor*)slab_alloc(&xizi_task_manager.task_allocator);
     if (UNLIKELY(task == NULL)) {
         ERROR("Not enough memory\n");
-        spinlock_unlock(&xizi_task_manager.lock);
         return NULL;
     }
     // set pid once task is allocated

@@ -76,7 +74,6 @@ static struct TaskMicroDescriptor* _alloc_task_cb()
     // update pcb used
     xizi_task_manager.nr_pcb_used += 1;
 
-    spinlock_unlock(&xizi_task_manager.lock);
     return task;
 }
 

@@ -213,7 +210,6 @@ static void _cur_task_yield_noschedule(void)
 {
     yield_cnt++;
 
-    spinlock_lock(&xizi_task_manager.lock);
     struct TaskMicroDescriptor* current_task = cur_cpu()->task;
     assert(current_task != NULL);
 

@@ -230,16 +226,13 @@ static void _cur_task_yield_noschedule(void)
     doubleListAddOnBack(&current_task->node, &xizi_task_manager.task_list_head[current_task->priority]);
     ready_task_priority |= (1 << current_task->priority);
     // set current task state
-    spinlock_lock(&current_task->lock);
     current_task->state = READY;
     current_task->remain_tick = TASK_CLOCK_TICK;
-    spinlock_unlock(&current_task->lock);
     cur_cpu()->task = NULL;
     if (yield_cnt == 50) {
         recover_priority();
         yield_cnt = 0;
     }
-    spinlock_unlock(&xizi_task_manager.lock);
 }
 
 static void _set_cur_task_priority(int priority)

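Note: besides re-queuing the yielded task at the back of its priority list, this hunk sets the task's priority bit in ready_task_priority. Keeping one bit per priority level lets the scheduler pick the next queue to serve with a single find-first-set rather than scanning every list. The selection code itself is not part of this diff; the sketch below assumes that a lower priority number means higher priority:

#include <stdint.h>
#include <stdio.h>

/* One bit per priority level: bit p is set while task_list_head[p] holds at
 * least one READY task. */
static uint32_t ready_task_priority = 0;

static void mark_ready(uint32_t priority) { ready_task_priority |= (1u << priority); }
static void mark_empty(uint32_t priority) { ready_task_priority &= ~(1u << priority); }

/* Assuming priority 0 is the most urgent, the next queue to serve is the
 * lowest set bit of the mask. */
static int next_priority_to_run(void)
{
    if (ready_task_priority == 0)
        return -1; /* nothing is runnable */
    return __builtin_ctz(ready_task_priority);
}

int main(void)
{
    mark_ready(3);
    mark_ready(7);
    printf("%d\n", next_priority_to_run()); /* prints 3 */
    mark_empty(3);
    printf("%d\n", next_priority_to_run()); /* prints 7 */
    return 0;
}
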
@@ -49,9 +49,6 @@ bool clock_intr_handler_init(struct TraceTag* p_clock_driver_tag)
 uint64_t global_tick = 0;
 int xizi_clock_handler(int irq, void* tf, void* arg)
 {
-    // spinlock_lock(&whole_kernel_lock);
-    // DEBUG_PRINTF("CPU %d\n", cpu_get_current());
-    // spinlock_unlock(&whole_kernel_lock);
     /* handle clock interrupt using driver */
     if (p_clock_driver->is_timer_expired()) {
         p_clock_driver->clear_clock_intr();