forked from xuos/xiuos

Finish microkernel functions.

parent 7a296b2744
commit 50ecc1e520
@@ -92,6 +92,16 @@ __attribute__((always_inline, optimize("O0"))) static inline uint32_t user_mode(
     return val;
 }
 
+__attribute__((always_inline, optimize("O0"))) static inline void cpu_into_low_power()
+{
+    WFE();
+}
+
+__attribute__((always_inline, optimize("O0"))) static inline void cpu_leave_low_power()
+{
+    SEV();
+}
+
 struct context {
     uint32_t r4;
     uint32_t r5;
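Context for the hunk above: on ARM, WFE (Wait For Event) parks a core in a low-power state until an event arrives, and SEV (Send Event) wakes every core waiting in WFE. A minimal sketch of the pairing, assuming GCC-style inline assembly (the flag name is illustrative, not from this commit):

    /* sketch: a consumer parks on WFE; a producer raises a flag and SEVs */
    static inline void WFE(void) { __asm__ volatile("wfe" ::: "memory"); }
    static inline void SEV(void) { __asm__ volatile("sev" ::: "memory"); }

    volatile int work_available = 0;

    void idle_until_work(void)
    {
        while (!work_available) {
            WFE(); /* spurious wakeups are fine: the loop re-checks */
        }
    }

    void publish_work(void)
    {
        work_available = 1;
        __sync_synchronize(); /* publish the flag before waking sleepers */
        SEV();
    }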
@@ -84,7 +84,7 @@ int main(int argc, char** argv)
     if (argc >= 2) {
         id = string_to_integer(argv[1]);
     }
-    printf("This is Simple Client %d, size is 0x%x\n", id, task_heap_base());
+    // printf("This is Simple Client %d, size is 0x%x\n", id, task_heap_base());
 
     struct Session session_wait;
     struct Session session_nowait;
@@ -69,6 +69,7 @@ struct XiziPageManager {
 
 extern struct MmuCommonDone* _p_pgtbl_mmu_access;
 uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc);
+void _free_user_pgdir(struct TopLevelPageDirectory* pgdir);
 
 extern struct TopLevelPageDirectory kern_pgdir;
 void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag);
@@ -96,11 +96,11 @@ __attribute__((optimize("O0"))) int main(void)
     LOG_PRINTF("CPU %d init done\n", cpu_id);
     spinlock_unlock(&whole_kernel_lock);
 
     while (core_init_done != (1 << NR_CPU) - 1)
         ;
 
-    xizi_enter_kernel();
     // sync memory
     __sync_synchronize();
+    start_smp_cache_broadcast(cpu_id);
+    // enter kernel seriously
+    xizi_enter_kernel();
     xizi_task_manager.task_scheduler(scheduler_rights);
 
     // never reached
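The hunk above reorders secondary-core startup so each core synchronizes memory and broadcasts its cache state before seriously entering the kernel. The core_init_done test treats the variable as a bitmask with one bit per core; a standalone sketch of that rendezvous (the NR_CPU value is illustrative):

    #include <stdint.h>

    #define NR_CPU 4 /* illustrative */

    static volatile uint32_t core_init_done = 0;

    /* each core sets its bit when its local init is finished */
    void mark_core_ready(uint32_t cpu_id)
    {
        __atomic_fetch_or(&core_init_done, 1u << cpu_id, __ATOMIC_RELEASE);
    }

    /* every core spins until all bits are set, then orders later loads */
    void wait_all_cores_ready(void)
    {
        while (core_init_done != (1u << NR_CPU) - 1)
            ;
        __sync_synchronize();
    }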
@@ -84,9 +84,14 @@ void* slab_alloc(struct slab_allocator* const allocator)
             allocator->full->prev = full_head;
         }
         allocator->full = full_head;
-        return allocator->full->data + slot * allocator->element_size;
+
+        void* return_addr = allocator->full->data + slot * allocator->element_size;
+        memset(return_addr, 0, allocator->element_size);
+        return return_addr;
     } else {
-        return allocator->partial->data + slot * allocator->element_size;
+        void* return_addr = allocator->partial->data + slot * allocator->element_size;
+        memset(return_addr, 0, allocator->element_size);
+        return return_addr;
     }
 }
@@ -109,6 +114,8 @@ void* slab_alloc(struct slab_allocator* const allocator)
         allocator->partial->refcount = 1;
     }
     allocator->partial->bitmap = allocator->bitmap_empty ^ BITMAP_FIRST_BIT;
+    assert(allocator->partial->data != NULL);
+    memset(allocator->partial->data, 0, allocator->element_size);
     return allocator->partial->data;
 }
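All three slab_alloc return paths above now memset the returned slot, so the allocator hands out zero-filled objects and callers can no longer observe stale bytes from a previously freed object. The cost is one memset of element_size per allocation, not of the whole slab; a reduced sketch of the idiom (helper name is hypothetical):

    #include <stddef.h>
    #include <string.h>

    /* sketch: compute the slot address, scrub exactly one element, return it */
    static void* take_slot_zeroed(char* data, size_t slot, size_t element_size)
    {
        void* return_addr = data + slot * element_size;
        memset(return_addr, 0, element_size);
        return return_addr;
    }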
@@ -62,7 +62,8 @@ static bool _map_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr, uintp
 
     uintptr_t* pte;
     while (true) {
-        if ((pte = _page_walk(pgdir, vaddr, true)) == 0) {
+        if ((pte = _page_walk(pgdir, vaddr, true)) == NULL) {
             ERROR("pte not found for vaddr %x.\n", vaddr);
             return false;
         }
@@ -81,6 +82,7 @@ static bool _map_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr, uintp
         paddr += PAGE_SIZE;
     }
 
+    assert(vaddr == vaddr_last);
     return true;
 }
@@ -94,7 +96,8 @@ static bool _unmap_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t len)
 
     uintptr_t* pte;
     while (true) {
-        if ((pte = _page_walk(pgdir, vaddr, true)) == 0) {
+        if ((pte = _page_walk(pgdir, vaddr, false)) == NULL) {
             ERROR("pte not found for vaddr %x.\n", vaddr);
             return false;
         }
@@ -112,6 +115,7 @@ static bool _unmap_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t len)
         vaddr += PAGE_SIZE;
     }
 
+    assert(vaddr == vaddr_last);
     return true;
 }
@@ -140,33 +144,6 @@ static bool _map_user_pages(uintptr_t* pgdir, uintptr_t vaddr, uintptr_t paddr,
     return _map_pages(pgdir, vaddr, paddr, len, mem_attr);
 }
 
-static void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
-{
-    uintptr_t low_bound = kern_virtmem_buddy.mem_start, high_bound = kern_virtmem_buddy.mem_end;
-    uintptr_t user_low_bound = user_phy_freemem_buddy.mem_start, user_high_bound = user_phy_freemem_buddy.mem_end;
-    uintptr_t end_idx = USER_MEM_TOP >> LEVEL3_PDE_SHIFT;
-
-    for (uintptr_t i = 0; i < end_idx; i++) {
-        // free each page table
-        uintptr_t* pgtbl_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(pgdir->pd_addr[i]);
-        if (pgtbl_paddr != 0) {
-            // free each page
-            for (uintptr_t j = 0; j < NUM_LEVEL4_PTE; j++) {
-                uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(pgtbl_paddr))[j], PAGE_SIZE);
-                if (page_paddr != NULL) {
-                    if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
-                        kfree(P2V(page_paddr));
-                    } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
-                        raw_free((char*)page_paddr);
-                    }
-                }
-            }
-            kfree(P2V(pgtbl_paddr));
-        }
-    }
-    kfree((char*)pgdir->pd_addr);
-}
-
 /// assume that a user pagedir is allocated from [0, size)
 /// if new_size > old_size, allocate more space,
 /// if old_size > new_size, free extra space, to avoid unnecessary alloc/free.
@@ -191,7 +168,9 @@ static uintptr_t _resize_user_pgdir(struct TopLevelPageDirectory* pgdir, uintptr
         }
         memset(new_page, 0, PAGE_SIZE);
 
-        xizi_pager.map_pages(pgdir->pd_addr, cur_size, V2P(new_page), PAGE_SIZE, false);
+        if (!xizi_pager.map_pages(pgdir->pd_addr, cur_size, V2P(new_page), PAGE_SIZE, false)) {
+            return cur_size;
+        }
         cur_size += PAGE_SIZE;
     }
@@ -29,6 +29,11 @@ Modification:
 *************************************************/
 #include <stdint.h>
 
 #include "core.h"
+#include "memlayout.h"
+
+#include "assert.h"
+#include "buddy.h"
+#include "kalloc.h"
 #include "pagetable.h"
@@ -36,6 +41,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
 {
 
     // get page table addr
+    assert(pgdir != NULL);
     uintptr_t pde_attr = 0;
     _p_pgtbl_mmu_access->MmuPdeAttr(&pde_attr);
@@ -56,3 +62,36 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
 
     return &pgtbl_vaddr[LEVEL4_PTE_IDX(vaddr)];
 }
+
+void _free_user_pgdir(struct TopLevelPageDirectory* pgdir)
+{
+    uintptr_t low_bound = kern_virtmem_buddy.mem_start, high_bound = kern_virtmem_buddy.mem_end;
+    uintptr_t user_low_bound = user_phy_freemem_buddy.mem_start, user_high_bound = user_phy_freemem_buddy.mem_end;
+    uintptr_t end_idx = USER_MEM_TOP >> LEVEL3_PDE_SHIFT;
+
+    for (uintptr_t level4_entry_idx = 0; level4_entry_idx < end_idx; level4_entry_idx++) {
+        // free each level4 page table
+        uintptr_t* pgtbl_paddr = (uintptr_t*)LEVEL4_PTE_ADDR(pgdir->pd_addr[level4_entry_idx]);
+        if (pgtbl_paddr != NULL) {
+            // free each page
+            for (uintptr_t page_entry_idx = 0; page_entry_idx < NUM_LEVEL4_PTE; page_entry_idx++) {
+                uintptr_t vaddr = (level4_entry_idx << LEVEL3_PDE_SHIFT) | (page_entry_idx << LEVEL4_PTE_SHIFT);
+
+                // get page paddr
+                uintptr_t* page_paddr = (uintptr_t*)ALIGNDOWN(((uintptr_t*)P2V(pgtbl_paddr))[page_entry_idx], PAGE_SIZE);
+                if (page_paddr != NULL) {
+                    // IPC vaddr should not be addressed here.
+                    assert(vaddr < USER_IPC_SPACE_BASE || vaddr >= USER_IPC_SPACE_TOP);
+
+                    if (LIKELY((uintptr_t)page_paddr >= low_bound && (uintptr_t)page_paddr < high_bound)) {
+                        kfree(P2V(page_paddr));
+                    } else if (LIKELY((uintptr_t)page_paddr >= user_low_bound && (uintptr_t)page_paddr < user_high_bound)) {
+                        raw_free((char*)page_paddr);
+                    }
+                }
+            }
+            kfree(P2V(pgtbl_paddr));
+        }
+    }
+    kfree((char*)pgdir->pd_addr);
+}
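The relocated _free_user_pgdir now rebuilds each page's virtual address from its two table indices so it can assert that addresses in the IPC share range are never freed by this path. The index arithmetic in isolation (shift names follow the diff; the constant values are illustrative for a two-level walk with 4 KiB pages):

    #include <stdint.h>

    #define LEVEL3_PDE_SHIFT 20 /* illustrative: bits mapped by one directory entry */
    #define LEVEL4_PTE_SHIFT 12 /* illustrative: bits mapped by one page entry */

    /* vaddr = directory-index bits | page-table-index bits | zero page offset */
    static uintptr_t entry_vaddr(uintptr_t dir_idx, uintptr_t pte_idx)
    {
        return (dir_idx << LEVEL3_PDE_SHIFT) | (pte_idx << LEVEL4_PTE_SHIFT);
    }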
@@ -95,6 +95,7 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
     uintptr_t vaddr = (uintptr_t)NULL;
     if (task->massive_ipc_allocator != NULL) {
         vaddr = (uintptr_t)KBuddyAlloc(task->massive_ipc_allocator, PAGE_SIZE * nr_pages * 2);
+        assert(xizi_pager.address_translate(&task->pgdir, vaddr) == (uintptr_t)NULL);
     } else {
         vaddr = alloc_share_page_addr(task, nr_pages * 2);
         if (vaddr >= USER_IPC_USE_ALLOCATOR_WATERMARK) {
@@ -122,9 +123,7 @@ static uintptr_t map_task_share_page(struct TaskMicroDescriptor* task, const uin
         p_mmu_driver->TlbFlush(vaddr, 2 * nr_pages * PAGE_SIZE);
 
         /// @todo clean range rather than all
-        // p_dcache_done->flushall();
         p_dcache_done->invalidateall();
-        // p_dcache_done->flush(vaddr, vaddr + 2 * nr_pages * PAGE_SIZE);
     }
     return vaddr;
 }
@@ -148,9 +147,7 @@ uintptr_t task_map_pages(struct TaskMicroDescriptor* task, const uintptr_t vaddr
         p_mmu_driver->TlbFlush(vaddr, nr_pages * PAGE_SIZE);
 
         /// @todo clean range rather than all
-        // p_dcache_done->flushall();
         p_dcache_done->invalidateall();
-        // p_dcache_done->flush(vaddr, vaddr + nr_pages * PAGE_SIZE);
     }
 
     return vaddr;
@@ -171,9 +168,7 @@ void unmap_task_share_pages(struct TaskMicroDescriptor* task, const uintptr_t ta
         p_mmu_driver->TlbFlush(task_vaddr, 2 * nr_pages * PAGE_SIZE);
 
         /// @todo clean range rather than all
-        // p_dcache_done->flushall();
         p_dcache_done->invalidateall();
-        // p_dcache_done->flush(task_vaddr, task_vaddr + 2 * nr_pages * PAGE_SIZE);
     }
 }
@@ -246,21 +241,31 @@ int delete_share_pages(struct session_backend* session_backend)
         return -1;
     }
 
+    assert(session_backend->server_side.closed || session_backend->client_side.closed);
+
-    /* unmap share pages */
-    if (session_backend->client) {
-        doubleListDel(&session_backend->client_side.node);
-    }
-
-    if (session_backend->server) {
+    // close session in server's perspective
+    if (session_backend->server_side.closed && session_backend->server != NULL) {
         xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages);
         doubleListDel(&session_backend->server_side.node);
+        session_backend->server->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+        session_backend->server = NULL;
     }
 
-    session_backend->server->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+    // close session in client's perspective
+    if (session_backend->client_side.closed && session_backend->client != NULL) {
+        xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages);
+        doubleListDel(&session_backend->client_side.node);
+        session_backend->client->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+        session_backend->client = NULL;
+    }
 
     /* free session backend */
+    if (session_backend->server_side.closed && session_backend->client_side.closed) {
+        assert(session_backend->client == NULL && session_backend->server == NULL);
         kfree((void*)session_backend->buf_kernel_addr);
         slab_free(SessionAllocator(), (void*)session_backend);
+    }
 
     return 0;
 }
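After this rework, delete_share_pages tears each side down at most once: a side is unmapped and unlinked only while its closed flag is set and its task pointer is still non-NULL, the pointer is then cleared, and the backend itself is freed only after both sides are gone. A condensed sketch of the two-phase protocol (field names follow the diff; bodies are elided):

    #include <stddef.h>

    struct side { int closed; };
    struct backend { struct side client_side, server_side; void *client, *server; };

    int delete_backend(struct backend* b)
    {
        if (!b->client_side.closed && !b->server_side.closed)
            return -1; /* neither side has asked to close yet */
        if (b->server_side.closed && b->server != NULL) {
            /* unmap + unlink server side, then cut the back-pointer */
            b->server = NULL;
        }
        if (b->client_side.closed && b->client != NULL) {
            /* unmap + unlink client side, then cut the back-pointer */
            b->client = NULL;
        }
        if (b->server_side.closed && b->client_side.closed) {
            /* both sides closed: free the shared buffer and the backend */
        }
        return 0;
    }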
@@ -33,6 +33,11 @@ Modification:
 #include "syscall.h"
 #include "task.h"
 
+/// @brief close a session syscall
+/// @warning best to be called by a client
+/// @param cur_task
+/// @param session
+/// @return
 int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* session)
 {
     assert(cur_task != NULL);
@@ -43,18 +48,16 @@ int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* sess
 
     /* check if session is a client one or a server one */
     struct session_backend* session_backend = NULL;
-    bool session_valid = false;
 
     struct client_session* client_session = NULL;
     DOUBLE_LIST_FOR_EACH_ENTRY(client_session, &cur_task->cli_sess_listhead, node)
     {
         if ((uintptr_t)session->buf == client_session->buf_addr) {
-            session_valid = true;
             session_backend = CLIENT_SESSION_BACKEND(client_session);
+            if (!client_session->closed) {
                 assert(session_backend->client == cur_task);
-            assert(client_session->closed == false);
                 client_session->closed = true;
                 xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->client_side.buf_addr, session_backend->nr_pages);
+            }
+            xizi_share_page_manager.delete_share_pages(session_backend);
             break;
         }
     }
@@ -63,12 +66,11 @@ int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* sess
     DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
     {
         if ((uintptr_t)session->buf == server_session->buf_addr) {
-            session_valid = true;
             session_backend = SERVER_SESSION_BACKEND(server_session);
+            if (!server_session->closed) {
                 assert(session_backend->server == cur_task);
-            assert(server_session->closed == false);
                 server_session->closed = true;
                 xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);
+            }
+            xizi_share_page_manager.delete_share_pages(session_backend);
             break;
         }
     }
@@ -78,9 +80,6 @@ int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* sess
     if (session_backend == NULL) {
         return -1;
     }
-    if (UNLIKELY(session_backend->client_side.closed && session_backend->server_side.closed) && LIKELY(session_valid)) {
-        xizi_share_page_manager.delete_share_pages(session_backend);
-    }
 
     return 0;
 }
@@ -38,9 +38,10 @@ Modification:
 struct session_backend* create_session_inner(struct TaskMicroDescriptor* client, struct TaskMicroDescriptor* server, int capacity, struct Session* user_session)
 {
     // create share pages
+    assert(server != NULL && client != NULL);
     struct session_backend* session_backend = xizi_share_page_manager.create_share_pages(client, server, capacity);
     if (UNLIKELY(session_backend == NULL)) {
-        DEBUG("create_share_pages failed\n");
+        DEBUG("create_share_pages to server: %s failed\n", server->name);
         return NULL;
     }
@@ -112,7 +112,7 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
         uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
         if (page_paddr == 0) {
-            ERROR("copy elf file to unmapped addr\n");
+            ERROR("copy elf file to unmapped addr: %x(pgdir: %x)\n", ph.vaddr + addr_offset, pgdir.pd_addr);
             goto error_exec;
         }
         uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
@@ -92,12 +92,12 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
             // client had closed it, then server will close it too
             struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);
+
+            if (!session_backend->server_side.closed) {
+                session_backend->server_side.closed = true;
                 xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);
+            }
-            assert(session_backend->server == cur_task);
-            assert(session_backend->client == NULL);
-            assert(server_session->closed == false);
-            server_session->closed = true;
             xizi_share_page_manager.delete_share_pages(session_backend);
             // signal that there is a middle deletion of session
             has_middle_delete = true;
             break;
         }
@@ -102,37 +102,28 @@ int _task_retrieve_sys_resources(struct TaskMicroDescriptor* ptask)
     assert(ptask != NULL);
 
     /* handle sessions for condition 1, ref. delete_share_pages() */
+    struct session_backend* session_backend = NULL;
     // close all server_sessions
     struct server_session* server_session = NULL;
     while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
         server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
         assert(server_session != NULL);
+        session_backend = SERVER_SESSION_BACKEND(server_session);
+        assert(session_backend->server == ptask);
         // cut the connection from task to session
         if (!server_session->closed) {
             xizi_share_page_manager.unmap_task_share_pages(ptask, server_session->buf_addr, CLIENT_SESSION_BACKEND(server_session)->nr_pages);
             server_session->closed = true;
         }
-        doubleListDel(&server_session->node);
-        SERVER_SESSION_BACKEND(server_session)->server = NULL;
-        // delete session (also cut connection from session to task)
-        if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
-            xizi_share_page_manager.delete_share_pages(SERVER_SESSION_BACKEND(server_session));
-        }
+        xizi_share_page_manager.delete_share_pages(session_backend);
     }
     // close all client_sessions
     struct client_session* client_session = NULL;
     while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
         client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
         assert(client_session != NULL);
+        session_backend = CLIENT_SESSION_BACKEND(client_session);
+        assert(session_backend->client == ptask);
         // cut the connection from task to session
         if (!client_session->closed) {
             xizi_share_page_manager.unmap_task_share_pages(ptask, client_session->buf_addr, CLIENT_SESSION_BACKEND(client_session)->nr_pages);
             client_session->closed = true;
         }
-        doubleListDel(&client_session->node);
-        CLIENT_SESSION_BACKEND(client_session)->client = NULL;
-        // delete session (also cut connection from session to task)
-        if (CLIENT_SESSION_BACKEND(client_session)->server_side.closed) {
-            xizi_share_page_manager.delete_share_pages(CLIENT_SESSION_BACKEND(client_session));
-        }
+        xizi_share_page_manager.delete_share_pages(session_backend);
     }
 
     if (ptask->server_identifier.meta != NULL) {
@@ -265,8 +256,13 @@ static void _scheduler(struct SchedulerRightGroup right_group)
         /* if there's not a runnable task, wait for one */
         if (next_task == NULL) {
             xizi_leave_kernel();
+            // there is no task to run, into low power mode
+            cpu_into_low_power();
+
             /* leave kernel for other cores, so they may create a runnable task */
             xizi_enter_kernel();
+            // activate cpu
+            cpu_leave_low_power();
             continue;
         }
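The ordering in this idle path matters: the core must leave the kernel (releasing the big kernel lock) before parking in WFE, otherwise no other core could enter the kernel and create the task that would wake it; on wakeup it re-enters the kernel before touching scheduler state. A self-contained sketch of the pattern, with hypothetical scheduler helper names (pick_next_task, run):

    #include <stddef.h>

    struct task;
    struct task* pick_next_task(void);
    void run(struct task* t);
    void xizi_enter_kernel(void);
    void xizi_leave_kernel(void);
    void cpu_into_low_power(void);
    void cpu_leave_low_power(void);

    void scheduler_loop(void)
    {
        for (;;) {
            struct task* t = pick_next_task();
            if (t == NULL) {
                xizi_leave_kernel();   /* let other cores make progress */
                cpu_into_low_power();  /* WFE: park until an event */
                xizi_enter_kernel();   /* re-acquire before scheduling */
                cpu_leave_low_power(); /* SEV: nudge other parked cores */
                continue;
            }
            run(t);
        }
    }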
@@ -54,7 +54,7 @@ void default_interrupt_routine(void)
 }
 
 extern void context_switch(struct context**, struct context*);
-__attribute__((optimize("O0"))) void intr_irq_dispatch(struct trapframe* tf)
+void intr_irq_dispatch(struct trapframe* tf)
 {
     xizi_enter_kernel();
@@ -46,7 +46,7 @@ bool swi_distributer_init(struct SwiDispatcherRightGroup* _right_group)
 }
 
 extern void context_switch(struct context**, struct context*);
-__attribute__((optimize("O0"))) void software_irq_dispatch(struct trapframe* tf)
+void software_irq_dispatch(struct trapframe* tf)
 {
     xizi_enter_kernel();
     assert(p_intr_driver != NULL);