Merge pull request 'Update Kernel' (#461) from fit_5g into 5g_usb

This commit is contained in:
xuedongliang 2024-12-30 09:38:38 +08:00
commit d395435c53
61 changed files with 1786 additions and 507 deletions

View File

@ -74,7 +74,7 @@ Modification:
#include "cortex_a9.h" #include "cortex_a9.h"
#define NR_CPU 1 #define NR_CPU 4
__attribute__((always_inline, optimize("O0"))) static inline uint32_t user_mode() __attribute__((always_inline, optimize("O0"))) static inline uint32_t user_mode()
{ {

View File

@ -129,7 +129,7 @@ static bool xizi_gpt_init()
return false; return false;
} }
// register clock handler to intr // register clock handler to intr
struct XiziTrapDriver* p_intr_driver = (struct XiziTrapDriver*)AchieveResource(&intr_driver_tag); struct XiziTrapDriver* p_intr_driver = GetSysObject(struct XiziTrapDriver, &intr_driver_tag);
p_intr_driver->bind_irq_handler(p_clock_driver->get_clock_int(), xizi_clock_handler); p_intr_driver->bind_irq_handler(p_clock_driver->get_clock_int(), xizi_clock_handler);
p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), 0, 0); p_intr_driver->single_irq_enable(p_clock_driver->get_clock_int(), 0, 0);
return true; return true;

View File

@ -73,4 +73,11 @@ Modification:
#define KERN_MEM_BASE (0x90000000) // First kernel virtual address #define KERN_MEM_BASE (0x90000000) // First kernel virtual address
#define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE) #define KERN_OFFSET (KERN_MEM_BASE - PHY_MEM_BASE)
/* virtual and physical addr translate */
#define V2P(a) ((uint32_t)((uint32_t)(a)-KERN_OFFSET))
#define P2V(a) ((void*)((void*)(a) + KERN_OFFSET))
#define V2P_WO(x) ((x)-KERN_OFFSET) // same as V2P, but without casts
#define P2V_WO(x) ((x) + KERN_OFFSET) // same as P2V, but without casts
// clang-format on // clang-format on

View File

@ -92,13 +92,6 @@ When the process switches, the flush TLB is no longer required anymore.
#define CONTEXTIDR_R(val) __asm__ volatile("mrc p15, 0, %0, c13, c0, 1" : "=r"(val)) #define CONTEXTIDR_R(val) __asm__ volatile("mrc p15, 0, %0, c13, c0, 1" : "=r"(val))
#define CONTEXTIDR_W(val) __asm__ volatile("mcr p15, 0, %0, c13, c0, 1" ::"r"(val)) #define CONTEXTIDR_W(val) __asm__ volatile("mcr p15, 0, %0, c13, c0, 1" ::"r"(val))
/* virtual and physical addr translate */
#define V2P(a) ((uint32_t)((uint32_t)(a)-KERN_OFFSET))
#define P2V(a) ((void*)((void*)(a) + KERN_OFFSET))
#define V2P_WO(x) ((x)-KERN_OFFSET) // same as V2P, but without casts
#define P2V_WO(x) ((x) + KERN_OFFSET) // same as V2P, but without casts
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
#include <stdint.h> #include <stdint.h>
__attribute__((always_inline)) static inline uint32_t v2p(void* a) { return ((uint32_t)(a)) - KERN_MEM_BASE; } __attribute__((always_inline)) static inline uint32_t v2p(void* a) { return ((uint32_t)(a)) - KERN_MEM_BASE; }

View File

@ -41,15 +41,16 @@ static void tracer_init_node(TracerNode* node, char* name, tracemeta_ac_type typ
node->parent = NULL; node->parent = NULL;
if (name != NULL) { if (name != NULL) {
char* p_name = (char*)slab_alloc(&sys_tracer.node_name_allocator); char* p_name = (char*)slab_alloc(&sys_tracer.node_name_allocator);
if (!p_name) {
p_name = "BAD_NAME(NOMEM)";
} else {
strcpy(p_name, name); strcpy(p_name, name);
p_name[TRACER_NODE_NAME_LEN - 1] = '\0'; p_name[TRACER_NODE_NAME_LEN - 1] = '\0';
node->name = p_name; node->name = p_name;
} }
if (node->type == TRACER_OWNER) {
doubleListNodeInit(&node->children_guard);
} else {
node->p_resource = p_resource;
} }
doubleListNodeInit(&node->children_guard);
node->p_resource = p_resource;
doubleListNodeInit(&node->list_node); doubleListNodeInit(&node->list_node);
} }
@ -58,16 +59,16 @@ void sys_tracer_init()
// set sys_tracer resource identity // set sys_tracer resource identity
tracer_init_node(&sys_tracer.root_node, NULL, TRACER_OWNER, NULL); tracer_init_node(&sys_tracer.root_node, NULL, TRACER_OWNER, NULL);
sys_tracer.root_node.name = root_name; sys_tracer.root_node.name = root_name;
sys_tracer.sys_tracer_tag.meta = &sys_tracer.root_node; sys_tracer.sys_tracer_tag.inner_node = &sys_tracer.root_node;
// init memory allocator // init memory allocator
slab_init(&sys_tracer.node_allocator, sizeof(TracerNode)); slab_init(&sys_tracer.node_allocator, sizeof(TracerNode), "TracerNodeAllocator");
slab_init(&sys_tracer.node_name_allocator, sizeof(char[TRACER_NODE_NAME_LEN])); slab_init(&sys_tracer.node_name_allocator, sizeof(char[TRACER_NODE_NAME_LEN]), "TracerNodeNameAllocator");
} }
static char* parse_path(char* path, char* const name) static char* parse_path(char* path, char* const name)
{ {
// skip extra '/' // Skip extra '/'
while (*path == '/') { while (*path == '/') {
path++; path++;
} }
@ -75,21 +76,19 @@ static char* parse_path(char* path, char* const name)
return NULL; return NULL;
} }
// start of current name // Start of current name
char* cur_start = path; char* cur_start = path;
while (*path != '/' && *path != '\0') { while (*path != '/' && *path != '\0') {
path++; path++;
} }
// handle current name // Handle current name
int len = path - cur_start; size_t len = path - cur_start;
if (len >= TRACER_NODE_NAME_LEN) { if (len >= TRACER_NODE_NAME_LEN) {
strncpy(name, cur_start, TRACER_NODE_NAME_LEN); len = TRACER_NODE_NAME_LEN - 1;
name[TRACER_NODE_NAME_LEN - 1] = '\0';
} else {
strncpy(name, cur_start, len);
name[len] = '\0';
} }
memcpy(name, cur_start, len);
name[len] = '\0';
return path; return path;
} }
@ -121,39 +120,40 @@ bool AchieveResourceTag(TraceTag* target, TraceTag* owner, char* name)
{ {
static char name_buffer[TRACER_NODE_NAME_LEN]; static char name_buffer[TRACER_NODE_NAME_LEN];
TracerNode* inner_node = owner->meta; TracerNode* inner_node = owner->inner_node;
assert(inner_node != NULL && inner_node->type == TRACER_OWNER); assert(inner_node != NULL && inner_node->type == TRACER_OWNER);
while ((name = parse_path(name, name_buffer)) != NULL) { while ((name = parse_path(name, name_buffer)) != NULL) {
if ((inner_node = tracer_find_node_onestep(inner_node, name_buffer)) == NULL) { if ((inner_node = tracer_find_node_onestep(inner_node, name_buffer)) == NULL) {
DEBUG("Tracer: No such object, owner: %s, child: %s\n", // DEBUG("Tracer: No such object, owner: %s, child: %s\n", //
owner->meta->name == NULL ? "NULL" : owner->meta->name, name == NULL ? "NULL" : name_buffer); owner->inner_node->name == NULL ? "NULL" : owner->inner_node->name, name == NULL ? "NULL" : name_buffer);
return false; return false;
} }
} }
target->meta = inner_node; target->inner_node = inner_node;
return true; return true;
} }
void* AchieveResource(TraceTag* tag) void* AchieveResource(TraceTag* tag)
{ {
assert(tag != NULL); assert(tag != NULL);
if (tag->meta == NULL || tag->meta->type == TRACER_OWNER) { if (tag->inner_node == NULL || tag->inner_node->type == TRACER_OWNER) {
return NULL; return NULL;
} }
return tag->meta->p_resource; return tag->inner_node->p_resource;
} }
bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource) bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
{ {
assert(owner != NULL); assert(owner != NULL);
if (owner->meta == NULL) { if (owner->inner_node == NULL) {
ERROR("Tracer: Empty owner\n"); ERROR("Tracer: Empty owner, node name: %s\n", name);
return false; return false;
} }
assert(owner->meta->type == TRACER_OWNER); // assert(owner->inner_node->type == TRACER_OWNER);
if (tracer_find_node_onestep(owner->meta, name) != NULL) { if (type == TRACER_SERVER_IDENTITY_AC_RESOURCE && //
tracer_find_node_onestep(owner->inner_node, name) != NULL) {
return false; return false;
} }
@ -165,11 +165,11 @@ bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta
tracer_init_node(new_node, name, type, p_resource); tracer_init_node(new_node, name, type, p_resource);
// new node add to owner's children list // new node add to owner's children list
doubleListAddOnHead(&new_node->list_node, &owner->meta->children_guard); doubleListAddOnHead(&new_node->list_node, &owner->inner_node->children_guard);
new_node->parent = owner->meta; new_node->parent = owner->inner_node;
if (new_tag != NULL) { if (new_tag != NULL) {
new_tag->meta = new_node; new_tag->inner_node = new_node;
} }
return true; return true;
} }
@ -177,54 +177,64 @@ bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta
bool DeleteResource(TraceTag* target, TraceTag* owner) bool DeleteResource(TraceTag* target, TraceTag* owner)
{ {
assert(target != NULL && owner != NULL); assert(target != NULL && owner != NULL);
assert(owner->meta != NULL && owner->meta->type == TRACER_OWNER); assert(owner->inner_node != NULL && owner->inner_node->type == TRACER_OWNER);
if (target->meta == NULL) { if (target->inner_node == NULL) {
ERROR("Tracer: Delete a empty resource\n"); ERROR("Tracer: Delete a empty resource, owner: %s\n", owner->inner_node->name);
return false; return false;
} }
assert(target->meta->parent == owner->meta); assert(target->inner_node->parent == owner->inner_node);
doubleListDel(&target->meta->list_node); doubleListDel(&target->inner_node->list_node);
// delete name // delete name
if (target->meta->name != NULL) { if (target->inner_node->name != NULL) {
slab_free(&sys_tracer.node_name_allocator, target->meta->name); slab_free(&sys_tracer.node_name_allocator, target->inner_node->name);
} }
// delete all children // delete all children
if (target->meta->type == TRACER_OWNER) { if (target->inner_node->type == TRACER_OWNER) {
while (!IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard)) { while (!IS_DOUBLE_LIST_EMPTY(&target->inner_node->children_guard)) {
TraceTag tmp_node = { TraceTag tmp_node = {
.meta = DOUBLE_LIST_ENTRY(target->meta->children_guard.next, TracerNode, list_node), .inner_node = DOUBLE_LIST_ENTRY(target->inner_node->children_guard.next, TracerNode, list_node),
}; };
DeleteResource(&tmp_node, target); DeleteResource(&tmp_node, target);
} }
} }
slab_free(&sys_tracer.node_allocator, target->meta); slab_free(&sys_tracer.node_allocator, target->inner_node);
target->meta = NULL; target->inner_node = NULL;
return true; return true;
} }
void debug_list_tracetree_inner(TracerNode* cur_node) #define debug_print_blanks(n) \
for (int __i = 0; __i < n; __i++) { \
DEBUG_PRINTF(" "); \
}
void debug_list_tracetree_inner(TracerNode* cur_node, int nr_blanks)
{ {
DEBUG("[%s] ", cur_node->name); debug_print_blanks(nr_blanks);
if (cur_node->name == NULL) {
DEBUG_PRINTF("[ANON %d] ", cur_node->type);
} else {
DEBUG_PRINTF("[%s %d] ", cur_node->name, cur_node->type);
}
TracerNode* tmp = NULL; TracerNode* tmp = NULL;
DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node) DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
{ {
if (tmp->name != NULL) { if (tmp->name != NULL) {
DEBUG("%s ", tmp->name); DEBUG_PRINTF("%s ", tmp->name);
} else { } else {
DEBUG("ANON "); DEBUG_PRINTF("ANON ");
} }
} }
DEBUG("\n"); DEBUG_PRINTF("\n");
DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node) DOUBLE_LIST_FOR_EACH_ENTRY(tmp, &cur_node->children_guard, list_node)
{ {
debug_list_tracetree_inner(tmp); debug_list_tracetree_inner(tmp, nr_blanks + 1);
} }
} }
void debug_list_tracetree() void debug_list_tracetree()
{ {
TracerNode* ref_root = RequireRootTag()->meta; TracerNode* ref_root = RequireRootTag()->inner_node;
debug_list_tracetree_inner(ref_root); debug_list_tracetree_inner(ref_root, 0);
} }

View File

@ -30,35 +30,12 @@ Modification:
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
#include "list.h" #include "actracer_tag.h"
#include "object_allocator.h" #include "object_allocator.h"
#define TRACER_NODE_NAME_LEN 32 #define TRACER_NODE_NAME_LEN 32
typedef enum { #define GetSysObject(type, target_tag) (type*)AchieveResource(target_tag)
TRACER_INVALID = 0,
TRACER_OWNER,
TRACER_HARDKERNEL_AC_RESOURCE,
TRACER_TASK_DESCRIPTOR_AC_RESOURCE,
TRACER_SERVER_IDENTITY_AC_RESOURCE,
TRACER_MEM_FROM_BUDDY_AC_RESOURCE,
} tracemeta_ac_type;
typedef struct TracerNode {
tracemeta_ac_type type;
char* name;
union {
struct double_list_node children_guard;
void* p_resource;
};
struct TracerNode* parent;
struct double_list_node list_node;
} TracerNode;
/// @brief tag for other module to reference trace meta
typedef struct TraceTag {
TracerNode* meta;
} TraceTag;
struct SysTracer { struct SysTracer {
TracerNode root_node; TracerNode root_node;
@ -73,3 +50,5 @@ bool AchieveResourceTag(struct TraceTag* target, struct TraceTag* owner, char* n
void* AchieveResource(struct TraceTag* tag); void* AchieveResource(struct TraceTag* tag);
bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource); bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource);
bool DeleteResource(struct TraceTag* target, struct TraceTag* owner); bool DeleteResource(struct TraceTag* target, struct TraceTag* owner);
void debug_list_tracetree();

View File

@ -0,0 +1,27 @@
#pragma once

#include <stdint.h> // uint32_t (was missing; do not rely on "list.h" pulling it in)

#include "list.h"

/* Access classes a node in the access-control tracer tree can represent. */
typedef enum {
    TRACER_INVALID = 0,
    TRACER_OWNER, // inner node: owns children, carries no resource payload
    TRACER_HARDKERNEL_AC_RESOURCE,
    TRACER_TASK_DESCRIPTOR_AC_RESOURCE,
    TRACER_SERVER_IDENTITY_AC_RESOURCE,
    TRACER_MEM_SIGNATURE,
    TRACER_SYSOBJECT,
} tracemeta_ac_type;

/* Node of the access-control tracer tree. */
typedef struct TracerNode {
    tracemeta_ac_type type;
    char* name; // node name; NULL for anonymous nodes
    void* p_resource; // resource payload (meaningful for non-owner nodes)
    struct TracerNode* parent;
    struct double_list_node list_node; // sibling link in parent's children list
    struct double_list_node children_guard; // head of this node's children list
} TracerNode;

/// @brief tag for other module to reference trace inner_node
typedef struct TraceTag {
    TracerNode* inner_node;
    uint32_t authority; // NOTE(review): authority semantics not visible here — confirm
} TraceTag;

View File

@ -69,6 +69,10 @@ epit_server: timer.o epit.o ccm_pll.o usyscall.o arch_usyscall.o libserial.o pri
@${objdump} -S $@ > $@.asm @${objdump} -S $@ > $@.asm
endif endif
test_sleep: test_sleep.o libserial.o printf.o usyscall.o arch_usyscall.o
@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
@${objdump} -S $@ > $@.asm
test_semaphore: test_semaphore.o libserial.o printf.o usyscall.o arch_usyscall.o test_semaphore: test_semaphore.o libserial.o printf.o usyscall.o arch_usyscall.o
@${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs} @${ld} ${user_ldflags} -e main -o $@ $^ ${board_specs}
@${objdump} -S $@ > $@.asm @${objdump} -S $@ > $@.asm

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "libserial.h"
#include "usyscall.h"
/* test_sleep: userland smoke test for the sleep syscall.
 * Prints a message and sleeps in an endless loop.
 * sleep(2000) — presumably milliseconds (2 s per message); confirm against
 * the kernel's sleep syscall unit. The exit(0) below is unreachable. */
int main(int argc, char* argv[])
{
while (true) {
printf("sleep for 2 seconds\n");
sleep(2000);
}
/* unreachable: loop above never terminates */
exit(0);
}

View File

@ -1,4 +1,8 @@
SRC_DIR := $(BOARD) usb SRC_DIR := $(BOARD)
ifeq ($(BOARD), 3568)
SRC_DIR += usb
endif
include $(KERNEL_ROOT)/compiler.mk include $(KERNEL_ROOT)/compiler.mk

View File

@ -131,14 +131,14 @@ bool ipc_msg_get_nth_arg(struct IpcMsg* msg, const int arg_num, void* data, cons
return true; return true;
} }
void ipc_msg_send_wait(struct IpcMsg* msg) void ipc_msg_send_wait(struct Session* session, struct IpcMsg* msg)
{ {
msg->header.magic = IPC_MSG_MAGIC; msg->header.magic = IPC_MSG_MAGIC;
msg->header.valid = 1; msg->header.valid = 1;
msg->header.done = 0; msg->header.done = 0;
while (msg->header.done == 0) { while (msg->header.done == 0) {
/// @todo syscall yield with prio decrease /// @todo syscall yield with prio decrease
yield(SYS_TASK_YIELD_BLOCK_IPC); wait_session_call(session);
} }
assert(msg->header.done == 1); assert(msg->header.done == 1);
} }
@ -155,7 +155,7 @@ int ipc_session_wait(struct Session* session)
struct IpcMsg* msg = IPCSESSION_MSG(session); struct IpcMsg* msg = IPCSESSION_MSG(session);
while (msg->header.done == 0) { while (msg->header.done == 0) {
/// @todo syscall yield with prio decrease /// @todo syscall yield with prio decrease
yield(SYS_TASK_YIELD_BLOCK_IPC); wait_session_call(session);
} }
assert(msg->header.done == 1); assert(msg->header.done == 1);
return msg->header.ret_val; return msg->header.ret_val;

View File

@ -180,7 +180,7 @@ __attribute__((__always_inline__)) static inline bool ipc_session_forward(struct
struct IpcMsg* new_ipc_msg(struct Session* session, const int argc, const int* arg_size); struct IpcMsg* new_ipc_msg(struct Session* session, const int argc, const int* arg_size);
bool ipc_msg_set_nth_arg(struct IpcMsg* msg, const int arg_num, const void* const data, const int len); bool ipc_msg_set_nth_arg(struct IpcMsg* msg, const int arg_num, const void* const data, const int len);
bool ipc_msg_get_nth_arg(struct IpcMsg* msg, const int arg_num, void* data, const int len); bool ipc_msg_get_nth_arg(struct IpcMsg* msg, const int arg_num, void* data, const int len);
void ipc_msg_send_wait(struct IpcMsg* msg); void ipc_msg_send_wait(struct Session* session, struct IpcMsg* msg);
void ipc_msg_send_nowait(struct IpcMsg* msg); void ipc_msg_send_nowait(struct IpcMsg* msg);
int ipc_session_wait(struct Session* session); int ipc_session_wait(struct Session* session);
@ -230,7 +230,7 @@ void ipc_server_loop(struct IpcNode* ipc_node);
struct IpcMsg* msg = IPC_CREATE_MSG_FUNC(ipc_name)(session, _VA_FRONT_ARG##argc(__VA_ARGS__)); \ struct IpcMsg* msg = IPC_CREATE_MSG_FUNC(ipc_name)(session, _VA_FRONT_ARG##argc(__VA_ARGS__)); \
int ret = IPC_MSG_ARGS_COPY_SET_FUNC(ipc_name)(msg, _VA_FRONT_ARG##argc(__VA_ARGS__)); \ int ret = IPC_MSG_ARGS_COPY_SET_FUNC(ipc_name)(msg, _VA_FRONT_ARG##argc(__VA_ARGS__)); \
ret = ipc_msg_set_opcode(msg, ipc_name); \ ret = ipc_msg_set_opcode(msg, ipc_name); \
ipc_msg_send_wait(msg); \ ipc_msg_send_wait(session, msg); \
ret = IPC_MSG_ARGS_COPY_GET_FUNC(ipc_name)(msg, _VA_FRONT_ARG##argc(__VA_ARGS__)); \ ret = IPC_MSG_ARGS_COPY_GET_FUNC(ipc_name)(msg, _VA_FRONT_ARG##argc(__VA_ARGS__)); \
int32_t res = 0; \ int32_t res = 0; \
ipc_msg_get_return(msg, &res); \ ipc_msg_get_return(msg, &res); \
@ -278,9 +278,10 @@ uintptr_t _ipc_buf_to_addr(char* buf);
char addr_buf[17]; \ char addr_buf[17]; \
_ipc_addr_to_buf((uintptr_t)msg, addr_buf); \ _ipc_addr_to_buf((uintptr_t)msg, addr_buf); \
char* param[] = { #ipc_name, addr_buf, NULL }; \ char* param[] = { #ipc_name, addr_buf, NULL }; \
int tid = thread(IPC_THREAD_SERVE(ipc_name), #ipc_name, param); \
if (tid > 0) { \
msg->header.handling = 1; \ msg->header.handling = 1; \
int tid = thread(IPC_THREAD_SERVE(ipc_name), #ipc_name, param); \
if (tid <= 0) { \
msg->header.handling = 0; \
} \ } \
return 0; \ return 0; \
} }

View File

@ -36,7 +36,7 @@ Modification:
#include "libserial.h" #include "libserial.h"
struct Session { struct Session {
int id; uintptr_t id;
int capacity; int capacity;
int head; int head;
int tail; int tail;

View File

@ -72,6 +72,11 @@ int close_session(struct Session* session)
return syscall(SYSCALL_CLOSE_SESSION, (intptr_t)session, 0, 0, 0); return syscall(SYSCALL_CLOSE_SESSION, (intptr_t)session, 0, 0, 0);
} }
/* Wait on an IPC session — thin wrapper over SYSCALL_WAIT_SESSION.
 * NOTE(review): presumably blocks the caller until the peer handles the
 * session (replacing the old yield(SYS_TASK_YIELD_BLOCK_IPC) polling) —
 * confirm in the kernel-side handler.
 * Returns the raw syscall result. */
int wait_session_call(struct Session* userland_session)
{
return syscall(SYSCALL_WAIT_SESSION, (intptr_t)userland_session, 0, 0, 0);
}
int get_memblock_info(sys_state_info* info) int get_memblock_info(sys_state_info* info)
{ {
return syscall(SYSCALL_SYS_STATE, SYS_STATE_MEMBLOCK_INFO, (intptr_t)info, 0, 0); return syscall(SYSCALL_SYS_STATE, SYS_STATE_MEMBLOCK_INFO, (intptr_t)info, 0, 0);
@ -102,6 +107,11 @@ int show_cpu()
return syscall(SYSCALL_SYS_STATE, SYS_STATE_SHOW_CPU_INFO, 0, 0, 0); return syscall(SYSCALL_SYS_STATE, SYS_STATE_SHOW_CPU_INFO, 0, 0, 0);
} }
/* Ask the kernel to print the access-control tracer tree
 * (SYS_STATE_SHOW_ACTREE sub-option of SYSCALL_SYS_STATE).
 * Returns the raw syscall result. */
int show_actree()
{
return syscall(SYSCALL_SYS_STATE, SYS_STATE_SHOW_ACTREE, 0, 0, 0);
}
uintptr_t get_second() uintptr_t get_second()
{ {
sys_state_info info; sys_state_info info;
@ -118,8 +128,12 @@ uintptr_t get_tick()
uintptr_t mmap(uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev) uintptr_t mmap(uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
{ {
sys_mmap_info info = {
.type = SYS_MMAP_NORMAL,
.is_dev = is_dev,
};
uintptr_t vaddr_inner = vaddr, paddr_inner = paddr; uintptr_t vaddr_inner = vaddr, paddr_inner = paddr;
if (syscall(SYSCALL_MMAP, (intptr_t)&vaddr_inner, (intptr_t)&paddr_inner, (intptr_t)len, (intptr_t)is_dev) < 0) { if (syscall(SYSCALL_MMAP, (intptr_t)&vaddr_inner, (intptr_t)&paddr_inner, (intptr_t)len, (intptr_t)&info) < 0) {
return (uintptr_t)NULL; return (uintptr_t)NULL;
} }
return vaddr_inner; return vaddr_inner;
@ -127,7 +141,30 @@ uintptr_t mmap(uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev)
int naive_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, bool is_dev) int naive_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, bool is_dev)
{ {
return syscall(SYSCALL_MMAP, (uintptr_t)vaddr, (intptr_t)paddr, (intptr_t)len, (intptr_t)is_dev); sys_mmap_info info = {
.type = SYS_MMAP_NORMAL,
.is_dev = is_dev,
};
return syscall(SYSCALL_MMAP, (uintptr_t)vaddr, (intptr_t)paddr, (intptr_t)len, (intptr_t)&info);
}
/* Map len bytes at vaddr/paddr with caller-supplied raw page attributes.
 * Uses SYS_MMAP_CUSTOMIZE so the kernel honors `attr` (contrast with
 * mmap()/naive_mmap(), which pass SYS_MMAP_NORMAL and an is_dev flag).
 * vaddr/paddr are passed by address so the kernel may write back the
 * actual mapped addresses (as in mmap() above).
 * Returns the raw syscall result (mmap() treats < 0 as failure). */
int mmap_with_attr(uintptr_t vaddr, uintptr_t paddr, int len, uintptr_t attr)
{
sys_mmap_info info = {
.type = SYS_MMAP_CUSTOMIZE,
.attr = attr,
};
uintptr_t vaddr_inner = vaddr, paddr_inner = paddr;
return syscall(SYSCALL_MMAP, (intptr_t)&vaddr_inner, (intptr_t)&paddr_inner, (intptr_t)len, (intptr_t)&info);
}
int naive_mmap_with_attr(uintptr_t* vaddr, uintptr_t* paddr, int len, uintptr_t attr)
{
sys_mmap_info info = {
.type = SYS_MMAP_NORMAL,
.attr = attr,
};
return syscall(SYSCALL_MMAP, (uintptr_t)vaddr, (intptr_t)paddr, (intptr_t)len, (intptr_t)&info);
} }
int register_irq(int irq, int opcode) int register_irq(int irq, int opcode)

View File

@ -35,6 +35,8 @@
#define SYSCALL_SEMAPHORE 13 // semaphore related operations #define SYSCALL_SEMAPHORE 13 // semaphore related operations
#define SYSCALL_SLEEP 14 // sleep #define SYSCALL_SLEEP 14 // sleep
#define SYSCALL_WAIT_SESSION 15
// clang-format on // clang-format on
typedef enum { typedef enum {
@ -47,6 +49,7 @@ typedef enum {
SYS_STATE_SHOW_CPU_INFO, SYS_STATE_SHOW_CPU_INFO,
SYS_STATE_GET_CURRENT_TICK, SYS_STATE_GET_CURRENT_TICK,
SYS_STATE_GET_CURRENT_SECOND, SYS_STATE_GET_CURRENT_SECOND,
SYS_STATE_SHOW_ACTREE,
} sys_state_option; } sys_state_option;
typedef enum { typedef enum {
@ -55,6 +58,17 @@ typedef enum {
SYS_TASK_YIELD_BLOCK_IPC = 0x2, SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason; } task_yield_reason;
/* How the kernel should choose page attributes for a SYSCALL_MMAP request. */
typedef enum {
SYS_MMAP_NORMAL = 0x0, // kernel-chosen attributes; is_dev selects device memory
SYS_MMAP_CUSTOMIZE, // caller supplies raw attributes in sys_mmap_info.attr
} sys_mmap_type;

/* Fourth argument of SYSCALL_MMAP, passed by address (see usyscall.c). */
typedef struct sys_mmap_info {
sys_mmap_type type;
uintptr_t attr; // raw mapping attributes; used when type == SYS_MMAP_CUSTOMIZE
bool is_dev; // device-memory hint; used when type == SYS_MMAP_NORMAL
} sys_mmap_info;
typedef union { typedef union {
struct { struct {
uintptr_t memblock_start; uintptr_t memblock_start;
@ -86,12 +100,15 @@ int kill(int pid);
int register_server(char* name); int register_server(char* name);
int session(char* path, int capacity, struct Session* user_session); int session(char* path, int capacity, struct Session* user_session);
int poll_session(struct Session* userland_session_arr, int arr_capacity); int poll_session(struct Session* userland_session, int arr_capacity);
int wait_session_call(struct Session* userland_session);
int close_session(struct Session* session); int close_session(struct Session* session);
int register_irq(int irq, int opcode); int register_irq(int irq, int opcode);
uintptr_t mmap(uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev); uintptr_t mmap(uintptr_t vaddr, uintptr_t paddr, int len, bool is_dev);
int naive_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, bool is_dev); int naive_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, bool is_dev);
int mmap_with_attr(uintptr_t vaddr, uintptr_t paddr, int len, uintptr_t attr);
int naive_mmap_with_attr(uintptr_t* vaddr, uintptr_t* paddr, int len, uintptr_t attr);
int task_heap_base(); int task_heap_base();
int get_memblock_info(sys_state_info* info); int get_memblock_info(sys_state_info* info);
@ -99,6 +116,10 @@ int set_priority(sys_state_info* info);
int show_task(); int show_task();
int show_mem(); int show_mem();
int show_cpu(); int show_cpu();
int show_actree();
uintptr_t get_second();
uintptr_t get_tick();
uintptr_t get_second(); uintptr_t get_second();
uintptr_t get_tick(); uintptr_t get_tick();

View File

@ -1797,6 +1797,11 @@ void shellShowCpusInfo()
show_cpu(); show_cpu();
} }
/* Shell command handler: dump the access-control tracer tree via show_actree(). */
void shellShowActree()
{
show_actree();
}
#if SHELL_EXEC_UNDEF_FUNC == 1 #if SHELL_EXEC_UNDEF_FUNC == 1
/** /**
* @brief shell执行未定义函数 * @brief shell执行未定义函数

View File

@ -40,6 +40,7 @@ extern void shellKill(int pid);
extern void shellShowTasks(); extern void shellShowTasks();
extern void shellShowMemInfo(); extern void shellShowMemInfo();
extern void shellShowCpusInfo(); extern void shellShowCpusInfo();
extern void shellShowActree();
#if SHELL_EXEC_UNDEF_FUNC == 1 #if SHELL_EXEC_UNDEF_FUNC == 1
extern int shellExecute(int argc, char* argv[]); extern int shellExecute(int argc, char* argv[]);
@ -118,6 +119,8 @@ const ShellCommand shellCommandList[] = {
showMemInfo, shellShowMemInfo, display memory info), showMemInfo, shellShowMemInfo, display memory info),
SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN, SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
showCpusInfo, shellShowCpusInfo, display cpus info), showCpusInfo, shellShowCpusInfo, display cpus info),
SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC) | SHELL_CMD_DISABLE_RETURN,
showActree, shellShowActree, display actracer tree),
#if SHELL_EXEC_UNDEF_FUNC == 1 #if SHELL_EXEC_UNDEF_FUNC == 1
SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN) | SHELL_CMD_DISABLE_RETURN, SHELL_CMD_ITEM(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN) | SHELL_CMD_DISABLE_RETURN,

View File

@ -1,4 +1,4 @@
SRC_DIR := init memory trap task syscall SRC_DIR := init memory trap task syscall tools
SRC_FILES := main.c load_apps.S SRC_FILES := main.c load_apps.S

View File

@ -41,3 +41,5 @@ extern void panic(char*);
#define LIKELY(exp) __builtin_expect(exp, 1) #define LIKELY(exp) __builtin_expect(exp, 1)
#define UNLIKELY(exp) __builtin_expect(exp, 0) #define UNLIKELY(exp) __builtin_expect(exp, 0)
#define ERROR_FREE

View File

@ -46,22 +46,23 @@ static inline void bitmap64_init(struct bitmap64* bitmap)
static inline int bitmap64_alloc(struct bitmap64* bitmap) static inline int bitmap64_alloc(struct bitmap64* bitmap)
{ {
int free_bit = -1; int free_bit = -1;
// free bit is the first 0 bit, from [1, 64] // free bit is the first 0 bit, from [0, 63]
free_bit = __builtin_ffsl(~(uint64_t)(bitmap->map)); free_bit = __builtin_ffsl(~(uint64_t)(bitmap->map));
// handle if bitmap is full (no using 64th bit here) // handle if bitmap is full (no using 64th bit here)
if (free_bit == 0) { if (free_bit == 0) {
return -1; return -1;
} }
assert(free_bit < 64 && free_bit >= 1); free_bit -= 1;
assert(free_bit < 64 && free_bit >= 0);
// alloc and return // alloc and return
bitmap->map |= (1 << (free_bit - 1)); bitmap->map |= (1ULL << free_bit);
return free_bit - 1; return free_bit;
} }
static inline void bitmap64_free(struct bitmap64* bitmap, int idx) static inline void bitmap64_free(struct bitmap64* bitmap, int idx)
{ {
// usages of bitmap64 must be correct // usages of bitmap64 must be correct
assert((bitmap->map & (1 << idx)) != 0); assert((bitmap->map & (1ULL << idx)) != 0);
// free bit // free bit
bitmap->map &= ~(uint64_t)(1 << idx); bitmap->map &= ~(uint64_t)(1ULL << idx);
} }

View File

@ -31,7 +31,6 @@ Modification:
#include "list.h" #include "list.h"
#include "memlayout.h" #include "memlayout.h"
#include "spinlock.h"
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -70,7 +69,6 @@ struct KFreeList {
struct KBuddy { struct KBuddy {
uintptr_t n_pages; uintptr_t n_pages;
uintptr_t use_lock; uintptr_t use_lock;
struct spinlock lock;
struct KFreeList free_list[MAX_BUDDY_ORDER]; struct KFreeList free_list[MAX_BUDDY_ORDER];
struct KPage* first_page; struct KPage* first_page;
uintptr_t mem_start; uintptr_t mem_start;

View File

@ -29,15 +29,26 @@ Modification:
*************************************************/ *************************************************/
#pragma once #pragma once
#include "pagetable.h" #include "actracer.h"
#include "rbtree.h"
struct MemUsage {
TraceTag tag;
RbtTree mem_block_map;
};
bool module_phymem_init(); bool module_phymem_init();
char* kalloc(size_t size); char* kalloc(size_t size);
bool kfree(char* vaddr); bool kfree(char* vaddr);
bool raw_kfree(char* paddr); bool raw_kfree(char* paddr);
void* kalloc_by_ownership(TraceTag owner, uintptr_t size);
bool kfree_by_ownership(TraceTag owner, void* vaddr);
char* raw_alloc(size_t size); char* raw_alloc(size_t size);
bool raw_free(char* paddr); bool raw_free(char* paddr);
void* raw_alloc_by_ownership(TraceTag owner, uintptr_t size);
bool raw_free_by_ownership(TraceTag owner, void* vaddr);
void show_phymem_info(); void show_phymem_info();

View File

@ -23,6 +23,14 @@
#include "list.h" #include "list.h"
#include "object_allocator.h" #include "object_allocator.h"
#include "rbtree.h"
typedef uintptr_t sem_id_t;
typedef int32_t sem_val_t;
enum {
INVALID_SEM_ID = 0,
};
/// @warning this is no in use /// @warning this is no in use
enum { enum {
@ -30,8 +38,8 @@ enum {
}; };
struct ksemaphore { struct ksemaphore {
uint32_t id; sem_id_t id;
int val; sem_val_t val;
/* list of waiting threads */ /* list of waiting threads */
struct double_list_node wait_list_guard; struct double_list_node wait_list_guard;
/* list to manage semaphores */ /* list to manage semaphores */
@ -40,12 +48,17 @@ struct ksemaphore {
}; };
struct XiziSemaphorePool { struct XiziSemaphorePool {
uint32_t next_sem_id; sem_id_t next_sem_id;
struct slab_allocator allocator; struct slab_allocator allocator;
struct double_list_node sem_list_guard; struct double_list_node sem_list_guard;
RbtTree sem_pool_map;
sem_val_t nr_sem;
}; };
void semaphore_pool_init(struct XiziSemaphorePool* sem_pool); void semaphore_pool_init(struct XiziSemaphorePool* sem_pool);
int ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, int val); sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val);
bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, uint32_t sem_id); bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id);
bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, uint32_t sem_id); bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id);
bool ksemaphore_signal_no_wake(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id);
bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem_val_t decre);

View File

@ -32,6 +32,7 @@ Modification:
#include "actracer.h" #include "actracer.h"
#include "bitmap64.h" #include "bitmap64.h"
#include "buddy.h" #include "buddy.h"
#include "kalloc.h"
#include "list.h" #include "list.h"
struct TopLevelPageDirectory { struct TopLevelPageDirectory {
@ -48,6 +49,11 @@ struct ThreadStackPointer {
struct MemSpace { struct MemSpace {
/* trace node */ /* trace node */
TraceTag tag; TraceTag tag;
/* mem usage info */
struct MemUsage kernspace_mem_usage;
struct MemUsage userspace_mem_usage;
struct MemUsage customized_mapping_mem_map;
/* task memory resources */ /* task memory resources */
struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
uintptr_t heap_base; // mem size of proc used(allocated by kernel) uintptr_t heap_base; // mem size of proc used(allocated by kernel)
@ -63,7 +69,7 @@ struct MemSpace {
struct Thread* thread_to_notify; struct Thread* thread_to_notify;
}; };
struct MemSpace* alloc_memspace(); struct MemSpace* alloc_memspace(char* name);
void free_memspace(struct MemSpace* pmemspace); void free_memspace(struct MemSpace* pmemspace);
uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start); uintptr_t* load_memspace(struct MemSpace* pmemspace, char* img_start);
struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** argv); struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** argv);

View File

@ -29,10 +29,12 @@ Modification:
*************************************************/ *************************************************/
#pragma once #pragma once
#include "actracer_tag.h"
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
struct slab_state { struct slab_state {
TraceTag owner_tag;
struct slab_state *prev, *next; struct slab_state *prev, *next;
uint64_t bitmap; uint64_t bitmap;
uintptr_t refcount; uintptr_t refcount;
@ -45,9 +47,10 @@ struct slab_allocator {
size_t slabsize; size_t slabsize;
uint64_t bitmap_empty; uint64_t bitmap_empty;
struct slab_state *partial, *empty, *full; struct slab_state *partial, *empty, *full;
char* name;
}; };
void slab_init(struct slab_allocator*, size_t); void slab_init(struct slab_allocator*, size_t, char* name);
void slab_destroy(const struct slab_allocator*); void slab_destroy(const struct slab_allocator*);
void* slab_alloc(struct slab_allocator*); void* slab_alloc(struct slab_allocator*);

View File

@ -0,0 +1,22 @@
#pragma once
#include <stddef.h>
typedef struct QueueNode {
uintptr_t key;
void* data;
struct QueueNode* next;
} QueueNode;
typedef struct Queue {
QueueNode* front;
QueueNode* rear;
int nr_ele;
} Queue;
void queue_init(Queue* queue);
QueueNode* queue_front(Queue* queue);
bool queue_is_empty(Queue* queue);
bool dequeue(Queue* queue);
bool enqueue(Queue* queue, uintptr_t key, void* data);
void module_queue_factory_init(TraceTag* _softkernel_tag);

View File

@ -0,0 +1,39 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
#include "actracer.h"
// CLRS
// Insertion and Deletion in a Red Black Tree
enum rbt_type {
RED,
BLACK
};
typedef struct RbtNode {
uintptr_t key;
void* data;
struct RbtNode* left;
struct RbtNode* right;
struct RbtNode* parent;
enum rbt_type color;
} RbtNode;
typedef struct RbtTree {
RbtNode* root;
int nr_ele;
} RbtTree;
void rbtree_init(RbtTree* tree);
int rbt_insert(RbtTree* tree, uintptr_t key, void* data);
RbtNode* rbt_search(RbtTree* tree, uintptr_t key);
int rbt_delete(RbtTree* tree, uintptr_t key);
void module_rbt_factory_init(TraceTag* _softkernel_tag);
static inline bool rbt_is_empty(RbtTree* tree)
{
return tree->nr_ele == 0;
}

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file scheduler.h
* @brief scheduler algorithm declaration
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: scheduler.h
Description: scheduler algorithm declaration
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#pragma once
#include "task.h"
struct Thread* max_priority_runnable_task(void);
struct Thread* round_robin_runnable_task(uint32_t priority);
void recover_priority(void);

View File

@ -1,36 +1,21 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file scheduler.h
* @brief scheduler algorithm declaration
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: scheduler.h
Description: scheduler algorithm declaration
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#pragma once #pragma once
#include "actracer.h"
#include "ksemaphore.h"
#include "task.h" #define TASK_MAX_PRIORITY 32
struct Thread* max_priority_runnable_task(void); struct ScheduleNode {
struct Thread* round_robin_runnable_task(uint32_t priority); TraceTag task_ref;
void recover_priority(void); struct double_list_node list_node;
};
struct Scheduler {
TraceTag tag;
struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
struct double_list_node task_running_list_head;
struct double_list_node task_blocked_list_head;
struct double_list_node task_sleep_list_head;
struct XiziSemaphorePool semaphore_pool;
};

View File

@ -32,12 +32,17 @@ Modification:
#include <stdint.h> #include <stdint.h>
#include "actracer.h" #include "actracer.h"
#include "ksemaphore.h"
#include "list.h" #include "list.h"
#include "task.h" #include "task.h"
enum {
INVALID_SESS_ID = 0,
};
/// @brief userland session info copy /// @brief userland session info copy
struct Session { struct Session {
int id; uintptr_t id;
int capacity; int capacity;
int head; int head;
int tail; int tail;
@ -48,7 +53,7 @@ struct Session {
#define CLIENT_SESSION_BACKEND(session) CONTAINER_OF(session, struct session_backend, client_side) #define CLIENT_SESSION_BACKEND(session) CONTAINER_OF(session, struct session_backend, client_side)
struct server_session { struct server_session {
struct double_list_node node; // list_head of server task's ipc pipes struct double_list_node node; // list node of server task's ipc pipes
uintptr_t buf_addr; uintptr_t buf_addr;
int capacity; int capacity;
int head; int head;
@ -57,7 +62,7 @@ struct server_session {
}; };
struct client_session { struct client_session {
struct double_list_node node; // list_head of client task's ipc pipes struct double_list_node node; // list node of client task's ipc pipes
uintptr_t buf_addr; uintptr_t buf_addr;
int capacity; int capacity;
bool closed; bool closed;
@ -67,11 +72,12 @@ struct client_session {
struct session_backend { struct session_backend {
struct server_session server_side; struct server_session server_side;
struct client_session client_side; struct client_session client_side;
int session_id; // id of this session uintptr_t session_id; // id of this session
int nr_pages; // pages used by this pipe int nr_pages; // pages used by this pipe
struct Thread* client; // client of this pipe struct Thread* client; // client of this pipe
struct Thread* server; // server of this pipe struct Thread* server; // server of this pipe
sem_id_t client_sem_to_wait;
uintptr_t buf_kernel_addr; uintptr_t buf_kernel_addr;
}; };
@ -90,3 +96,6 @@ struct XiziSharePageManager {
extern struct XiziSharePageManager xizi_share_page_manager; extern struct XiziSharePageManager xizi_share_page_manager;
int module_share_page_init(struct SharePageRightGroup* right_group); int module_share_page_init(struct SharePageRightGroup* right_group);
void client_close_session(struct Thread* thd, struct client_session* cli_sess);
void server_close_session(struct Thread* thd, struct server_session* svr_sess);

View File

@ -49,6 +49,8 @@ Modification:
#define SYSCALL_SEMAPHORE 13 // semaphore related operations #define SYSCALL_SEMAPHORE 13 // semaphore related operations
#define SYSCALL_SLEEP 14 // sleep #define SYSCALL_SLEEP 14 // sleep
#define SYSCALL_WAIT_SESSION 15
// clang-format on // clang-format on
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
@ -67,6 +69,7 @@ typedef enum {
SYS_STATE_SHOW_CPU_INFO, SYS_STATE_SHOW_CPU_INFO,
SYS_STATE_GET_CURRENT_TICK, SYS_STATE_GET_CURRENT_TICK,
SYS_STATE_GET_CURRENT_SECOND, SYS_STATE_GET_CURRENT_SECOND,
SYS_STATE_SHOW_ACTREE,
} sys_state_option; } sys_state_option;
typedef enum { typedef enum {
@ -75,6 +78,17 @@ typedef enum {
SYS_TASK_YIELD_BLOCK_IPC = 0x2, SYS_TASK_YIELD_BLOCK_IPC = 0x2,
} task_yield_reason; } task_yield_reason;
typedef enum {
SYS_MMAP_NORMAL = 0x0,
SYS_MMAP_CUSTOMIZE,
} sys_mmap_type;
typedef struct {
sys_mmap_type type;
uintptr_t attr;
bool is_dev;
} sys_mmap_info;
typedef union { typedef union {
struct { struct {
uintptr_t memblock_start; uintptr_t memblock_start;
@ -104,10 +118,12 @@ int sys_register_as_server(char* name);
int sys_connect_session(char* path, int capacity, struct Session* user_session); int sys_connect_session(char* path, int capacity, struct Session* user_session);
int sys_poll_session(struct Session* userland_session_arr, int arr_capacity); int sys_poll_session(struct Session* userland_session_arr, int arr_capacity);
int sys_close_session(struct Thread* task, struct Session* session); int sys_close_session(struct Thread* task, struct Session* session);
int sys_wait_session(struct Session* userland_session);
int sys_exec(char* img_start, char* name, char** argv); int sys_exec(char* img_start, char* name, char** argv);
int sys_state(sys_state_option option, sys_state_info* info); int sys_state(sys_state_option option, sys_state_info* info);
int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev); int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev);
int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info);
int sys_register_irq(int irq_num, int irq_opcode); int sys_register_irq(int irq_num, int irq_opcode);
int sys_unbind_irq_all(struct Thread* task); int sys_unbind_irq_all(struct Thread* task);

View File

@ -37,6 +37,7 @@ Modification:
#include "memspace.h" #include "memspace.h"
#include "object_allocator.h" #include "object_allocator.h"
#include "pagetable.h" #include "pagetable.h"
#include "queue.h"
#include "share_page.h" #include "share_page.h"
#include "spinlock.h" #include "spinlock.h"
@ -47,7 +48,8 @@ Modification:
#define SLEEP_MONITOR_CORE 0 #define SLEEP_MONITOR_CORE 0
enum ProcState { enum ProcState {
INIT = 0, UNINIT = 0,
INIT,
READY, READY,
RUNNING, RUNNING,
DEAD, DEAD,
@ -98,8 +100,12 @@ struct Thread {
/* task communication resources */ /* task communication resources */
struct double_list_node cli_sess_listhead; struct double_list_node cli_sess_listhead;
struct double_list_node svr_sess_listhead; struct double_list_node svr_sess_listhead;
RbtTree cli_sess_map;
RbtTree svr_sess_map;
Queue sessions_to_be_handle;
Queue sessions_in_handle;
struct TraceTag server_identifier; struct TraceTag server_identifier;
bool advance_unblock; bool advance_unblock; // @todo abandon
/* task schedule attributes */ /* task schedule attributes */
struct double_list_node node; struct double_list_node node;
@ -115,6 +121,22 @@ struct SchedulerRightGroup {
struct TraceTag mmu_driver_tag; struct TraceTag mmu_driver_tag;
}; };
/* @todo task pool to maintain task lifetime and support fast task search */
struct GlobalTaskPool {
RbtTree thd_ref_map;
struct double_list_node thd_listing_head;
};
struct TaskScheduler {
};
struct TaskLifecycleOperations {
/* new a task control block, checkout #sys_spawn for usage */
struct Thread* (*new_thread)(struct MemSpace* pmemspace);
/* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
void (*free_thread)(struct Thread*);
};
struct XiziTaskManager { struct XiziTaskManager {
TraceTag tag; TraceTag tag;
/* thead schedule lists */ /* thead schedule lists */
@ -123,6 +145,10 @@ struct XiziTaskManager {
struct double_list_node task_blocked_list_head; struct double_list_node task_blocked_list_head;
struct double_list_node task_sleep_list_head; struct double_list_node task_sleep_list_head;
struct XiziSemaphorePool semaphore_pool; struct XiziSemaphorePool semaphore_pool;
/* living task pool */
TraceTag task_pool_tag;
/* task lifecycle Ops */
TraceTag task_lifecycle_ops_tag;
/* mem allocator */ /* mem allocator */
struct slab_allocator memspace_allocator; struct slab_allocator memspace_allocator;
@ -132,10 +158,6 @@ struct XiziTaskManager {
/* init task manager */ /* init task manager */
void (*init)(); void (*init)();
/* new a task control block, checkout #sys_spawn for usage */
struct Thread* (*new_task_cb)(struct MemSpace* pmemspace);
/* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
void (*free_pcb)(struct Thread*);
/* init a task control block, set name, remain_tick, state, cwd, priority, etc. */ /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
void (*task_set_default_schedule_attr)(struct Thread*); void (*task_set_default_schedule_attr)(struct Thread*);

View File

@ -31,10 +31,14 @@ Modification:
#include "assert.h" #include "assert.h"
#include "log.h" #include "log.h"
#include "rbtree.h"
#include "task.h" #include "task.h"
bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softkernel_tag) bool softkernel_init(TraceTag* _hardkernel_tag, struct TraceTag* _softkernel_tag)
{ {
module_rbt_factory_init(_softkernel_tag);
module_queue_factory_init(_softkernel_tag);
struct TraceTag server_identifier_owner; struct TraceTag server_identifier_owner;
CreateResourceTag(&server_identifier_owner, _softkernel_tag, "server-identifier", TRACER_OWNER, NULL); CreateResourceTag(&server_identifier_owner, _softkernel_tag, "server-identifier", TRACER_OWNER, NULL);

View File

@ -59,6 +59,7 @@ int main(void)
if (cpu_id == 0) { if (cpu_id == 0) {
/* init memory management first */ /* init memory management first */
module_phymem_init(); // init buddy management system module_phymem_init(); // init buddy management system
/* init tracer system */ /* init tracer system */
sys_tracer_init(); sys_tracer_init();

View File

@ -31,6 +31,7 @@ Modification:
#include "buddy.h" #include "buddy.h"
#include "kalloc.h" #include "kalloc.h"
#include "log.h" #include "log.h"
#include "pagetable.h"
static void _buddy_split_page(struct KPage* page, uintptr_t low_order, uintptr_t high_order, struct KFreeList* list) static void _buddy_split_page(struct KPage* page, uintptr_t low_order, uintptr_t high_order, struct KFreeList* list)
{ {
@ -161,7 +162,8 @@ bool KBuddyInit(struct KBuddy* pbuddy, uintptr_t mem_start, uintptr_t mem_end)
// total number of free pages // total number of free pages
pbuddy->n_pages = (pbuddy->mem_end - (uintptr_t)pbuddy->mem_start) >> LEVEL4_PTE_SHIFT; pbuddy->n_pages = (pbuddy->mem_end - (uintptr_t)pbuddy->mem_start) >> LEVEL4_PTE_SHIFT;
memset(pbuddy->pages, 0, pbuddy->n_pages); memset(pbuddy->pages, 0, pbuddy->n_pages * sizeof(struct KPage));
// memset(pbuddy->pages, 0, pbuddy->n_pages);
// init each free page list from 2^0 to 2^8 // init each free page list from 2^0 to 2^8
for (; i < MAX_BUDDY_ORDER; i++) { for (; i < MAX_BUDDY_ORDER; i++) {

View File

@ -30,6 +30,9 @@ Modification:
#include "kalloc.h" #include "kalloc.h"
#include "assert.h" #include "assert.h"
#include "memlayout.h"
#include "pagetable.h"
#include "actracer.h" #include "actracer.h"
#include "buddy.h" #include "buddy.h"
@ -64,11 +67,37 @@ char* kalloc(uintptr_t size)
return mem_alloc; return mem_alloc;
} }
void* kalloc_by_ownership(TraceTag owner, uintptr_t size)
{
void* new_mem = kalloc(size);
if (NULL == new_mem) {
return NULL;
}
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
if (0 != rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL)) {
kfree(new_mem);
return NULL;
}
return new_mem;
}
bool kfree(char* vaddr) bool kfree(char* vaddr)
{ {
return KBuddyFree(&kern_virtmem_buddy, V2P_WO(vaddr)); return KBuddyFree(&kern_virtmem_buddy, V2P_WO(vaddr));
} }
bool kfree_by_ownership(TraceTag owner, void* vaddr)
{
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
// DEBUG("%p %p %p %p\n", usage, usage->mem_block_root, usage->tag, vaddr);
RbtNode* node = rbt_search(&usage->mem_block_map, (uintptr_t)vaddr);
assert(NULL != node);
assert(0 == rbt_delete(&usage->mem_block_map, node->key));
return kfree(vaddr);
}
bool raw_kfree(char* paddr) bool raw_kfree(char* paddr)
{ {
return KBuddyFree(&kern_virtmem_buddy, paddr); return KBuddyFree(&kern_virtmem_buddy, paddr);
@ -83,11 +112,36 @@ char* raw_alloc(size_t size)
return mem_alloc; return mem_alloc;
} }
void* raw_alloc_by_ownership(TraceTag owner, uintptr_t size)
{
void* new_mem = raw_alloc(size);
if (!new_mem) {
return NULL;
}
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
if (0 != rbt_insert(&usage->mem_block_map, (uintptr_t)new_mem, NULL)) {
raw_free(new_mem);
return NULL;
}
return new_mem;
}
bool raw_free(char* paddr) bool raw_free(char* paddr)
{ {
return KBuddyFree(&user_phy_freemem_buddy, paddr); return KBuddyFree(&user_phy_freemem_buddy, paddr);
} }
bool raw_free_by_ownership(TraceTag owner, void* vaddr)
{
struct MemUsage* usage = GetSysObject(struct MemUsage, &owner);
RbtNode* node = rbt_search(&usage->mem_block_map, (uintptr_t)vaddr);
assert(NULL != node);
assert(0 == rbt_delete(&usage->mem_block_map, node->key));
return raw_free(vaddr);
}
void show_phymem_info() void show_phymem_info()
{ {
KFreePagesInfo(&user_phy_freemem_buddy); KFreePagesInfo(&user_phy_freemem_buddy);

View File

@ -32,6 +32,7 @@ Modification:
#include "assert.h" #include "assert.h"
#include "kalloc.h" #include "kalloc.h"
#include "object_allocator.h" #include "object_allocator.h"
#include "pagetable.h"
#define BITMAP_BITS_EMPTY_FULL ((uint64_t)0) #define BITMAP_BITS_EMPTY_FULL ((uint64_t)0)
#define BITMAP_FIRST_BIT ((uint64_t)1) #define BITMAP_FIRST_BIT ((uint64_t)1)
@ -47,7 +48,7 @@ Modification:
#define ARENA_SIZE_PER_INCREASE PAGE_SIZE #define ARENA_SIZE_PER_INCREASE PAGE_SIZE
#define MAX_NR_ELEMENT_PER_SLABPAGE 64 #define MAX_NR_ELEMENT_PER_SLABPAGE 64
void slab_init(struct slab_allocator* const allocator, const size_t element_size) void slab_init(struct slab_allocator* const allocator, const size_t element_size, char* name)
{ {
if (allocator == NULL) { if (allocator == NULL) {
panic("init a NULL slab_allocator\n"); panic("init a NULL slab_allocator\n");
@ -63,8 +64,11 @@ void slab_init(struct slab_allocator* const allocator, const size_t element_size
allocator->nr_elements = allocator->nr_elements > MAX_NR_ELEMENT_PER_SLABPAGE ? MAX_NR_ELEMENT_PER_SLABPAGE : allocator->nr_elements; allocator->nr_elements = allocator->nr_elements > MAX_NR_ELEMENT_PER_SLABPAGE ? MAX_NR_ELEMENT_PER_SLABPAGE : allocator->nr_elements;
allocator->bitmap_empty = ~BITMAP_BITS_EMPTY_FULL >> (MAX_NR_ELEMENT_PER_SLABPAGE - allocator->nr_elements); allocator->bitmap_empty = ~BITMAP_BITS_EMPTY_FULL >> (MAX_NR_ELEMENT_PER_SLABPAGE - allocator->nr_elements);
allocator->partial = allocator->empty = allocator->full = NULL; allocator->partial = allocator->empty = allocator->full = NULL;
if (name) {
allocator->name = name;
}
} }
void* slab_alloc(struct slab_allocator* const allocator) void* slab_alloc(struct slab_allocator* const allocator)
@ -107,7 +111,7 @@ void* slab_alloc(struct slab_allocator* const allocator)
/* achieve slab from outer arena */ /* achieve slab from outer arena */
allocator->partial = (struct slab_state*)LOWLEVEL_ALLOC(allocator->slabsize); allocator->partial = (struct slab_state*)LOWLEVEL_ALLOC(allocator->slabsize);
if (UNLIKELY(allocator->partial == NULL)) { if (UNLIKELY(allocator->partial == NULL)) {
ERROR("no enough memory\n"); ERROR("slab %s: no enough memory\n", allocator->name);
return allocator->partial = NULL; return allocator->partial = NULL;
} }
allocator->partial->prev = allocator->partial->next = NULL; allocator->partial->prev = allocator->partial->next = NULL;

View File

@ -143,6 +143,20 @@ static bool _map_user_pages(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr
return _map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, (intptr_t)len, mem_attr); return _map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, (intptr_t)len, mem_attr);
} }
bool _map_customizable_page(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, uintptr_t attr)
{
if (len < 0) {
return false;
}
if (UNLIKELY(vaddr >= USER_MEM_TOP)) {
ERROR("mapping kernel space.\n");
return false;
}
return _map_pages(pmemspace->pgdir.pd_addr, vaddr, paddr, (intptr_t)len, attr);
}
/// assume that a user pagedir is allocated from [0, size) /// assume that a user pagedir is allocated from [0, size)
/// if new_size > old_size, allocate more space, /// if new_size > old_size, allocate more space,
/// if old_size > new_size, free extra space, to avoid unnecessary alloc/free. /// if old_size > new_size, free extra space, to avoid unnecessary alloc/free.
@ -160,7 +174,8 @@ static uintptr_t _resize_user_pgdir(struct MemSpace* pmemspace, uintptr_t old_si
uintptr_t cur_size = ALIGNUP(old_size, PAGE_SIZE); uintptr_t cur_size = ALIGNUP(old_size, PAGE_SIZE);
uintptr_t size_needed = ALIGNUP(new_size, PAGE_SIZE) - cur_size; uintptr_t size_needed = ALIGNUP(new_size, PAGE_SIZE) - cur_size;
char* new_page = kalloc(size_needed); // char* new_page = kalloc(size_needed);
char* new_page = kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, size_needed);
if (new_page == NULL) { if (new_page == NULL) {
ERROR("No memory\n"); ERROR("No memory\n");
return cur_size; return cur_size;
@ -169,7 +184,6 @@ static uintptr_t _resize_user_pgdir(struct MemSpace* pmemspace, uintptr_t old_si
if (!xizi_pager.map_pages(pmemspace, cur_size, V2P(new_page), size_needed, false)) { if (!xizi_pager.map_pages(pmemspace, cur_size, V2P(new_page), size_needed, false)) {
return cur_size; return cur_size;
} }
CreateResourceTag(NULL, &pmemspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, V2P_WO(new_page));
return new_size; return new_size;
} }
@ -251,7 +265,7 @@ struct TopLevelPageDirectory kern_pgdir;
void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag) void load_kern_pgdir(struct TraceTag* mmu_driver_tag, struct TraceTag* intr_driver_tag)
{ {
if (mmu_driver_tag->meta == NULL) { if (mmu_driver_tag->inner_node == NULL) {
ERROR("Invalid mmu driver tag.\n"); ERROR("Invalid mmu driver tag.\n");
return; return;
} }

View File

@ -44,7 +44,7 @@ static struct slab_allocator* SessionAllocator()
static bool init = false; static bool init = false;
static struct slab_allocator session_slab; static struct slab_allocator session_slab;
if (!init) { if (!init) {
slab_init(&session_slab, sizeof(struct session_backend)); slab_init(&session_slab, sizeof(struct session_backend), "SessionAllocator");
} }
return &session_slab; return &session_slab;
} }
@ -204,14 +204,37 @@ void unmap_task_share_pages(struct Thread* task, const uintptr_t task_vaddr, con
} }
} }
static int next_session_id = 1; static int next_session_id = INVALID_SESS_ID + 1;
struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity) struct session_backend* create_share_pages(struct Thread* client, struct Thread* server, const int capacity)
{ {
/* alloc session backend */ /* alloc session backend */
struct session_backend* session_backend = (struct session_backend*)slab_alloc(SessionAllocator()); struct session_backend* session_backend = (struct session_backend*)slab_alloc(SessionAllocator());
if (UNLIKELY(session_backend == NULL)) { if (UNLIKELY(session_backend == NULL)) {
return NULL; return NULL;
} }
session_backend->session_id = next_session_id++;
if (0 != rbt_insert(&client->cli_sess_map, session_backend->session_id, &session_backend->client_side)) {
DEBUG("Rbt of %s no memory\n", client->name);
slab_free(SessionAllocator(), session_backend);
return NULL;
}
if (0 != rbt_insert(&server->svr_sess_map, session_backend->session_id, &session_backend->server_side)) {
DEBUG("Rbt of %s no memory\n", server->name);
rbt_delete(&client->cli_sess_map, session_backend->session_id);
slab_free(SessionAllocator(), session_backend);
return NULL;
}
sem_id_t new_sem_id = ksemaphore_alloc(&xizi_task_manager.semaphore_pool, 0);
if (new_sem_id == INVALID_SEM_ID) {
ERROR("No memory to alloc sem\n");
slab_free(SessionAllocator(), session_backend);
return NULL;
}
session_backend->client_sem_to_wait = new_sem_id;
int true_capacity = ALIGNUP(capacity, PAGE_SIZE); int true_capacity = ALIGNUP(capacity, PAGE_SIZE);
int nr_pages = true_capacity / PAGE_SIZE; int nr_pages = true_capacity / PAGE_SIZE;
@ -220,6 +243,7 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
if (UNLIKELY(kern_vaddr == (uintptr_t)NULL)) { if (UNLIKELY(kern_vaddr == (uintptr_t)NULL)) {
ERROR("No memory for session\n"); ERROR("No memory for session\n");
slab_free(SessionAllocator(), session_backend); slab_free(SessionAllocator(), session_backend);
ksemaphore_free(&xizi_task_manager.semaphore_pool, new_sem_id);
return NULL; return NULL;
} }
@ -229,6 +253,7 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
if (UNLIKELY(client_vaddr == (uintptr_t)NULL)) { if (UNLIKELY(client_vaddr == (uintptr_t)NULL)) {
kfree((char*)kern_vaddr); kfree((char*)kern_vaddr);
slab_free(SessionAllocator(), session_backend); slab_free(SessionAllocator(), session_backend);
ksemaphore_free(&xizi_task_manager.semaphore_pool, new_sem_id);
return NULL; return NULL;
} }
@ -238,11 +263,11 @@ struct session_backend* create_share_pages(struct Thread* client, struct Thread*
unmap_task_share_pages(client, client_vaddr, nr_pages); unmap_task_share_pages(client, client_vaddr, nr_pages);
kfree((char*)kern_vaddr); kfree((char*)kern_vaddr);
slab_free(SessionAllocator(), session_backend); slab_free(SessionAllocator(), session_backend);
ksemaphore_free(&xizi_task_manager.semaphore_pool, new_sem_id);
return NULL; return NULL;
} }
/* build session_backend */ /* build session_backend */
session_backend->session_id = next_session_id++;
session_backend->buf_kernel_addr = kern_vaddr; session_backend->buf_kernel_addr = kern_vaddr;
session_backend->nr_pages = nr_pages; session_backend->nr_pages = nr_pages;
session_backend->client = client; session_backend->client = client;
@ -286,25 +311,41 @@ int delete_share_pages(struct session_backend* session_backend)
// close ssesion in server's perspective // close ssesion in server's perspective
if (session_backend->server_side.closed && session_backend->server != NULL) { if (session_backend->server_side.closed && session_backend->server != NULL) {
xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages); xizi_share_page_manager.unmap_task_share_pages(session_backend->server, session_backend->server_side.buf_addr, session_backend->nr_pages);
ERROR_FREE
{
assert(0 == rbt_delete(&session_backend->server->svr_sess_map, session_backend->session_id));
doubleListDel(&session_backend->server_side.node); doubleListDel(&session_backend->server_side.node);
session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE; session_backend->server->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->server = NULL; session_backend->server = NULL;
} }
}
// close ssesion in client's perspective // close ssesion in client's perspective
if (session_backend->client_side.closed && session_backend->client != NULL) { if (session_backend->client_side.closed && session_backend->client != NULL) {
xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages); xizi_share_page_manager.unmap_task_share_pages(session_backend->client, session_backend->client_side.buf_addr, session_backend->nr_pages);
ERROR_FREE
{
assert(0 == rbt_delete(&session_backend->client->cli_sess_map, session_backend->session_id));
doubleListDel(&session_backend->client_side.node); doubleListDel(&session_backend->client_side.node);
session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE; session_backend->client->memspace->mem_size -= session_backend->nr_pages * PAGE_SIZE;
session_backend->client = NULL; session_backend->client = NULL;
} }
extern bool ksemaphore_free_safe(struct XiziSemaphorePool * sem_pool, sem_id_t sem_id, struct Thread * target);
assert(ksemaphore_free_safe(&xizi_task_manager.semaphore_pool, session_backend->client_sem_to_wait, session_backend->client));
}
/* free seesion backend */ /* free seesion backend */
if (session_backend->server_side.closed && session_backend->client_side.closed) { if (session_backend->server_side.closed && session_backend->client_side.closed) {
ERROR_FREE
{
assert(session_backend->client == NULL && session_backend->server == NULL); assert(session_backend->client == NULL && session_backend->server == NULL);
kfree((void*)session_backend->buf_kernel_addr); assert(kfree((void*)session_backend->buf_kernel_addr));
slab_free(SessionAllocator(), (void*)session_backend); slab_free(SessionAllocator(), (void*)session_backend);
} }
}
return 0; return 0;
} }
@ -322,3 +363,27 @@ int module_share_page_init(struct SharePageRightGroup* _right_group)
right_group = *_right_group; right_group = *_right_group;
return 0; return 0;
} }
void client_close_session(struct Thread* thd, struct client_session* cli_sess)
{
assert(cli_sess != NULL);
struct session_backend* sess_backend = CLIENT_SESSION_BACKEND(cli_sess);
assert(sess_backend->client == thd);
assert(cli_sess->closed == false);
uintptr_t sess_id = sess_backend->session_id;
cli_sess->closed = true;
xizi_share_page_manager.delete_share_pages(sess_backend);
assert(NULL == rbt_search(&thd->cli_sess_map, sess_id));
}
void server_close_session(struct Thread* thd, struct server_session* svr_sess)
{
assert(svr_sess != NULL);
struct session_backend* sess_backend = SERVER_SESSION_BACKEND(svr_sess);
assert(sess_backend->server == thd);
assert(svr_sess->closed == false);
uintptr_t sess_id = sess_backend->session_id;
svr_sess->closed = true;
xizi_share_page_manager.delete_share_pages(sess_backend);
assert(NULL == rbt_search(&thd->svr_sess_map, sess_id));
}

View File

@ -12,6 +12,7 @@ SRC_FILES := syscall.c \
sys_state.c \ sys_state.c \
sys_mmap.c \ sys_mmap.c \
sys_kill.c \ sys_kill.c \
sys_semaphore.c sys_semaphore.c \
sys_wait_session.c
include $(KERNEL_ROOT)/compiler.mk include $(KERNEL_ROOT)/compiler.mk

View File

@ -46,35 +46,50 @@ int sys_close_session(struct Thread* cur_task, struct Session* session)
return -1; return -1;
} }
/* check if session is a client one or a server one */
struct session_backend* session_backend = NULL; struct session_backend* session_backend = NULL;
struct client_session* client_session = NULL; /* check if session is a client one or a server one */
DOUBLE_LIST_FOR_EACH_ENTRY(client_session, &cur_task->cli_sess_listhead, node) RbtNode* client_session_node = rbt_search(&cur_task->cli_sess_map, session->id);
{ if (client_session_node != NULL) {
if ((uintptr_t)session->buf == client_session->buf_addr) { struct client_session* client_session = (struct client_session*)client_session_node->data;
if (CLIENT_SESSION_BACKEND(client_session)->session_id != session->id || //
client_session->buf_addr != (uintptr_t)session->buf) {
ERROR("Error closing session from %s: Invalid session\n", cur_task->name);
return -1;
}
/* close client session */
session_backend = CLIENT_SESSION_BACKEND(client_session); session_backend = CLIENT_SESSION_BACKEND(client_session);
assert(session_backend->client == cur_task); assert(session_backend->client == cur_task);
assert(client_session->closed == false); assert(client_session->closed == false);
client_session->closed = true; client_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend); xizi_share_page_manager.delete_share_pages(session_backend);
break;
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
} }
} }
if (UNLIKELY(session_backend == NULL)) { RbtNode* server_session_node = rbt_search(&cur_task->svr_sess_map, session->id);
struct server_session* server_session = NULL; if (server_session_node != NULL) {
DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node) struct server_session* server_session = (struct server_session*)server_session_node->data;
{ if (SERVER_SESSION_BACKEND(server_session)->session_id != session->id || //
if ((uintptr_t)session->buf == server_session->buf_addr) { server_session->buf_addr != (uintptr_t)session->buf) {
ERROR("Error closing session from %s: Invalid session\n", cur_task->name);
return -1;
}
session_backend = SERVER_SESSION_BACKEND(server_session); session_backend = SERVER_SESSION_BACKEND(server_session);
assert(session_backend->server == cur_task); assert(session_backend->server == cur_task);
assert(server_session->closed == false); assert(server_session->closed == false);
server_session->closed = true; server_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend); xizi_share_page_manager.delete_share_pages(session_backend);
break;
}
}
} }
/* close this session */ /* close this session */

View File

@ -69,7 +69,7 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
if (!AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier")) { if (!AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier")) {
panic("Server identifier not initialized.\b"); panic("Server identifier not initialized.\b");
} }
assert(server_identifier_owner.meta != NULL); assert(server_identifier_owner.inner_node != NULL);
struct TraceTag server_tag; struct TraceTag server_tag;
if (!AchieveResourceTag(&server_tag, &server_identifier_owner, path)) { if (!AchieveResourceTag(&server_tag, &server_identifier_owner, path)) {

View File

@ -42,7 +42,8 @@ int sys_exit(struct Thread* ptask)
ptask->dead = true; ptask->dead = true;
// free that task straightly if it's a blocked task // free that task straightly if it's a blocked task
if (ptask->state == BLOCKED) { if (ptask->state == BLOCKED) {
xizi_task_manager.free_pcb(ptask); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_thread(ptask);
} }
// yield current task in case it wants to exit itself // yield current task in case it wants to exit itself
xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false); xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);

View File

@ -54,15 +54,15 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
} }
} else { } else {
uintptr_t load_vaddr = *vaddr; uintptr_t load_vaddr = *vaddr;
char* new_paddr = raw_alloc(true_len); char* new_paddr = raw_alloc_by_ownership(cur_task->memspace->userspace_mem_usage.tag, true_len);
if (new_paddr == NULL) { if (new_paddr == NULL) {
return -1; return -1;
} }
if (xizi_share_page_manager.task_map_pages(cur_task, load_vaddr, (uintptr_t)new_paddr, true_len / PAGE_SIZE, false) == (uintptr_t)NULL) { if (xizi_share_page_manager.task_map_pages(cur_task, load_vaddr, (uintptr_t)new_paddr, true_len / PAGE_SIZE, false) == (uintptr_t)NULL) {
raw_free(new_paddr); raw_free_by_ownership(cur_task->memspace->userspace_mem_usage.tag, new_paddr);
return -1; return -1;
} }
CreateResourceTag(NULL, &cur_task->memspace->tag, NULL, TRACER_MEM_FROM_BUDDY_AC_RESOURCE, new_paddr); CreateResourceTag(NULL, &cur_task->memspace->tag, "USER_MEMORY", TRACER_MEM_SIGNATURE, new_paddr);
*paddr = (uintptr_t)new_paddr; *paddr = (uintptr_t)new_paddr;
} }
@ -70,3 +70,119 @@ int sys_mmap(uintptr_t* vaddr, uintptr_t* paddr, int len, int is_dev)
*vaddr = *vaddr + true_len; *vaddr = *vaddr + true_len;
return 0; return 0;
} }
extern bool _map_customizable_page(struct MemSpace* pmemspace, uintptr_t vaddr, uintptr_t paddr, int len, uintptr_t attr);

/**
 * @brief v2 mmap syscall: map memory into the calling thread's memspace.
 *
 * Two modes, selected by info->type:
 *  - SYS_MMAP_CUSTOMIZE: map with caller-supplied page attributes (info->attr)
 *    via _map_customizable_page().
 *  - SYS_MMAP_NORMAL: map through xizi_share_page_manager (info->is_dev marks
 *    device memory).
 * In both modes, if *paddr is non-zero the caller supplies the physical range
 * (and *vaddr must also be non-zero); otherwise fresh pages are allocated with
 * raw_alloc_by_ownership() and the new physical address is written back via
 * *paddr. On success *vaddr is advanced by the page-aligned length, the
 * memspace's mem_size is grown, and 0 is returned. On failure -1 is returned
 * and any partially created tag/allocation is rolled back.
 */
int sys_mmap_v2(uintptr_t* vaddr, uintptr_t* paddr, int len, sys_mmap_info* info)
{
    struct Thread* cur_task = cur_cpu()->task;
    assert(cur_task != NULL);

    /* validate all user-supplied pointers before dereferencing info below */
    if (vaddr == NULL || paddr == NULL || info == NULL) {
        ERROR("Invalid vaddr and paddr param from %s\n", cur_task->name);
        return -1;
    }

    int true_len = ALIGNUP(len, PAGE_SIZE);
    sys_mmap_type type = info->type;
    uintptr_t vaddr_to_map = *vaddr;

    if (type == SYS_MMAP_CUSTOMIZE) {
        if (*paddr != (uintptr_t)NULL) {
            /* caller supplies both ends of the mapping */
            if (vaddr_to_map == (uintptr_t)NULL) {
                ERROR("Customized mapping from %s must have vaddr(%p) and paddr(%p)\n", cur_task->name, vaddr, paddr);
                return -1;
            }
            uintptr_t paddr_to_map = *paddr;
            /* NOTE(review): this path signs the user pointer `vaddr` while the
             * allocation paths sign the physical address — confirm intended. */
            TraceTag mem_signature_tag;
            if (!CreateResourceTag(&mem_signature_tag, &cur_task->memspace->tag, "CUSTOMIZED_MEMORY", TRACER_MEM_SIGNATURE, (void*)vaddr)) {
                ERROR("Sign memory signature failed from %s\n", cur_task->name);
                return -1;
            }
            if (!_map_customizable_page(cur_task->memspace, vaddr_to_map, paddr_to_map, len, info->attr)) {
                ERROR("%s mapping page failed(Short of memory)\n", cur_task->name);
                DeleteResource(&mem_signature_tag, &cur_task->memspace->tag);
                return -1;
            }
        } else {
            /* kernel allocates fresh pages and reports them back via *paddr */
            char* new_paddr = raw_alloc_by_ownership(cur_task->memspace->userspace_mem_usage.tag, true_len);
            if (new_paddr == NULL) {
                ERROR("Alloc dynamic memory failed\n");
                return -1;
            }
            TraceTag mem_signature_tag;
            if (!CreateResourceTag(&mem_signature_tag, &cur_task->memspace->tag, "USER_MEMORY", TRACER_MEM_SIGNATURE, new_paddr)) {
                raw_free_by_ownership(cur_task->memspace->userspace_mem_usage.tag, new_paddr);
                ERROR("Sign memory signature failed from %s\n", cur_task->name);
                return -1;
            }
            if (!_map_customizable_page(cur_task->memspace, vaddr_to_map, (uintptr_t)new_paddr, len, info->attr)) {
                raw_free_by_ownership(cur_task->memspace->userspace_mem_usage.tag, new_paddr);
                DeleteResource(&mem_signature_tag, &cur_task->memspace->tag);
                return -1;
            }
            // assign new_paddr back to user
            *paddr = (uintptr_t)new_paddr;
        }
    } else if (type == SYS_MMAP_NORMAL) {
        bool is_dev = info->is_dev;
        if (*paddr != (uintptr_t)NULL) {
            /* caller supplies both ends of the mapping */
            if (vaddr_to_map == (uintptr_t)NULL) {
                ERROR("Invalid mapping from %s\n", cur_task->name);
                return -1;
            }
            uintptr_t paddr_to_map = *paddr;
            /* only the first threads (tid <= 1) may map kernel-managed RAM */
            if (paddr_to_map >= PHY_MEM_BASE && paddr_to_map < PHY_MEM_STOP && cur_task->tid > 1) {
                ERROR("mapping invalid memory: 0x%p\n", paddr_to_map);
                return -1;
            }
            if (xizi_share_page_manager.task_map_pages(cur_task, vaddr_to_map, paddr_to_map, true_len / PAGE_SIZE, is_dev) == (uintptr_t)NULL) {
                ERROR("%s mapping page failed(Short of memory)\n", cur_task->name);
                return -1;
            }
        } else {
            /* kernel allocates fresh pages and reports them back via *paddr */
            char* new_paddr = raw_alloc_by_ownership(cur_task->memspace->userspace_mem_usage.tag, true_len);
            if (new_paddr == NULL) {
                ERROR("Alloc dynamic memory failed\n");
                return -1;
            }
            TraceTag mem_signature_tag;
            if (!CreateResourceTag(&mem_signature_tag, &cur_task->memspace->tag, "USER_MEMORY", TRACER_MEM_SIGNATURE, new_paddr)) {
                raw_free_by_ownership(cur_task->memspace->userspace_mem_usage.tag, new_paddr);
                ERROR("Sign memory signature failed from %s\n", cur_task->name);
                return -1;
            }
            if (xizi_share_page_manager.task_map_pages(cur_task, vaddr_to_map, (uintptr_t)new_paddr, true_len / PAGE_SIZE, false) == (uintptr_t)NULL) {
                raw_free_by_ownership(cur_task->memspace->userspace_mem_usage.tag, new_paddr);
                DeleteResource(&mem_signature_tag, &cur_task->memspace->tag);
                return -1;
            }
            // assign new_paddr back to user
            *paddr = (uintptr_t)new_paddr;
        }
    } else {
        /* unknown mapping type */
        return -1;
    }

    /* common success tail (was duplicated per branch) */
    cur_task->memspace->mem_size += true_len;
    *vaddr = *vaddr + true_len;
    return 0;
}

View File

@ -34,14 +34,7 @@ Modification:
#include "syscall.h" #include "syscall.h"
#include "task.h" #include "task.h"
#define IPCSESSION_MSG(session) ((struct IpcMsg*)((char*)((session)->buf) + (session)->head)) extern bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id);
static inline bool is_msg_needed(struct IpcMsg* msg)
{
assert(msg != NULL);
return msg->header.magic == IPC_MSG_MAGIC && msg->header.valid == 1 && msg->header.done == 0 && msg->header.handling == 0;
}
int sys_poll_session(struct Session* userland_session_arr, int arr_capacity) int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
{ {
struct Thread* cur_task = cur_cpu()->task; struct Thread* cur_task = cur_cpu()->task;
@ -50,46 +43,40 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
return -1; return -1;
} }
struct double_list_node* cur_node = NULL;
struct server_session* server_session = NULL;
/* update old sessions */ /* update old sessions */
for (int i = 0; i < arr_capacity; i++) { int cur_userland_idx = 0;
if (UNLIKELY(userland_session_arr[i].buf == NULL)) { while (!queue_is_empty(&cur_task->sessions_in_handle)) {
break; struct server_session* server_session = (struct server_session*)queue_front(&cur_task->sessions_in_handle)->data;
} assert(server_session != NULL);
cur_node = cur_task->svr_sess_listhead.next;
server_session = CONTAINER_OF(cur_node, struct server_session, node); // wrong session info
if (UNLIKELY(server_session->buf_addr != (uintptr_t)userland_session_arr[i].buf)) { if (userland_session_arr[cur_userland_idx].id != SERVER_SESSION_BACKEND(server_session)->session_id || //
ERROR("mismatched old session addr, user buf: %x, server buf: %x\n", userland_session_arr[i].buf, server_session->buf_addr); (uintptr_t)userland_session_arr[cur_userland_idx].buf != server_session->buf_addr) {
return -1; ERROR("mismatched old session from %s, user buf: %x, server buf: %x\n", cur_task->name, userland_session_arr[cur_userland_idx].buf, server_session->buf_addr);
}
// update session_backend
// if current session is handled
if (server_session->head != userland_session_arr[i].head) {
struct Thread* client = SERVER_SESSION_BACKEND(server_session)->client;
if (client->state == BLOCKED) {
xizi_task_manager.task_unblock(client);
} else { } else {
client->advance_unblock = true; // update session_backend
ksemaphore_signal(&xizi_task_manager.semaphore_pool, SERVER_SESSION_BACKEND(server_session)->client_sem_to_wait);
server_session->head = userland_session_arr[cur_userland_idx].head;
server_session->tail = userland_session_arr[cur_userland_idx].tail;
userland_session_arr[cur_userland_idx].buf = NULL;
userland_session_arr[cur_userland_idx].id = -1;
} }
}
server_session->head = userland_session_arr[i].head; assert(dequeue(&cur_task->sessions_in_handle));
server_session->tail = userland_session_arr[i].tail; cur_userland_idx++;
doubleListDel(cur_node);
doubleListAddOnBack(cur_node, &cur_task->svr_sess_listhead);
} }
/* poll with new sessions */ /* poll with new sessions */
int nr_sessions_need_to_handle = 0; cur_userland_idx = 0;
bool has_middle_delete = false; while (!queue_is_empty(&cur_task->sessions_to_be_handle)) {
int session_idx = 0; if (cur_userland_idx == arr_capacity) {
DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
{
if (session_idx >= arr_capacity) {
break; break;
} }
struct server_session* server_session = (struct server_session*)queue_front(&cur_task->sessions_to_be_handle)->data;
assert(server_session != NULL);
if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) { if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
// client had closed it, then server will close it too // client had closed it, then server will close it too
struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session); struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);
@ -98,12 +85,11 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
assert(server_session->closed == false); assert(server_session->closed == false);
server_session->closed = true; server_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend); xizi_share_page_manager.delete_share_pages(session_backend);
// signal that there is a middle deletion of session dequeue(&cur_task->sessions_to_be_handle);
has_middle_delete = true; continue;
break;
} }
userland_session_arr[session_idx] = (struct Session) { userland_session_arr[cur_userland_idx] = (struct Session) {
.buf = (void*)server_session->buf_addr, .buf = (void*)server_session->buf_addr,
.capacity = server_session->capacity, .capacity = server_session->capacity,
.head = server_session->head, .head = server_session->head,
@ -111,25 +97,24 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
.id = SERVER_SESSION_BACKEND(server_session)->session_id, .id = SERVER_SESSION_BACKEND(server_session)->session_id,
}; };
struct IpcMsg* msg = IPCSESSION_MSG(&userland_session_arr[session_idx]); if (!enqueue(&cur_task->sessions_in_handle, 0, (void*)server_session)) {
if (msg != NULL && is_msg_needed(msg)) { userland_session_arr[cur_userland_idx].buf = NULL;
nr_sessions_need_to_handle++; userland_session_arr[cur_userland_idx].id = 0;
break;
}
assert(dequeue(&cur_task->sessions_to_be_handle));
cur_userland_idx++;
} }
session_idx++; // end of userland copy
if (cur_userland_idx < arr_capacity) {
userland_session_arr[cur_userland_idx].buf = NULL;
} }
if (session_idx < arr_capacity) { if (queue_is_empty(&cur_task->sessions_in_handle) && queue_is_empty(&cur_task->sessions_to_be_handle)) {
userland_session_arr[session_idx].buf = NULL;
if (!has_middle_delete && nr_sessions_need_to_handle == 0) {
if (cur_task->advance_unblock) {
cur_task->advance_unblock = false;
} else {
xizi_task_manager.task_yield_noschedule(cur_task, false); xizi_task_manager.task_yield_noschedule(cur_task, false);
xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task); // @todo support blocking(now bug at 4 cores running)
// xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
} }
}
}
return 0; return 0;
} }

View File

@ -35,8 +35,6 @@ Modification:
#include "syscall.h" #include "syscall.h"
#include "task.h" #include "task.h"
#define SERVER_DIR_NAME_SIZE 14
int sys_register_as_server(char* name) int sys_register_as_server(char* name)
{ {
// get server thread // get server thread
@ -48,7 +46,7 @@ int sys_register_as_server(char* name)
if (!AchieveResourceTag(&server_identifier_set_tag, RequireRootTag(), "softkernel/server-identifier")) { if (!AchieveResourceTag(&server_identifier_set_tag, RequireRootTag(), "softkernel/server-identifier")) {
panic("Server identifier not initialized.\b"); panic("Server identifier not initialized.\b");
} }
assert(server_identifier_set_tag.meta != NULL); assert(server_identifier_set_tag.inner_node != NULL);
// create server tag under server tag owner // create server tag under server tag owner
if (!CreateResourceTag(&server->server_identifier, &server_identifier_set_tag, name, TRACER_SERVER_IDENTITY_AC_RESOURCE, server)) { if (!CreateResourceTag(&server->server_identifier, &server_identifier_set_tag, name, TRACER_SERVER_IDENTITY_AC_RESOURCE, server)) {

View File

@ -76,6 +76,12 @@ static void send_irq_to_user(int irq_num)
buf->header.magic = IPC_MSG_MAGIC; buf->header.magic = IPC_MSG_MAGIC;
buf->header.valid = 1; buf->header.valid = 1;
struct session_backend* session_backend = irq_forward_table[irq_num].p_kernel_session;
struct Thread* server_to_call = session_backend->server;
if (!enqueue(&server_to_call->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
return;
}
assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));
if (irq_forward_table[irq_num].handle_task->state == BLOCKED) { if (irq_forward_table[irq_num].handle_task->state == BLOCKED) {
xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task); xizi_task_manager.task_unblock(irq_forward_table[irq_num].handle_task);
} }
@ -119,14 +125,15 @@ int sys_register_irq(int irq_num, int irq_opcode)
// init kerenl sender proxy // init kerenl sender proxy
if (kernel_irq_proxy == NULL) { if (kernel_irq_proxy == NULL) {
/// @todo handle corner cases /// @todo handle corner cases
struct MemSpace* pmemspace = alloc_memspace(); struct MemSpace* pmemspace = alloc_memspace("KernelIrqProxy");
if (pmemspace == NULL) { if (pmemspace == NULL) {
return -1; return -1;
} }
xizi_pager.new_pgdir(&pmemspace->pgdir); xizi_pager.new_pgdir(&pmemspace->pgdir);
memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE); memcpy(pmemspace->pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
kernel_irq_proxy = xizi_task_manager.new_task_cb(pmemspace); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
kernel_irq_proxy = tlo->new_thread(pmemspace);
kernel_irq_proxy->state = NEVER_RUN; kernel_irq_proxy->state = NEVER_RUN;
} }

View File

@ -22,7 +22,7 @@
#include "syscall.h" #include "syscall.h"
#include "task.h" #include "task.h"
extern bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, uint32_t sem_id); extern bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id);
int sys_semaphore(sys_sem_option op, int param) int sys_semaphore(sys_sem_option op, int param)
{ {
bool ret = false; bool ret = false;

View File

@ -38,7 +38,7 @@ extern int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintp
int sys_spawn(char* img_start, char* name, char** argv) int sys_spawn(char* img_start, char* name, char** argv)
{ {
// alloc a new memspace // alloc a new memspace
struct MemSpace* pmemspace = alloc_memspace(); struct MemSpace* pmemspace = alloc_memspace(name);
if (pmemspace == NULL) { if (pmemspace == NULL) {
return -1; return -1;
} }
@ -52,10 +52,16 @@ int sys_spawn(char* img_start, char* name, char** argv)
} }
// alloc a new pcb // alloc a new pcb
struct Thread* new_task_cb = xizi_task_manager.new_task_cb(pmemspace); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
struct Thread* new_task_cb = tlo->new_thread(pmemspace);
if (UNLIKELY(!new_task_cb)) { if (UNLIKELY(!new_task_cb)) {
ERROR("Unable to new task control block.\n"); ERROR("Unable to new task control block %x.\n");
// error task allocation may free memspace before hand
// @todo use task ref map to handle this scene
if (NULL != pmemspace->tag.inner_node) {
free_memspace(pmemspace); free_memspace(pmemspace);
}
return -1; return -1;
} }
assert(!IS_DOUBLE_LIST_EMPTY(&pmemspace->thread_list_guard)); assert(!IS_DOUBLE_LIST_EMPTY(&pmemspace->thread_list_guard));

View File

@ -30,6 +30,7 @@ Modification:
#include <stdint.h> #include <stdint.h>
#include <string.h> #include <string.h>
#include "actracer.h"
#include "assert.h" #include "assert.h"
#include "buddy.h" #include "buddy.h"
#include "log.h" #include "log.h"
@ -187,6 +188,10 @@ int sys_state(sys_state_option option, sys_state_info* info)
hw_current_second(&info->current_second); hw_current_second(&info->current_second);
break; break;
} }
case SYS_STATE_SHOW_ACTREE: {
debug_list_tracetree();
break;
}
case SYS_STATE_TEST: case SYS_STATE_TEST:
default: default:
break; break;

View File

@ -40,8 +40,9 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
struct ThreadStackPointer loaded_sp = load_user_stack(pmemspace, argv); struct ThreadStackPointer loaded_sp = load_user_stack(pmemspace, argv);
if (loaded_sp.stack_idx == -1) { if (loaded_sp.stack_idx == -1) {
ERROR("Uable to load params to memspace.\n"); ERROR("Uable to load params to memspace.\n");
/* memspace is freed alone with free_pcb() */ /* memspace is freed alone with free_thread() */
xizi_task_manager.free_pcb(task); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_thread(task);
return -1; return -1;
} }
@ -60,7 +61,7 @@ int sys_new_thread(struct MemSpace* pmemspace, struct Thread* task, uintptr_t en
last = name + 1; last = name + 1;
} }
} }
strncpy(task->name, last, sizeof(task->name)); strncpy(task->name, last, sizeof(task->name) - 1);
// init pcb schedule attributes // init pcb schedule attributes
xizi_task_manager.task_set_default_schedule_attr(task); xizi_task_manager.task_set_default_schedule_attr(task);
@ -80,7 +81,8 @@ int sys_thread(uintptr_t entry, char* name, char** argv)
// use current task's memspace // use current task's memspace
struct MemSpace* pmemspace = cur_task->memspace; struct MemSpace* pmemspace = cur_task->memspace;
struct Thread* task = xizi_task_manager.new_task_cb(pmemspace); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
struct Thread* task = tlo->new_thread(pmemspace);
if (UNLIKELY(!task)) { if (UNLIKELY(!task)) {
ERROR("Unable to new task control block.\n"); ERROR("Unable to new task control block.\n");
return -1; return -1;

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file sys_wait_session.c
* @brief
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: sys_wait_session.c
Description: client waits until its session is handled by the server
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include "multicores.h"
#include "share_page.h"
#include "syscall.h"
extern bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id);
/**
 * @brief Client side of an IPC call: validate the userland session, queue its
 *        backend on the owning server's to-be-handled list, wake the server if
 *        blocked, and wait on the session's client semaphore.
 *
 * @param userland_session session descriptor passed in from user space; its
 *        id and buf must match the client session recorded in cli_sess_map.
 * @return 0 on success; -1 after terminating the calling task via sys_exit()
 *         when the session is invalid or the server queue is full.
 */
int sys_wait_session(struct Session* userland_session)
{
    struct Thread* cur_task = cur_cpu()->task;

    RbtNode* client_session_node = rbt_search(&cur_task->cli_sess_map, userland_session->id);
    if (client_session_node == NULL) {
        ERROR("Error waiting session from %s: Invalid session %d\n", cur_task->name, userland_session->id);
        sys_exit(cur_task);
        return -1;
    }

    struct client_session* client_session = (struct client_session*)client_session_node->data;
    if (CLIENT_SESSION_BACKEND(client_session)->session_id != userland_session->id || //
        client_session->buf_addr != (uintptr_t)userland_session->buf) {
        /* log before exiting, consistent with the lookup-failure path above */
        ERROR("Error waiting session from %s: Invalid session %d\n", cur_task->name, userland_session->id);
        sys_exit(cur_task);
        return -1;
    }

    /* handle calling: hand the session to the server, then block until the
       server signals client_sem_to_wait */
    struct session_backend* session_backend = CLIENT_SESSION_BACKEND(client_session);
    struct Thread* server_to_call = session_backend->server;
    if (!enqueue(&server_to_call->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
        sys_exit(cur_task);
        return -1;
    }
    assert(!queue_is_empty(&server_to_call->sessions_to_be_handle));

    ksemaphore_wait(&xizi_task_manager.semaphore_pool, cur_task, session_backend->client_sem_to_wait);
    if (server_to_call->state == BLOCKED) {
        xizi_task_manager.task_unblock(session_backend->server);
    }
    return 0;
}

View File

@ -37,27 +37,5 @@ int sys_yield(task_yield_reason reason)
{ {
struct Thread* cur_task = cur_cpu()->task; struct Thread* cur_task = cur_cpu()->task;
xizi_task_manager.task_yield_noschedule(cur_task, false); xizi_task_manager.task_yield_noschedule(cur_task, false);
// handle ipc block
if ((reason & SYS_TASK_YIELD_BLOCK_IPC) != 0) {
if (cur_task->advance_unblock) {
cur_task->advance_unblock = false;
return 0;
} else {
xizi_task_manager.task_block(&xizi_task_manager.task_blocked_list_head, cur_task);
}
// wake up all possible server
struct client_session* client_session = NULL;
DOUBLE_LIST_FOR_EACH_ENTRY(client_session, &cur_task->cli_sess_listhead, node)
{
assert(client_session != NULL);
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(client_session);
if (session_backend->server->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
}
}
return 0; return 0;
} }

View File

@ -69,7 +69,7 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
ret = sys_state(param1, (sys_state_info*)param2); ret = sys_state(param1, (sys_state_info*)param2);
break; break;
case SYSCALL_MMAP: case SYSCALL_MMAP:
ret = sys_mmap((uintptr_t*)param1, (uintptr_t*)param2, (int)param3, (int)param4); ret = sys_mmap_v2((uintptr_t*)param1, (uintptr_t*)param2, (int)param3, (sys_mmap_info*)param4);
break; break;
case SYSCALL_REGISTER_IRQ: case SYSCALL_REGISTER_IRQ:
ret = sys_register_irq((int)param1, (int)param2); ret = sys_register_irq((int)param1, (int)param2);
@ -83,6 +83,9 @@ int syscall(int sys_num, uintptr_t param1, uintptr_t param2, uintptr_t param3, u
case SYSCALL_SLEEP: case SYSCALL_SLEEP:
ret = sys_sleep((intptr_t)param1); ret = sys_sleep((intptr_t)param1);
break; break;
case SYSCALL_WAIT_SESSION:
ret = sys_wait_session((struct Session*)param1);
break;
default: default:
ERROR("Unsurport syscall(%d) right now\n", sys_num); ERROR("Unsurport syscall(%d) right now\n", sys_num);
ret = -1; ret = -1;

View File

@ -41,7 +41,7 @@ Modification:
#define MAX_SUPPORT_PARAMS 32 #define MAX_SUPPORT_PARAMS 32
struct MemSpace* alloc_memspace() struct MemSpace* alloc_memspace(char* name)
{ {
struct MemSpace* pmemspace = slab_alloc(&xizi_task_manager.memspace_allocator); struct MemSpace* pmemspace = slab_alloc(&xizi_task_manager.memspace_allocator);
if (pmemspace == NULL) { if (pmemspace == NULL) {
@ -56,28 +56,51 @@ struct MemSpace* alloc_memspace()
pmemspace->mem_size = 0; pmemspace->mem_size = 0;
pmemspace->pgdir.pd_addr = 0; pmemspace->pgdir.pd_addr = 0;
pmemspace->thread_to_notify = NULL; pmemspace->thread_to_notify = NULL;
CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, NULL, TRACER_OWNER, (void*)pmemspace); if (!CreateResourceTag(&pmemspace->tag, &xizi_task_manager.tag, name, TRACER_OWNER, (void*)pmemspace)) {
DEBUG("Register MemSpace %s failed\n", name);
slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
return NULL;
}
assert(pmemspace->tag.inner_node != NULL);
if (!CreateResourceTag(&pmemspace->kernspace_mem_usage.tag, &pmemspace->tag, "MemUsage", TRACER_SYSOBJECT, (void*)&pmemspace->kernspace_mem_usage) || //
!CreateResourceTag(&pmemspace->userspace_mem_usage.tag, &pmemspace->tag, "UserMemUsage", TRACER_SYSOBJECT, (void*)&pmemspace->userspace_mem_usage) || //
!CreateResourceTag(&pmemspace->customized_mapping_mem_map.tag, &pmemspace->tag, "CustomizaedMemMapping", TRACER_SYSOBJECT, (void*)&pmemspace->customized_mapping_mem_map)) {
DEBUG("Register MemUsage %s failed\n", name);
slab_free(&xizi_task_manager.memspace_allocator, (void*)pmemspace);
DeleteResource(&pmemspace->tag, &xizi_task_manager.tag);
return NULL;
}
rbtree_init(&pmemspace->kernspace_mem_usage.mem_block_map);
rbtree_init(&pmemspace->userspace_mem_usage.mem_block_map);
rbtree_init(&pmemspace->customized_mapping_mem_map.mem_block_map);
return pmemspace; return pmemspace;
} }
void free_memspace(struct MemSpace* pmemspace) void free_memspace(struct MemSpace* pmemspace)
{ {
assert(pmemspace != NULL); assert(pmemspace != NULL);
assert(IS_DOUBLE_LIST_EMPTY(&pmemspace->thread_list_guard));
/* free page table and all its allocated memories */ /* free page table and all its allocated memories */
if (pmemspace->pgdir.pd_addr != NULL) { if (pmemspace->pgdir.pd_addr != NULL) {
xizi_pager.free_user_pgdir(&pmemspace->pgdir); xizi_pager.free_user_pgdir(&pmemspace->pgdir);
} }
TracerNode* tmp_node = NULL; // delete space
DOUBLE_LIST_FOR_EACH_ENTRY(tmp_node, &pmemspace->tag.meta->children_guard, list_node) RbtNode* rbt_node = pmemspace->kernspace_mem_usage.mem_block_map.root;
{ while (rbt_node != NULL) {
assert((uintptr_t)tmp_node->p_resource >= PHY_MEM_BASE && (uintptr_t)tmp_node->p_resource < PHY_MEM_STOP); assert((uintptr_t)V2P(rbt_node->key) >= PHY_MEM_BASE && (uintptr_t)V2P(rbt_node->key) < PHY_MEM_STOP);
if ((uintptr_t)tmp_node->p_resource < PHY_USER_FREEMEM_BASE) { kfree_by_ownership(pmemspace->kernspace_mem_usage.tag, (void*)rbt_node->key);
kfree(P2V(tmp_node->p_resource)); rbt_node = pmemspace->kernspace_mem_usage.mem_block_map.root;
} else {
raw_free(tmp_node->p_resource);
} }
rbt_node = pmemspace->userspace_mem_usage.mem_block_map.root;
while (rbt_node != NULL) {
assert((uintptr_t)rbt_node->key >= PHY_MEM_BASE && (uintptr_t)rbt_node->key < PHY_MEM_STOP);
raw_free_by_ownership(pmemspace->userspace_mem_usage.tag, (void*)rbt_node->key);
rbt_node = pmemspace->userspace_mem_usage.mem_block_map.root;
} }
/* free ipc virt address allocator */ /* free ipc virt address allocator */
@ -213,7 +236,7 @@ struct ThreadStackPointer load_user_stack(struct MemSpace* pmemspace, char** arg
} }
/* allocate memory space for user stack */ /* allocate memory space for user stack */
uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE); uintptr_t* stack_bottom = (uintptr_t*)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE);
if (UNLIKELY(stack_bottom == NULL)) { if (UNLIKELY(stack_bottom == NULL)) {
ERROR("No memory to alloc user stack.\n"); ERROR("No memory to alloc user stack.\n");
handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false); handle_error_stack_loading(pmemspace, stack_idx, stack_bottom, false);

View File

@ -28,7 +28,7 @@ Modification:
1. first version 1. first version
*************************************************/ *************************************************/
#include "log.h" #include "log.h"
#include "scheduler.h" #include "schedule_algo.h"
struct Thread* max_priority_runnable_task(void) struct Thread* max_priority_runnable_task(void)
{ {
@ -47,7 +47,9 @@ struct Thread* max_priority_runnable_task(void)
// found a runnable task, stop this look up // found a runnable task, stop this look up
return task; return task;
} else if (task->dead && task->state != RUNNING) { } else if (task->dead && task->state != RUNNING) {
xizi_task_manager.free_pcb(task);
struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_thread(task);
return NULL; return NULL;
} }
} }
@ -64,7 +66,8 @@ struct Thread* round_robin_runnable_task(uint32_t priority)
// found a runnable task, stop this look up // found a runnable task, stop this look up
return task; return task;
} else if (task->dead && task->state != RUNNING) { } else if (task->dead && task->state != RUNNING) {
xizi_task_manager.free_pcb(task); struct TaskLifecycleOperations* tlo = GetSysObject(struct TaskLifecycleOperations, &xizi_task_manager.task_lifecycle_ops_tag);
tlo->free_thread(task);
return NULL; return NULL;
} }
} }

View File

@ -24,29 +24,28 @@
void semaphore_pool_init(struct XiziSemaphorePool* sem_pool) void semaphore_pool_init(struct XiziSemaphorePool* sem_pool)
{ {
assert(sem_pool != NULL); assert(sem_pool != NULL);
sem_pool->next_sem_id = 1; sem_pool->next_sem_id = INVALID_SEM_ID + 1;
slab_init(&sem_pool->allocator, sizeof(struct ksemaphore)); slab_init(&sem_pool->allocator, sizeof(struct ksemaphore), "SemAllocator");
doubleListNodeInit(&sem_pool->sem_list_guard); doubleListNodeInit(&sem_pool->sem_list_guard);
rbtree_init(&sem_pool->sem_pool_map);
sem_pool->nr_sem = 0;
} }
static inline struct ksemaphore* ksemaphore_get_by_id(struct XiziSemaphorePool* sem_pool, int sem_id) static inline struct ksemaphore* ksemaphore_get_by_id(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
{ {
struct ksemaphore* sem = NULL; RbtNode* target_sem_node = rbt_search(&sem_pool->sem_pool_map, sem_id);
DOUBLE_LIST_FOR_EACH_ENTRY(sem, &sem_pool->sem_list_guard, sem_list_node) if (target_sem_node == NULL) {
{
if (sem->id == sem_id) {
return sem;
}
}
return NULL; return NULL;
}
return (struct ksemaphore*)target_sem_node->data;
} }
int ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, int val) sem_id_t ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, sem_val_t val)
{ {
struct ksemaphore* sem = (struct ksemaphore*)slab_alloc(&sem_pool->allocator); struct ksemaphore* sem = (struct ksemaphore*)slab_alloc(&sem_pool->allocator);
if (sem == NULL) { if (sem == NULL) {
ERROR("No memeory to alloc new semaphore.\n"); ERROR("No memeory to alloc new semaphore.\n");
return -1; return INVALID_SEM_ID;
} }
/* No error down here */ /* No error down here */
@ -55,19 +54,38 @@ int ksemaphore_alloc(struct XiziSemaphorePool* sem_pool, int val)
sem->id = sem_pool->next_sem_id++; sem->id = sem_pool->next_sem_id++;
if (UNLIKELY(sem->id == 0)) { if (UNLIKELY(sem->id == 0)) {
slab_free(&sem_pool->allocator, sem); slab_free(&sem_pool->allocator, sem);
return -1; return INVALID_SEM_ID;
} }
sem->val = val; sem->val = val;
doubleListNodeInit(&sem->sem_list_node); doubleListNodeInit(&sem->sem_list_node);
doubleListNodeInit(&sem->wait_list_guard); doubleListNodeInit(&sem->wait_list_guard);
/* list sem to sem_pool */ if (0 != rbt_insert(&sem_pool->sem_pool_map, sem->id, sem)) {
slab_free(&sem_pool->allocator, sem);
return INVALID_SEM_ID;
}
doubleListAddOnHead(&sem->sem_list_node, &sem_pool->sem_list_guard); doubleListAddOnHead(&sem->sem_list_node, &sem_pool->sem_list_guard);
sem_pool->nr_sem++;
return sem->id; return sem->id;
} }
bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, uint32_t sem_id) bool ksemaphore_consume(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, sem_val_t decre)
{
struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
// invalid sem id
if (sem == NULL) {
return false;
}
// if (decre >= 0) {
sem->val -= decre;
// }
return true;
}
bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, sem_id_t sem_id)
{ {
assert(thd != NULL); assert(thd != NULL);
assert(thd->state == RUNNING); assert(thd->state == RUNNING);
@ -77,6 +95,7 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, uin
if (sem == NULL) { if (sem == NULL) {
return false; return false;
} }
// DEBUG("%s waiting sem %lu(%d), nr_sem: %d I\n", thd->name, sem_id, sem->val, sem_pool->nr_sem);
// no need to wait // no need to wait
if (sem->val > 0) { if (sem->val > 0) {
@ -91,7 +110,7 @@ bool ksemaphore_wait(struct XiziSemaphorePool* sem_pool, struct Thread* thd, uin
return true; return true;
} }
bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, uint32_t sem_id) bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
{ {
/* find sem */ /* find sem */
struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id); struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
@ -112,7 +131,44 @@ bool ksemaphore_signal(struct XiziSemaphorePool* sem_pool, uint32_t sem_id)
return true; return true;
} }
bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, uint32_t sem_id) bool ksemaphore_signal_no_wake(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
{
/* find sem */
struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
// invalid sem id
if (sem == NULL) {
return false;
}
sem->val++;
return true;
}
bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id)
{
/* find sem */
struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
// invalid sem id
if (sem == NULL) {
return false;
}
struct Thread* thd = NULL;
DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node)
{
assert(thd != NULL && thd->state == BLOCKED);
xizi_task_manager.task_unblock(thd);
}
rbt_delete(&sem_pool->sem_pool_map, sem_id);
doubleListDel(&sem->sem_list_node);
slab_free(&sem_pool->allocator, sem);
sem_pool->nr_sem--;
return true;
}
bool ksemaphore_free_safe(struct XiziSemaphorePool* sem_pool, sem_id_t sem_id, struct Thread* target)
{ {
/* find sem */ /* find sem */
struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id); struct ksemaphore* sem = ksemaphore_get_by_id(sem_pool, sem_id);
@ -125,11 +181,15 @@ bool ksemaphore_free(struct XiziSemaphorePool* sem_pool, uint32_t sem_id)
DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node) DOUBLE_LIST_FOR_EACH_ENTRY(thd, &sem->wait_list_guard, node)
{ {
assert(thd != NULL); assert(thd != NULL);
if (target != thd && thd->state == BLOCKED) {
xizi_task_manager.task_unblock(thd); xizi_task_manager.task_unblock(thd);
} }
}
rbt_delete(&sem_pool->sem_pool_map, sem_id);
doubleListDel(&sem->sem_list_node); doubleListDel(&sem->sem_list_node);
slab_free(&sem_pool->allocator, sem); slab_free(&sem_pool->allocator, sem);
sem_pool->nr_sem--;
return true; return true;
} }

View File

@ -35,7 +35,7 @@ Modification:
#include "kalloc.h" #include "kalloc.h"
#include "memspace.h" #include "memspace.h"
#include "multicores.h" #include "multicores.h"
#include "scheduler.h" #include "schedule_algo.h"
#include "syscall.h" #include "syscall.h"
#include "task.h" #include "task.h"
#include "trap_common.h" #include "trap_common.h"
@ -43,6 +43,9 @@ Modification:
struct CPU global_cpus[NR_CPU]; struct CPU global_cpus[NR_CPU];
uint32_t ready_task_priority; uint32_t ready_task_priority;
struct GlobalTaskPool global_task_pool;
extern struct TaskLifecycleOperations task_lifecycle_ops;
static inline void task_node_leave_list(struct Thread* task) static inline void task_node_leave_list(struct Thread* task)
{ {
doubleListDel(&task->node); doubleListDel(&task->node);
@ -65,19 +68,30 @@ static inline void task_node_add_to_ready_list_back(struct Thread* task)
static void _task_manager_init() static void _task_manager_init()
{ {
assert(CreateResourceTag(&xizi_task_manager.task_lifecycle_ops_tag, &xizi_task_manager.tag, //
"TaskLifeCycleOpTool", TRACER_SYSOBJECT, (void*)&task_lifecycle_ops));
// init task list to NULL // init task list to NULL
for (int i = 0; i < TASK_MAX_PRIORITY; i++) { for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
doubleListNodeInit(&xizi_task_manager.task_list_head[i]); doubleListNodeInit(&xizi_task_manager.task_list_head[i]);
} }
/* task scheduling list */
doubleListNodeInit(&xizi_task_manager.task_blocked_list_head); doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
doubleListNodeInit(&xizi_task_manager.task_running_list_head); doubleListNodeInit(&xizi_task_manager.task_running_list_head);
doubleListNodeInit(&xizi_task_manager.task_sleep_list_head); doubleListNodeInit(&xizi_task_manager.task_sleep_list_head);
// init task (slab) allocator // init task (slab) allocator
slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace)); slab_init(&xizi_task_manager.memspace_allocator, sizeof(struct MemSpace), "MemlpaceCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread)); slab_init(&xizi_task_manager.task_allocator, sizeof(struct Thread), "ThreadCtrlBlockAllocator");
slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy)); slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy), "DMBuddyAllocator");
/* global semaphore factory */
semaphore_pool_init(&xizi_task_manager.semaphore_pool); semaphore_pool_init(&xizi_task_manager.semaphore_pool);
/* task pool */
doubleListNodeInit(&global_task_pool.thd_listing_head);
rbtree_init(&global_task_pool.thd_ref_map);
// tid pool // tid pool
xizi_task_manager.next_pid = 0; xizi_task_manager.next_pid = 0;
@ -85,104 +99,119 @@ static void _task_manager_init()
ready_task_priority = 0; ready_task_priority = 0;
} }
/// @brief alloc a new task without init extern void trap_return(void);
static struct Thread* _alloc_task_cb() __attribute__((optimize("O0"))) void task_prepare_enter()
{ {
// alloc task and add it to used task list xizi_leave_kernel();
struct Thread* task = (struct Thread*)slab_alloc(&xizi_task_manager.task_allocator); trap_return();
if (UNLIKELY(task == NULL)) {
ERROR("Not enough memory\n");
return NULL;
}
// set tid once task is allocated
memset(task, 0, sizeof(*task));
task->tid = xizi_task_manager.next_pid++;
task->thread_context.user_stack_idx = -1;
return task;
}
int _task_return_sys_resources(struct Thread* ptask)
{
assert(ptask != NULL);
/* handle sessions for condition 1, ref. delete_share_pages() */
struct session_backend* session_backend = NULL;
// close all server_sessions
struct server_session* server_session = NULL;
while (!IS_DOUBLE_LIST_EMPTY(&ptask->svr_sess_listhead)) {
server_session = CONTAINER_OF(ptask->svr_sess_listhead.next, struct server_session, node);
assert(server_session != NULL);
session_backend = SERVER_SESSION_BACKEND(server_session);
assert(session_backend->server == ptask);
// cut the connection from task to session
server_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend);
}
// close all client_sessions
struct client_session* client_session = NULL;
while (!IS_DOUBLE_LIST_EMPTY(&ptask->cli_sess_listhead)) {
client_session = CONTAINER_OF(ptask->cli_sess_listhead.next, struct client_session, node);
assert(client_session != NULL);
session_backend = CLIENT_SESSION_BACKEND(client_session);
assert(session_backend->client == ptask);
// cut the connection from task to session
client_session->closed = true;
xizi_share_page_manager.delete_share_pages(session_backend);
}
if (ptask->server_identifier.meta != NULL) {
struct TraceTag server_identifier_owner;
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
assert(server_identifier_owner.meta != NULL);
DeleteResource(&ptask->server_identifier, &server_identifier_owner);
}
// delete registered irq if there is one
if (ptask->bind_irq) {
sys_unbind_irq_all(ptask);
}
return 0;
} }
/// @brief this function changes task list without locking, so it must be called inside a lock critical area /// @brief this function changes task list without locking, so it must be called inside a lock critical area
/// @param task /// @param task
static void _dealloc_task_cb(struct Thread* task) static void _free_thread(struct Thread* task)
{ {
if (UNLIKELY(task == NULL)) { if (UNLIKELY(task == NULL)) {
ERROR("deallocating a NULL task\n"); ERROR("deallocating a NULL task\n");
return; return;
} }
assert(task->state >= INIT);
assert(task->memspace != NULL);
_task_return_sys_resources(task); // ignore [name, tid, dead, ]
// case thread context [kern_stack_addr, ]
/* 1. close all ipcall sessions */
ERROR_FREE
{
/* handle sessions for condition 1, ref. delete_share_pages() */
// close all server_sessions
while (!IS_DOUBLE_LIST_EMPTY(&task->svr_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->svr_sess_map.root;
struct server_session* svr_session = CONTAINER_OF(task->svr_sess_listhead.next, struct server_session, node);
server_close_session(task, svr_session);
}
// close all client_sessions
while (!IS_DOUBLE_LIST_EMPTY(&task->cli_sess_listhead)) {
// RbtNode* sess_ref_node = ptask->cli_sess_map.root;
struct client_session* cli_session = CONTAINER_OF(task->cli_sess_listhead.next, struct client_session, node);
client_close_session(task, cli_session);
// info server that session is closed
struct session_backend* session_backend = CLIENT_SESSION_BACKEND(cli_session);
struct Thread* server_to_info = session_backend->server;
if (!enqueue(&server_to_info->sessions_to_be_handle, 0, (void*)&session_backend->server_side)) {
// @todo fix memory leak
} else {
assert(!queue_is_empty(&server_to_info->sessions_to_be_handle));
if (server_to_info->state == BLOCKED) {
xizi_task_manager.task_unblock(session_backend->server);
}
}
}
assert(IS_DOUBLE_LIST_EMPTY(&task->svr_sess_listhead));
assert(IS_DOUBLE_LIST_EMPTY(&task->cli_sess_listhead));
// assert(rbt_is_empty(&task->svr_sess_map));
// assert(rbt_is_empty(&task->cli_sess_map));
/// @todo handle server transition
/* delete server identifier */
if (task->server_identifier.inner_node != NULL) {
// @todo figure out server-identifier ownership
struct TraceTag server_identifier_owner;
AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier");
assert(server_identifier_owner.inner_node != NULL);
assert(DeleteResource(&task->server_identifier, &server_identifier_owner));
}
}
/* 2. quit interrupt handling */
ERROR_FREE
{
// delete registered irq if there is one
if (task->bind_irq) {
sys_unbind_irq_all(task);
}
}
/* 3. quit schedule */
ERROR_FREE
{
// remove thread from used task list
task_node_leave_list(task);
/// ignore [ticks, sleep context, state]
}
/* 3. free context */
ERROR_FREE
{
/* free thread's kernel stack */
assert(task->thread_context.kern_stack_addr != (uintptr_t)NULL);
assert(kfree_by_ownership(task->memspace->kernspace_mem_usage.tag, (void*)task->thread_context.kern_stack_addr));
/* free thread's user stack */ /* free thread's user stack */
if (task->thread_context.user_stack_idx != -1) { if (task->thread_context.user_stack_idx != -1) {
// stack is mapped in vspace, so it should be freed from pgdir // stack is mapped in vspace, so it should be freed from pgdir
assert(task->thread_context.user_stack_idx >= 0 && task->thread_context.user_stack_idx < 64);
assert(task->memspace != NULL); assert(task->memspace != NULL);
assert(task->thread_context.user_stack_idx >= 0 && task->thread_context.user_stack_idx < 64);
/* the stack must have be set in memspace if bitmap has been set */ /* the stack must have be set in memspace if bitmap has been set */
assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task->thread_context.uspace_stack_addr, USER_STACK_SIZE)); assert(xizi_pager.unmap_pages(task->memspace->pgdir.pd_addr, task->thread_context.uspace_stack_addr, USER_STACK_SIZE));
bitmap64_free(&task->memspace->thread_stack_idx_bitmap, task->thread_context.user_stack_idx); bitmap64_free(&task->memspace->thread_stack_idx_bitmap, task->thread_context.user_stack_idx);
/* thread's user stack space is also allocated for kernel free space */ /* thread's user stack space is also allocated for kernel free space */
assert(kfree((char*)task->thread_context.ustack_kvaddr)); assert(kfree_by_ownership(task->memspace->kernspace_mem_usage.tag, (char*)task->thread_context.ustack_kvaddr));
if (task->memspace != NULL) {
task->memspace->mem_size -= USER_STACK_SIZE;
} }
} }
/* free thread's kernel stack */
if (task->thread_context.kern_stack_addr) {
kfree((char*)task->thread_context.kern_stack_addr);
}
/* free memspace if needed to */ /* free memspace if needed to */
if (task->memspace != NULL) { doubleListDel(&task->memspace_list_node);
/* free memspace if thread is the last one using it */
if (IS_DOUBLE_LIST_EMPTY(&task->memspace->thread_list_guard)) {
// free memspace
free_memspace(task->memspace);
} else if (task->memspace->thread_to_notify != NULL) {
// awake deamon in this memspace // awake deamon in this memspace
if (task->memspace->thread_to_notify != NULL) {
if (task->memspace->thread_to_notify != task) { if (task->memspace->thread_to_notify != task) {
if (task->memspace->thread_to_notify->state == BLOCKED) { if (task->memspace->thread_to_notify->state == BLOCKED) {
xizi_task_manager.task_unblock(task->memspace->thread_to_notify); xizi_task_manager.task_unblock(task->memspace->thread_to_notify);
@ -194,82 +223,89 @@ static void _dealloc_task_cb(struct Thread* task)
} }
} }
doubleListDel(&task->memspace_list_node);
/* free memspace if thread is the last one using it */
if (IS_DOUBLE_LIST_EMPTY(&task->memspace->thread_list_guard)) {
// free memspace
free_memspace(task->memspace);
}
}
// remove thread from used task list
task_node_leave_list(task);
// free task back to allocator // free task back to allocator
ERROR_FREE
{
slab_free(&xizi_task_manager.task_allocator, (void*)task); slab_free(&xizi_task_manager.task_allocator, (void*)task);
}
} }
/* alloc a new task with init */ /* alloc a new task with init */
extern void trap_return(void); static struct Thread* _new_thread(struct MemSpace* pmemspace)
__attribute__((optimize("O0"))) void task_prepare_enter()
{ {
xizi_leave_kernel();
trap_return();
}
static struct Thread* _new_task_cb(struct MemSpace* pmemspace)
{
// alloc task space
struct Thread* task = _alloc_task_cb();
if (!task) {
return NULL;
}
/* init basic task member */
doubleListNodeInit(&task->cli_sess_listhead);
doubleListNodeInit(&task->svr_sess_listhead);
/* when creating a new task, memspace will be freed outside during memory shortage */
task->memspace = NULL;
/* init main thread of task */
task->thread_context.task = task;
// alloc stack page for task
if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc(USER_STACK_SIZE)) == NULL) {
/* here inside, will no free memspace */
_dealloc_task_cb(task);
return NULL;
}
/* from now on, _new_task_cb() will not generate error */
/* init vm */
assert(pmemspace != NULL); assert(pmemspace != NULL);
task->memspace = pmemspace;
// alloc task space
struct Thread* task = (struct Thread*)slab_alloc(&xizi_task_manager.task_allocator);
if (task == NULL) {
ERROR("Not enough memory\n");
return NULL;
}
// alloc stack page for task
if ((void*)(task->thread_context.kern_stack_addr = (uintptr_t)kalloc_by_ownership(pmemspace->kernspace_mem_usage.tag, USER_STACK_SIZE)) == NULL) {
/* here inside, will no free memspace */
slab_free(&xizi_task_manager.task_allocator, (void*)task);
return NULL;
}
ERROR_FREE
{
/* init basic task ref member */
task->tid = xizi_task_manager.next_pid++;
task->bind_irq = false;
/* vm & memory member */
task->thread_context.user_stack_idx = -1; task->thread_context.user_stack_idx = -1;
task->memspace = pmemspace;
doubleListNodeInit(&task->memspace_list_node); doubleListNodeInit(&task->memspace_list_node);
doubleListAddOnBack(&task->memspace_list_node, &pmemspace->thread_list_guard); doubleListAddOnBack(&task->memspace_list_node, &pmemspace->thread_list_guard);
/* set context of main thread stack */ /* thread context */
/// stack bottom task->thread_context.task = task;
memset((void*)task->thread_context.kern_stack_addr, 0x00, USER_STACK_SIZE); memset((void*)task->thread_context.kern_stack_addr, 0x00, USER_STACK_SIZE);
/// stack bottom
char* sp = (char*)task->thread_context.kern_stack_addr + USER_STACK_SIZE - 4; char* sp = (char*)task->thread_context.kern_stack_addr + USER_STACK_SIZE - 4;
/// 1. trap frame into stack, for process to nomally return by trap_return /// 1. trap frame into stack, for process to nomally return by trap_return
/// trapframe (user context)
sp -= sizeof(*task->thread_context.trapframe); sp -= sizeof(*task->thread_context.trapframe);
task->thread_context.trapframe = (struct trapframe*)sp; task->thread_context.trapframe = (struct trapframe*)sp;
/// 2. context into stack /// 2. context into stack
// (kernel context)
sp -= sizeof(*task->thread_context.context); sp -= sizeof(*task->thread_context.context);
task->thread_context.context = (struct context*)sp; task->thread_context.context = (struct context*)sp;
arch_init_context(task->thread_context.context); arch_init_context(task->thread_context.context);
/* ipc member */
doubleListNodeInit(&task->cli_sess_listhead);
doubleListNodeInit(&task->svr_sess_listhead);
rbtree_init(&task->cli_sess_map);
rbtree_init(&task->svr_sess_map);
queue_init(&task->sessions_in_handle);
queue_init(&task->sessions_to_be_handle);
/// server identifier
task->server_identifier.inner_node = NULL;
}
// [name]
// [schedule related]
task->state = INIT;
return task; return task;
} }
struct TaskLifecycleOperations task_lifecycle_ops = {
.new_thread = _new_thread,
.free_thread = _free_thread,
};
static void _task_set_default_schedule_attr(struct Thread* task) static void _task_set_default_schedule_attr(struct Thread* task)
{ {
task->remain_tick = TASK_CLOCK_TICK; task->remain_tick = TASK_CLOCK_TICK;
task->maxium_tick = TASK_CLOCK_TICK * 10; task->maxium_tick = TASK_CLOCK_TICK * 10;
task->dead = false;
task->state = READY; task->state = READY;
task->priority = TASK_DEFAULT_PRIORITY; task->priority = TASK_DEFAULT_PRIORITY;
task_node_add_to_ready_list_head(task); task_node_add_to_ready_list_head(task);
@ -288,7 +324,7 @@ extern void context_switch(struct context**, struct context*);
static void _scheduler(struct SchedulerRightGroup right_group) static void _scheduler(struct SchedulerRightGroup right_group)
{ {
struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag); struct MmuCommonDone* p_mmu_driver = AchieveResource(&right_group.mmu_driver_tag);
// struct XiziTrapDriver* p_intr_driver = AchieveResource(&right_group.intr_driver_tag); // struct XiziTrapDriver* p_intr_driver = AchieveResource(&right_group.intr_driver_tag);
struct Thread* next_task; struct Thread* next_task;
struct CPU* cpu = cur_cpu(); struct CPU* cpu = cur_cpu();
@ -379,8 +415,6 @@ static void _set_cur_task_priority(int priority)
struct XiziTaskManager xizi_task_manager = { struct XiziTaskManager xizi_task_manager = {
.init = _task_manager_init, .init = _task_manager_init,
.new_task_cb = _new_task_cb,
.free_pcb = _dealloc_task_cb,
.task_set_default_schedule_attr = _task_set_default_schedule_attr, .task_set_default_schedule_attr = _task_set_default_schedule_attr,
.next_runnable_task = max_priority_runnable_task, .next_runnable_task = max_priority_runnable_task,

View File

@ -0,0 +1,4 @@
SRC_FILES := queue.c rbtree.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,79 @@
#include "actracer.h"
#include "assert.h"
#include "queue.h"
/* Global factory for queue nodes: owns the slab allocator that backs every
 * QueueNode in the kernel, plus its resource-tracer tag. */
struct QueueFactory {
    TraceTag tag; /* tracer handle registered under the softkernel tag */
    struct slab_allocator queue_ele_allocator; /* fixed-size allocator for struct QueueNode */
};
/* Single process-wide instance; initialized by module_queue_factory_init(). */
static struct QueueFactory queue_factory;
/* One-time module init: registers the queue factory with the resource tracer
 * under _softkernel_tag and sets up the QueueNode slab allocator.
 * Must run before any enqueue() call (enqueue allocates from this slab).
 * NOTE(review): return value of CreateResourceTag is not checked — presumably
 * registration cannot fail at boot; confirm against the tracer API. */
void module_queue_factory_init(TraceTag* _softkernel_tag)
{
    CreateResourceTag(&queue_factory.tag, _softkernel_tag, "GlobalQueueFactory", TRACER_SYSOBJECT, &queue_factory);
    slab_init(&queue_factory.queue_ele_allocator, sizeof(struct QueueNode), "QueueNodeAllocator");
}
/* Reset a queue to the empty state (no nodes, zero element count). */
void queue_init(Queue* queue)
{
    queue->nr_ele = 0;
    queue->front = queue->rear = NULL;
}
/* Peek at the head node without removing it; NULL when the queue is empty.
 * The returned node is owned by the queue — do not free it directly. */
struct QueueNode* queue_front(Queue* queue)
{
    return queue->front;
}
/* True when the queue holds no elements; also sanity-checks that the
 * element counter agrees with the empty list. */
bool queue_is_empty(Queue* queue)
{
    if (queue->front != NULL) {
        return false;
    }
    assert(queue->nr_ele == 0);
    return true;
}
/* Pop the head node and return it to the slab allocator.
 * Returns false on an empty queue. Callers that need the payload must read
 * it via queue_front() before calling dequeue(). */
bool dequeue(Queue* queue)
{
    if (queue->front == NULL) {
        return false;
    }
    struct QueueNode* head = queue->front;
    if (head == queue->rear) {
        /* last element: queue becomes empty */
        queue->front = queue->rear = NULL;
    } else {
        queue->front = head->next;
    }
    queue->nr_ele--;
    slab_free(&queue_factory.queue_ele_allocator, (void*)head);
    return true;
}
/* Append a (key, data) pair at the tail. Returns false when the node slab
 * is exhausted; the queue is left unchanged in that case. */
bool enqueue(Queue* queue, uintptr_t key, void* data)
{
    struct QueueNode* node = (struct QueueNode*)slab_alloc(&queue_factory.queue_ele_allocator);
    if (node == NULL) {
        return false;
    }
    node->key = key;
    node->data = data;
    node->next = NULL;
    if (queue->front == NULL && queue->rear == NULL) {
        /* first element: head and tail both point at it */
        queue->front = node;
        queue->rear = node;
    } else {
        queue->rear->next = node;
        queue->rear = node;
    }
    queue->nr_ele++;
    return true;
}

View File

@ -0,0 +1,467 @@
#include <stddef.h>
#include "assert.h"
#include "rbtree.h"
/* Global factory for red-black tree nodes: owns the slab allocator backing
 * every RbtNode, plus its resource-tracer tag. */
struct RbtFactory {
    TraceTag tag; /* tracer handle registered under the softkernel tag */
    struct slab_allocator rbtnode_ele_allocator; /* fixed-size allocator for struct RbtNode */
};
/* Single process-wide instance; initialized by module_rbt_factory_init(). */
static struct RbtFactory rbt_factory;
/* One-time module init: registers the rbtree factory with the resource tracer
 * and sets up the RbtNode slab allocator. Must run before any rbt_insert().
 * NOTE(review): CreateResourceTag's result is unchecked — assumed infallible
 * at boot; confirm against the tracer API. */
void module_rbt_factory_init(TraceTag* _softkernel_tag)
{
    CreateResourceTag(&rbt_factory.tag, _softkernel_tag, "GlobalRbtFactory", TRACER_SYSOBJECT, &rbt_factory);
    slab_init(&rbt_factory.rbtnode_ele_allocator, sizeof(struct RbtNode), "RbtNodeAllocator");
}
/* Forward declarations for the six mutually-recursive deletion-rebalance
 * cases (standard red-black "double black" fixup, cases 1-6). */
void delete_case1(RbtTree* tree, RbtNode* node);
void delete_case2(RbtTree* tree, RbtNode* node);
void delete_case3(RbtTree* tree, RbtNode* node);
void delete_case4(RbtTree* tree, RbtNode* node);
void delete_case5(RbtTree* tree, RbtNode* node);
void delete_case6(RbtTree* tree, RbtNode* node);
/* Color of a node; NULL children act as BLACK sentinel leaves. */
static inline enum rbt_type get_color(RbtNode* node)
{
    return (node == NULL) ? BLACK : node->color;
}
/* Set a node's color; node must be a real node (not a NULL leaf). */
static inline void set_color(enum rbt_type color, RbtNode* node)
{
    assert(node != NULL);
    node->color = color;
}
/* Parent accessor; node must be non-NULL (root returns NULL parent). */
static inline RbtNode* get_parent(RbtNode* node)
{
    assert(node != NULL);
    return node->parent;
}
/* Set node's parent pointer; parent may be NULL (making node the root). */
static inline void set_parent(RbtNode* parent, RbtNode* node)
{
    assert(node != NULL);
    node->parent = parent;
}
/* A node is the root iff it has no parent. */
static int is_root(RbtNode* node)
{
    assert(node != NULL);
    return (get_parent(node) == NULL);
}
/* True iff the (non-NULL) node is colored BLACK. */
static inline int is_black(RbtNode* node)
{
    assert(node != NULL);
    return (get_color(node) == BLACK);
}
/* True iff the (non-NULL) node is colored RED. */
static inline int is_red(RbtNode* node)
{
    assert(node != NULL);
    return (get_color(node) == RED);
}
/* The other child of node's parent (may be NULL). The root, having no
 * parent, has no sibling — asserted. */
RbtNode* sibling(RbtNode* node)
{
    assert(node != NULL);
    RbtNode* par = node->parent;
    assert(par != NULL); /* Root node has no sibling */
    return (par->left == node) ? par->right : par->left;
}
/* Leftmost (minimum-key) node of the subtree rooted at node. */
static inline RbtNode* get_min(RbtNode* node)
{
    assert(node != NULL);
    for (; node->left != NULL; node = node->left) {
        /* descend left */
    }
    return node;
}
/* Rightmost (maximum-key) node of the subtree rooted at node. */
static inline RbtNode* get_max(RbtNode* node)
{
    assert(node != NULL);
    for (; node->right != NULL; node = node->right) {
        /* descend right */
    }
    return node;
}
/* Minimum-key node of the whole tree, or NULL for an empty tree. */
RbtNode* rbtree_min(RbtTree* tree)
{
    return (tree->root == NULL) ? NULL : get_min(tree->root);
}
/* Maximum-key node of the whole tree, or NULL for an empty tree. */
RbtNode* rbtree_max(RbtTree* tree)
{
    return (tree->root == NULL) ? NULL : get_max(tree->root);
}
/* In-order predecessor of node, or NULL if node holds the smallest key. */
RbtNode* rbtree_prev(RbtNode* node)
{
    assert(node != NULL);
    if (node->left) {
        /* predecessor is the maximum of the left subtree */
        return get_max(node->left);
    } else {
        /* climb while we are a left child; the first ancestor reached from
         * its right side is the predecessor (NULL past the root). */
        RbtNode* parent;
        while ((parent = get_parent(node)) && parent->left == node) {
            node = parent;
        }
        return parent;
    }
}
/* In-order successor of node, or NULL if node holds the largest key. */
RbtNode* rbtree_next(RbtNode* node)
{
    assert(node != NULL);
    if (node->right)
        /* successor is the minimum of the right subtree */
        return get_min(node->right);
    else {
        /* climb while we are a right child; the first ancestor reached from
         * its left side is the successor (NULL past the root). */
        RbtNode* parent = NULL;
        while ((parent = get_parent(node)) != NULL && parent->right == node) {
            node = parent;
        }
        return parent;
    }
}
RbtNode* rbtree_createnode(uintptr_t key, void* data)
{
RbtNode* newnode = slab_alloc(&rbt_factory.rbtnode_ele_allocator);
if (newnode == NULL)
return NULL;
newnode->key = key;
newnode->data = data;
newnode->parent = NULL;
newnode->left = NULL;
newnode->right = NULL;
return newnode;
}
/* Three-way key comparison: 1 if a > b, 0 if equal, -1 if a < b. */
static inline int compare(uintptr_t key_a, uintptr_t key_b)
{
    if (key_a == key_b) {
        return 0;
    }
    return (key_a > key_b) ? 1 : -1;
}
/* Core BST descent: find the node whose key equals `key`, or NULL.
 * When pparent is non-NULL, *pparent is updated at every step of the descent,
 * so on a miss it holds the would-be parent for an insertion (it is left
 * untouched when the tree is empty — callers initialize it to NULL). */
RbtNode* do_lookup(uintptr_t key,
    RbtTree* tree,
    RbtNode** pparent)
{
    RbtNode* current = tree->root;

    while (current) {
        int ret = compare(current->key, key);
        if (ret == 0)
            return current;
        else {
            if (pparent != NULL) {
                *pparent = current;
            }
            /* ret < 0 means current->key < key: go right, else left */
            if (ret < 0)
                current = current->right;
            else
                current = current->left;
        }
    }
    return NULL;
}
/* Public lookup: node holding `key`, or NULL when absent. */
RbtNode* rbt_search(RbtTree* tree, uintptr_t key)
{
    return do_lookup(key, tree, NULL);
}
/* Attach `child` under `node` on the side dictated by key order.
 * Keys must differ (asserted). NOTE(review): the `tree` parameter is unused
 * here — kept only for signature symmetry with the other helpers. */
static void set_child(RbtTree* tree, RbtNode* node, RbtNode* child)
{
    int ret = compare(node->key, child->key);
    assert(ret != 0);
    if (ret > 0) {
        node->left = child;
    } else {
        node->right = child;
    }
}
/* Left rotation around `node`: its right child q takes node's place,
 * node becomes q's left child, and q's former left subtree becomes
 * node's right subtree. Updates tree->root when node was the root. */
static void rotate_left(RbtNode* node, RbtTree* tree)
{
    RbtNode* p = node;
    RbtNode* q = node->right;
    RbtNode* parent = node->parent;
    /* re-hang q where p used to be */
    if (parent == NULL) {
        tree->root = q;
    } else {
        if (parent->left == p)
            parent->left = q;
        else
            parent->right = q;
    }
    set_parent(parent, q);
    set_parent(q, p);

    /* move q's left subtree under p's right slot */
    p->right = q->left;
    if (q->left)
        set_parent(p, q->left);
    q->left = p;
}
/* Right rotation around `node` (mirror of rotate_left): its left child q
 * takes node's place and node becomes q's right child. */
static void rotate_right(RbtNode* node, RbtTree* tree)
{
    RbtNode* p = node;
    RbtNode* q = node->left; /* can't be NULL */
    RbtNode* parent = get_parent(p);

    /* re-hang q where p used to be */
    if (!is_root(p)) {
        if (parent->left == p)
            parent->left = q;
        else
            parent->right = q;
    } else
        tree->root = q;
    set_parent(parent, q);
    set_parent(q, p);

    /* move q's right subtree under p's left slot */
    p->left = q->right;
    if (p->left)
        set_parent(p, p->left);
    q->right = p;
}
/* Reset a tree to the empty state. */
void rbtree_init(RbtTree* tree)
{
    tree->nr_ele = 0;
    tree->root = NULL;
}
/* Insert a detached node into the tree and restore red-black invariants.
 * Returns the existing node if the key is already present (node untouched),
 * NULL on successful insertion. Standard red-uncle recolor / rotation fixup. */
RbtNode* __rbtree_insert(RbtNode* node, RbtTree* tree)
{
    RbtNode* samenode = NULL;
    RbtNode* parent = NULL;

    /* descend to find the attachment point; bail out on duplicate key */
    samenode = do_lookup(node->key, tree, &parent);
    if (samenode != NULL)
        return samenode;

    /* link the node in as a red leaf */
    node->left = node->right = NULL;
    set_color(RED, node);
    set_parent(parent, node);

    if (parent == NULL)
        tree->root = node;
    else {
        set_child(tree, parent, node);
    }

    /* fixup: a red node with a red parent violates the red-red rule */
    while ((parent = get_parent(node)) != NULL && parent->color == RED) {
        RbtNode* grandpa = get_parent(parent); // grandpa must exist:
        // the root is black and parent is red, so parent is not the root;
        // and since parent is red, grandpa must be black
        if (parent == grandpa->left) {
            RbtNode* uncle = grandpa->right;
            if (uncle && get_color(uncle) == RED) {
                /* case: red uncle — recolor and continue from grandpa */
                set_color(RED, grandpa);
                set_color(BLACK, parent);
                set_color(BLACK, uncle);
                node = grandpa;
            } else {
                /* case: black uncle — rotate into the outer configuration */
                if (node == parent->right) {
                    rotate_left(parent, tree);
                    node = parent;
                    parent = get_parent(parent);
                }
                set_color(BLACK, parent);
                set_color(RED, grandpa);
                rotate_right(grandpa, tree);
            }
        } else {
            /* mirror of the branch above */
            RbtNode* uncle = grandpa->left;
            if (uncle && uncle->color == RED) {
                set_color(RED, grandpa);
                set_color(BLACK, parent);
                set_color(BLACK, uncle);
                node = grandpa;
            } else {
                if (node == parent->left) {
                    rotate_right(parent, tree);
                    node = parent;
                    parent = get_parent(node);
                }
                set_color(BLACK, parent);
                set_color(RED, grandpa);
                rotate_left(grandpa, tree);
            }
        }
    }
    /* the root is always black */
    set_color(BLACK, tree->root);
    return NULL;
}
/* Insert (key, data) into the tree.
 * Returns 0 on success, -2 when the key already exists, -1 on allocation
 * failure. On any error the tree is unchanged.
 * Fix: the original called rbt_search() and then __rbtree_insert(), which
 * performs the same do_lookup() descent again — two full traversals per
 * insert. __rbtree_insert already reports duplicates via its return value,
 * so use that and free the node on the duplicate path. */
int rbt_insert(RbtTree* tree, uintptr_t key, void* data)
{
    RbtNode* node = rbtree_createnode(key, data);
    if (node == NULL)
        return -1;

    RbtNode* samenode = __rbtree_insert(node, tree);
    if (samenode != NULL) {
        /* key already present: give the unused node back to the slab */
        slab_free(&rbt_factory.rbtnode_ele_allocator, (void*)node);
        return -2;
    }

    tree->nr_ele++;
    return 0;
}
/* Splice newn into oldn's position under oldn's parent (newn may be NULL).
 * Only parent/child links are touched; colors are the caller's business. */
void replace_node(RbtTree* t, RbtNode* oldn, RbtNode* newn)
{
    RbtNode* par = oldn->parent;
    if (par == NULL) {
        t->root = newn;
    } else if (par->left == oldn) {
        par->left = newn;
    } else {
        par->right = newn;
    }
    if (newn != NULL) {
        newn->parent = par;
    }
}
/* Deletion fixup case 1: the double-black reached the root — nothing to do;
 * otherwise continue with case 2. */
void delete_case1(RbtTree* tree, RbtNode* node)
{
    if (node->parent != NULL) {
        delete_case2(tree, node);
    }
}
/* Deletion fixup case 2: red sibling — rotate so the sibling becomes black,
 * then fall through to case 3. */
void delete_case2(RbtTree* tree, RbtNode* node)
{
    if (get_color(sibling(node)) == RED) {
        node->parent->color = RED;
        sibling(node)->color = BLACK;
        if (node == node->parent->left) {
            rotate_left(node->parent, tree);
        } else {
            rotate_right(node->parent, tree);
        }
    }
    delete_case3(tree, node);
}
/* Deletion fixup case 3: black parent, black sibling with two black children
 * — recolor the sibling red and push the double-black up to the parent.
 * NOTE(review): sibling(node)->left/right is dereferenced without a NULL
 * check; this relies on the RB invariant that a double-black node's sibling
 * is a real node — holds for trees built only through this module's API. */
void delete_case3(RbtTree* tree, RbtNode* node)
{
    if (node->parent->color == BLACK && get_color(sibling(node)) == BLACK && get_color(sibling(node)->right) == BLACK && get_color(sibling(node)->left) == BLACK) {
        sibling(node)->color = RED;
        delete_case1(tree, node->parent);
    } else {
        delete_case4(tree, node);
    }
}
/* Deletion fixup case 4: red parent, black sibling with two black children
 * — swap colors between parent and sibling, absorbing the extra black. */
void delete_case4(RbtTree* t, RbtNode* n)
{
    if (get_color(n->parent) == RED && get_color(sibling(n)) == BLACK && get_color(sibling(n)->left) == BLACK && get_color(sibling(n)->right) == BLACK) {
        sibling(n)->color = RED; // sibling's two sons are black, so it can be changed to red
        n->parent->color = BLACK;
    } else
        delete_case5(t, n);
}
/* Deletion fixup case 5: black sibling whose near child is red and far child
 * is black — rotate the sibling so the red child moves to the far side,
 * setting up case 6 (which always follows). */
void delete_case5(RbtTree* t, RbtNode* n)
{
    if (n == n->parent->left && get_color(sibling(n)) == BLACK && get_color(sibling(n)->left) == RED && get_color(sibling(n)->right) == BLACK) {
        sibling(n)->color = RED;
        sibling(n)->left->color = BLACK;
        rotate_right(sibling(n), t);
    } else if (n == n->parent->right && get_color(sibling(n)) == BLACK && get_color(sibling(n)->right) == RED && get_color(sibling(n)->left) == BLACK) {
        sibling(n)->color = RED;
        sibling(n)->right->color = BLACK;
        rotate_left(sibling(n), t);
    }
    delete_case6(t, n);
}
/* Deletion fixup case 6 (terminal): black sibling with a red far child —
 * rotate at the parent and recolor; this discharges the double-black. */
void delete_case6(RbtTree* t, RbtNode* n)
{
    sibling(n)->color = get_color(n->parent);
    n->parent->color = BLACK;
    if (n == n->parent->left) {
        assert(get_color(sibling(n)->right) == RED);
        sibling(n)->right->color = BLACK;
        rotate_left(n->parent, t);
    } else {
        assert(get_color(sibling(n)->left) == RED);
        sibling(n)->left->color = BLACK;
        rotate_right(n->parent, t);
    }
}
/* Unlink `node` from the tree, rebalance, and free it back to the slab.
 * A node with two children is first reduced to its in-order successor by
 * copying the successor's key/data into it, then the successor (which has
 * at most one child) is physically removed. */
void __rbtree_remove(RbtNode* node, RbtTree* tree)
{
    RbtNode* left = node->left;
    RbtNode* right = node->right;
    RbtNode* child = NULL;
    if (left != NULL && right != NULL) {
        /* two children: steal the successor's payload, then delete it */
        RbtNode* next = get_min(right);
        node->key = next->key;
        node->data = next->data;
        node = next;
    }

    assert(node->left == NULL || node->right == NULL);
    child = (node->right == NULL ? node->left : node->right);
    if (get_color(node) == BLACK) {
        /* removing a black node unbalances black heights: run the fixup */
        set_color(get_color(child), node);
        delete_case1(tree, node);
    }
    replace_node(tree, node, child);
    if (node->parent == NULL && child != NULL) // node was the root; the root must stay black
        set_color(BLACK, child);
    slab_free(&rbt_factory.rbtnode_ele_allocator, (void*)node);
}
/* Remove the node holding `key`.
 * Returns 0 on success, -1 when the key is not present. */
int rbt_delete(RbtTree* tree, uintptr_t key)
{
    RbtNode* target = do_lookup(key, tree, NULL);
    if (target == NULL) {
        return -1;
    }
    __rbtree_remove(target, tree);
    tree->nr_ele--;
    /* sanity: an empty counter must coincide with an empty tree */
    if (rbt_is_empty(tree)) {
        assert(tree->root == NULL);
    }
    return 0;
}