This commit is contained in:
liuqh 2024-05-10 15:42:42 +08:00
commit e62863bc22
23 changed files with 221 additions and 778 deletions

View File

@ -37,6 +37,7 @@ Modification:
#include "assert.h"
#include "pagetable.h"
#include "spinlock.h"
#define KERN_BOOT_DRIVER(n, bi, f) \
{ \

View File

@ -115,5 +115,5 @@ __attribute__((optimize("O0"))) bool spinlock_try_lock(struct spinlock* lock)
/// @brief Check whether the given spinlock is held by the calling CPU.
/// @param lock spinlock to inspect
/// @return true iff lock->owner_cpu records the current CPU's id
bool is_spinlock_hold_by_current_cpu(struct spinlock* lock)
{
    // The stray `return lock->owner_cpu;` made the comparison below
    // unreachable and coerced a CPU id to bool (any non-zero owner looked
    // "held by us"); keep only the comparison against the current CPU id.
    return lock->owner_cpu == cur_cpuid();
}

View File

@ -1,4 +1,4 @@
SRC_DIR :=
SRC_FILES := actracer.c actracer_mem_chunk.c
SRC_FILES := actracer.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -29,267 +29,43 @@ Modification:
#include <stddef.h>
#include <string.h>
#include "trap_common.h"
#include "assert.h"
#include "multicores.h"
#include "spinlock.h"
#include "task.h"
#include "actracer.h"
#include "assert.h"
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
static struct SysTracer sys_tracer;
static char root_name[TRACER_NODE_NAME_LEN] = "ROOT\0";
struct SysTracer sys_tracer;
char* tracer_space[TRACER_MEM_CHUNK_SIZE * NR_TRACER_MEM_CHUNKS];
struct TraceTag* const RequireRootTag()
static void tracer_init_node(TracerNode* node, char* name, tracemeta_ac_type type, void* p_resource)
{
static struct TraceTag root_trace_tag = { NULL };
return &root_trace_tag;
}
static inline int namecmp(const char* s, const char* t)
{
return strncmp(s, t, RESOURCE_NAME_SIZE);
}
/// @brief alloc a trace meta to trace resource
static struct TraceMeta* alloc_trace_meta()
{
int index = -1;
for (uint32_t idx = 0; idx < BITS_TRACEMETA_BITMAP; idx++) {
if (sys_tracer.trace_meta_bit_map[idx] == 0xFFFFFFFF) {
continue;
}
uint32_t position = __builtin_ffs(~sys_tracer.trace_meta_bit_map[idx]) - 1;
if (position != 31) {
// found a free bit
sys_tracer.trace_meta_bit_map[idx] |= (1 << (position));
index = idx * 32 + position;
break;
}
node->type = type;
node->parent = NULL;
if (name != NULL) {
char* p_name = (char*)slab_alloc(&sys_tracer.node_name_allocator);
strcpy(p_name, name);
p_name[TRACER_NODE_NAME_LEN - 1] = '\0';
node->name = p_name;
}
if (index == -1) {
panic("Tracer no enough TracerMeta.");
}
sys_tracer.trace_meta_poll[index].index = index;
return &sys_tracer.trace_meta_poll[index];
}
static bool dealloc_trace_meta(struct TraceMeta* meta)
{
int index = meta->index;
// clear bitmap
uint32_t outer_index = index / 32;
uint32_t inner_index = index % 32;
sys_tracer.trace_meta_bit_map[outer_index] &= (uint32_t)(~(1 << inner_index));
// clear meta
sys_tracer.trace_meta_poll[index].type = TRACER_INVALID;
if (index == -1) {
panic("Tracer no enough TracerMeta.");
}
sys_tracer.trace_meta_poll[index].index = index;
return &sys_tracer.trace_meta_poll[index];
}
static tracer_mem_chunk_idx_t trace_meta_map_mem_chunk(struct TraceMeta* const p_trace_meta, tracer_mem_chunk_idx_t mem_chunk_num)
{
tracer_mem_chunk_idx_t addr = 0;
/* direct mapping */
if (mem_chunk_num < TRACEMETA_NR_DIRECT) {
if ((addr = p_trace_meta->addr[mem_chunk_num]) == 0) {
p_trace_meta->addr[mem_chunk_num] = addr = tracer_mem_chunk_alloc();
}
return addr;
}
/* indirect mapping */
mem_chunk_num -= TRACEMETA_NR_DIRECT;
int indirect_mem_chunk_id = mem_chunk_num / NR_ADDR_PER_MEM_CHUNK;
if (indirect_mem_chunk_id < TRACEMETA_NR_INDIRECT) {
if ((addr = p_trace_meta->addr[TRACEMETA_NR_DIRECT + indirect_mem_chunk_id]) == 0) {
p_trace_meta->addr[TRACEMETA_NR_DIRECT + indirect_mem_chunk_id] = addr = tracer_mem_chunk_alloc();
}
mem_chunk_num -= indirect_mem_chunk_id * NR_ADDR_PER_MEM_CHUNK;
if (node->type == TRACER_OWNER) {
doubleListNodeInit(&node->children_guard);
} else {
panic("tracer inode, bmap out of range");
// no return
node->p_resource = p_resource;
}
// index mem_chunk
struct tracer_mem_chunk* tracer_mem_chunk = tracer_mem_chunk_read(addr);
tracer_mem_chunk_idx_t* indirect_list = (tracer_mem_chunk_idx_t*)tracer_mem_chunk->data;
if ((addr = indirect_list[mem_chunk_num]) == 0) {
indirect_list[mem_chunk_num] = addr = tracer_mem_chunk_alloc();
tracer_mem_chunk_write(tracer_mem_chunk);
}
tracer_mem_chunk_release(tracer_mem_chunk);
return addr;
doubleListNodeInit(&node->list_node);
}
/// @brief write trace info by trace meta
static int trace_write_info(struct TraceMeta* const p_trace_meta, char* src, uint32_t off, uint32_t n)
void sys_tracer_init()
{
if (p_trace_meta->type == TRACER_INVALID) {
return -1;
}
// set sys_tracer resource identity
tracer_init_node(&sys_tracer.root_node, NULL, TRACER_OWNER, NULL);
sys_tracer.root_node.name = root_name;
sys_tracer.sys_tracer_tag.meta = &sys_tracer.root_node;
// fast path
if (off == 0 && n <= sizeof(uintptr_t)) {
p_trace_meta->reserved = *(uintptr_t*)src;
return n;
}
if (UNLIKELY(off > p_trace_meta->size || off + n > VFS_FILE_MAXSIZE * TRACER_MEM_CHUNK_SIZE || off + n < off)) {
return -1;
}
struct tracer_mem_chunk* tracer_mem_chunk;
uint32_t m;
for (uint32_t tot = 0; tot < n; tot += m, off += m, src += m) {
tracer_mem_chunk = tracer_mem_chunk_read(trace_meta_map_mem_chunk(p_trace_meta, off / TRACER_MEM_CHUNK_SIZE));
m = min(n - tot, TRACER_MEM_CHUNK_SIZE - off % TRACER_MEM_CHUNK_SIZE);
memmove(tracer_mem_chunk->data + off % TRACER_MEM_CHUNK_SIZE, src, m);
tracer_mem_chunk_write(tracer_mem_chunk);
tracer_mem_chunk_release(tracer_mem_chunk);
}
if (n > 0 && off > p_trace_meta->size) {
p_trace_meta->size = off;
}
return n;
// init memory allocator
slab_init(&sys_tracer.node_allocator, sizeof(TracerNode));
slab_init(&sys_tracer.node_name_allocator, sizeof(char[TRACER_NODE_NAME_LEN]));
}
/// @brief read trace info by trace meta
static int trace_read_info(struct TraceMeta* const p_trace_meta, char* dst, uint32_t off, uint32_t n)
{
if (p_trace_meta->type == TRACER_INVALID) {
return -1;
}
if (off == 0 && n <= sizeof(uintptr_t)) {
*(uintptr_t*)dst = p_trace_meta->reserved;
return n;
}
if (UNLIKELY(off > p_trace_meta->size || off + n < off)) {
return -1;
}
if (UNLIKELY(off + n > p_trace_meta->size)) {
n = p_trace_meta->size - off;
}
static struct tracer_mem_chunk* tracer_mem_chunk;
uint32_t m;
for (uint32_t tot = 0; tot < n; tot += m, off += m, dst += m) {
tracer_mem_chunk = tracer_mem_chunk_read(trace_meta_map_mem_chunk(p_trace_meta, off / TRACER_MEM_CHUNK_SIZE));
m = min(n - tot, TRACER_MEM_CHUNK_SIZE - off % TRACER_MEM_CHUNK_SIZE);
memmove(dst, tracer_mem_chunk->data + off % TRACER_MEM_CHUNK_SIZE, m);
tracer_mem_chunk_release(tracer_mem_chunk);
}
return n;
}
static struct TraceMeta* tracer_find_meta_onestep(struct TraceMeta* const p_owner, char* name, uint32_t* poff)
{
struct TraceResourceEntry resource_entry;
if (p_owner->type != TRACER_OWNER) {
ERROR("tracer_find_meta_onestep, not a dir, index: %d\n", p_owner->index);
return NULL;
}
for (uint32_t off = 0; off < p_owner->size; off += sizeof(resource_entry)) {
if (trace_read_info(p_owner, (char*)&resource_entry, off, sizeof(resource_entry)) != sizeof(resource_entry)) {
panic("tracer_find_meta_onestep: read trace owner's resources failed\n");
}
if (resource_entry.index == 0) {
continue;
}
if (namecmp(name, resource_entry.name) == 0) {
if (poff) {
*poff = off;
}
uint32_t vindex = resource_entry.index;
assert(vindex >= 0 && vindex < NR_MAX_TRACEMETA);
return &sys_tracer.trace_meta_poll[vindex];
}
}
return NULL;
}
// Write a new vdirectory entry (name, index) into the vdirectory dp.
static int tracer_append_meta(struct TraceMeta* p_owner, char* name, uint32_t index)
{
struct TraceResourceEntry resource_entry;
int offset = 0;
for (offset = 0; offset < p_owner->size; offset += sizeof(resource_entry)) {
if (trace_read_info(p_owner, (char*)&resource_entry, offset, sizeof(resource_entry)) != sizeof(resource_entry)) {
ERROR("tracer_append_meta failed, read owner's resources failed.\n");
return -1;
}
if (resource_entry.index == 0) {
break;
}
}
strncpy(resource_entry.name, name, RESOURCE_NAME_SIZE);
resource_entry.index = index;
if (trace_write_info(p_owner, (char*)&resource_entry, offset, sizeof(resource_entry)) != sizeof(resource_entry)) {
ERROR("tracer_append_meta failed, append resource to owner failed.\n");
return -1;
}
return 0;
}
static struct TraceMeta* tracer_new_meta(struct TraceMeta* p_owner, char* name, short type)
{
struct TraceMeta* p_trace_meta;
// check if owner entry exists
uint32_t offset;
if ((p_trace_meta = tracer_find_meta_onestep(p_owner, name, &offset)) != 0) {
LOG("create resource(trace meta) failed, %s is existed\n", name);
return NULL;
}
if ((p_trace_meta = alloc_trace_meta()) == 0) {
ERROR("create resource(trace meta) failed, cache is no free\n");
return NULL;
}
p_trace_meta->type = type;
p_trace_meta->size = 0;
// update parent directory
tracer_append_meta(p_owner, name, p_trace_meta->index);
// update "." and ".." for vfs inode
if (p_trace_meta->type == TRACER_OWNER) {
tracer_append_meta(p_trace_meta, ".", p_trace_meta->index);
tracer_append_meta(p_trace_meta, "..", p_owner->index);
}
return p_trace_meta;
}
static char* parse_path(char* path, char* name)
static char* parse_path(char* path, char* const name)
{
// skip extra '/'
while (*path == '/') {
@ -307,8 +83,9 @@ static char* parse_path(char* path, char* name)
// handle current name
int len = path - cur_start;
if (len >= RESOURCE_NAME_SIZE) {
strncpy(name, cur_start, RESOURCE_NAME_SIZE);
if (len >= TRACER_NODE_NAME_LEN) {
strncpy(name, cur_start, TRACER_NODE_NAME_LEN);
name[TRACER_NODE_NAME_LEN - 1] = '\0';
} else {
strncpy(name, cur_start, len);
name[len] = '\0';
@ -317,216 +94,101 @@ static char* parse_path(char* path, char* name)
return path;
}
static struct TraceMeta* tracer_find_meta(struct TraceMeta* const p_owner, char* path, int nameiparent, char* name)
static TracerNode* tracer_find_node_onestep(TracerNode* const owner, const char* const name)
{
struct TraceMeta* p_owner_inside = p_owner;
struct TraceMeta* vnp;
/* traverse TRACER_OWNER */
while ((path = parse_path(path, name)) != 0) {
if (p_owner_inside->type != TRACER_OWNER) {
return NULL;
}
if (nameiparent && *path == '\0') {
return p_owner_inside;
}
if ((vnp = tracer_find_meta_onestep(p_owner_inside, name, NULL)) == 0) {
DEBUG("Not such object: %s\n", path);
return NULL;
}
p_owner_inside = vnp;
}
if (nameiparent) {
return NULL;
}
return p_owner_inside;
}
int tracer_write_trace(struct TraceTag* const p_trace_tag, char* src, uint32_t off, uint32_t n)
{
if (src == NULL || p_trace_tag == NULL || p_trace_tag->meta == NULL) {
return -1;
}
return trace_write_info(p_trace_tag->meta, src, off, n);
}
int tracer_read_trace(struct TraceTag* const p_trace_tag, char* dst, uint32_t off, uint32_t n)
{
if (dst == NULL || p_trace_tag == NULL || p_trace_tag->meta == NULL) {
return -1;
}
return trace_read_info(p_trace_tag->meta, dst, off, n);
}
/// @brief
static void trace_locate_inner(struct TraceTag* target, struct TraceTag* const p_trace_tag, char* path, bool parent)
{
char name[RESOURCE_NAME_SIZE];
struct TraceMeta* p_trace_meta = tracer_find_meta(p_trace_tag->meta, path, parent, name);
// p_trace_meta: TRACER_OWNER, VT_FS or other.
// TRACER_OWNER: path: "", name: "dir name"
// other: path: "", name: "file name"
if (p_trace_meta == NULL) {
DEBUG("trace_locate, not found\n");
return;
}
target->type = p_trace_meta->type;
target->meta = p_trace_meta;
}
static inline void trace_locate(struct TraceTag* target, struct TraceTag* const p_trace_tag, char* path)
{
trace_locate_inner(target, p_trace_tag, path, 0);
}
static inline void trace_locate_parent(struct TraceTag* target, struct TraceTag* const p_trace_tag, char* path)
{
trace_locate_inner(target, p_trace_tag, path, 1);
}
bool tracer_create_trace(struct TraceTag* target, struct TraceTag* p_trace_tag, char* path, short type)
{
struct TraceMeta *p_trace_meta, *p_owner;
// find parent vfs inode
if ((p_owner = p_trace_tag->meta) == 0) {
LOG("create tracemeta failed, parent is null\n");
target->meta = NULL;
return false;
}
p_trace_meta = tracer_new_meta(p_owner, path, type);
target->meta = p_trace_meta;
return true;
}
bool tracer_delete_trace(struct TraceTag* target, struct TraceTag* owner)
{
if (target->meta == NULL || owner->type != TRACER_OWNER) {
return false;
}
struct TraceMeta* p_trace_meta = target->meta;
struct TraceMeta* p_owner_meta = owner->meta;
assert(p_trace_meta->type != TRACER_INVALID);
if (p_trace_meta->type == TRACER_OWNER) {
/// @todo support recursive delete
}
struct TraceResourceEntry resource_entry;
bool is_owned = false;
for (uint32_t off = 0; off < p_owner_meta->size; off += sizeof(resource_entry)) {
if (trace_read_info(p_owner_meta, (char*)&resource_entry, off, sizeof(resource_entry)) != sizeof(resource_entry)) {
panic("tracer_find_meta_onestep: read trace owner's resources failed\n");
}
if (resource_entry.index == 0) {
TracerNode* iter = NULL;
assert(owner->type == TRACER_OWNER);
DOUBLE_LIST_FOR_EACH_ENTRY(iter, &owner->children_guard, list_node)
{
if (iter->name == NULL) {
continue;
}
if (resource_entry.index == p_trace_meta->index) {
resource_entry.index = 0;
trace_write_info(owner->meta, (char*)&resource_entry, off, sizeof(resource_entry));
is_owned = true;
break;
if (strcmp(name, iter->name) == 0) {
return iter;
}
}
if (!is_owned) {
ERROR("delete trace(%d) not owned by given owner(%d).\n", target->meta->index, owner->meta->index);
return false;
}
dealloc_trace_meta(p_trace_meta);
return NULL;
}
TraceTag* const RequireRootTag()
{
return &sys_tracer.sys_tracer_tag;
}
bool AchieveResourceTag(TraceTag* target, TraceTag* owner, char* name)
{
static char name_buffer[TRACER_NODE_NAME_LEN];
TracerNode* inner_node = owner->meta;
assert(inner_node != NULL && inner_node->type == TRACER_OWNER);
while ((name = parse_path(name, name_buffer)) != NULL) {
if ((inner_node = tracer_find_node_onestep(inner_node, name_buffer)) == NULL) {
DEBUG("Tracer: No such object, owner: %s, child: %s\n", //
owner->meta->name == NULL ? "NULL" : owner->meta->name, name == NULL ? "NULL" : name_buffer);
return false;
}
}
target->meta = inner_node;
return true;
}
void tracer_init(void)
void* AchieveResource(TraceTag* tag)
{
/* init sys_tracer, the manager */
spinlock_init(&sys_tracer.mem_chunk_bitmap_lock, "tracer_mem_chunk_bitmap");
spinlock_init(&sys_tracer.trace_meta_bitmap_lock, "tracer_meta_bitmap");
memset(sys_tracer.mem_chunks_bit_map, 0, sizeof(sys_tracer.mem_chunk_bitmap_lock));
memset(sys_tracer.trace_meta_bit_map, 0, sizeof(sys_tracer.trace_meta_bit_map));
assert((TRACER_MEM_CHUNK_SIZE % sizeof(struct TraceMeta)) == 0);
assert((TRACER_MEM_CHUNK_SIZE % sizeof(struct TraceResourceEntry)) == 0);
// mem_chunk space, fit with mem_chunk_bit_map
mem_chunk_synchronizer_init((uintptr_t)tracer_space, TRACER_MEM_CHUNK_SIZE, NR_TRACER_MEM_CHUNKS);
/* build root inode */
alloc_trace_meta(); // inode as guard.
/* build root trace_meta */
struct TraceMeta* root_tracemeta = alloc_trace_meta();
assert(root_tracemeta->index == 1);
root_tracemeta->type = TRACER_OWNER;
root_tracemeta->size = 0;
tracer_append_meta(root_tracemeta, ".", root_tracemeta->index);
tracer_append_meta(root_tracemeta, "..", root_tracemeta->index);
RequireRootTag()->meta = root_tracemeta;
}
/// @brief find resource tag
void tracer_find_tag(struct TraceTag* target, struct TraceTag* const source, char* path)
{
target->meta = NULL;
struct TraceTag* p_trace_tag;
if (*path == '/' || source == NULL) {
p_trace_tag = RequireRootTag();
} else {
p_trace_tag = source;
}
if (p_trace_tag == NULL || p_trace_tag->meta == NULL) {
return;
}
trace_locate(target, p_trace_tag, path);
}
bool AchieveResourceTag(struct TraceTag* target, struct TraceTag* owner, char* name)
{
tracer_find_tag(target, owner, name);
if (target->meta == NULL) {
return false;
}
return true;
}
void* AchieveResource(struct TraceTag* target)
{
if (target->type == TRACER_OWNER) {
assert(tag != NULL);
if (tag->meta == NULL || tag->meta->type == TRACER_OWNER) {
return NULL;
}
void* p_resource = NULL;
tracer_read_trace(target, (char*)&p_resource, 0, sizeof(void*));
assert(p_resource != NULL);
return p_resource;
return tag->meta->p_resource;
}
bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
bool CreateResourceTag(TraceTag* new_tag, TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource)
{
new_tag->type = type;
if (type == TRACER_OWNER) {
return tracer_create_trace(new_tag, owner, name, type);
assert(new_tag != NULL && owner != NULL);
if (owner->meta == NULL) {
ERROR("Tracer: Empty owner\n");
return false;
}
assert(owner->meta->type == TRACER_OWNER);
TracerNode* new_node = (TracerNode*)slab_alloc(&sys_tracer.node_allocator);
if (new_node == NULL) {
ERROR("Tracer: No memory for new node\n");
return false;
}
tracer_init_node(new_node, name, type, p_resource);
// handle ac resource types
if (p_resource == NULL) {
// new node add to owner's children list
doubleListAddOnHead(&new_node->list_node, &owner->meta->children_guard);
new_node->parent = owner->meta;
new_tag->meta = new_node;
return true;
}
bool DeleteResource(TraceTag* target, TraceTag* owner)
{
assert(target != NULL && owner != NULL);
assert(owner->meta != NULL && owner->meta->type == TRACER_OWNER);
if (target->meta == NULL) {
ERROR("Tracer: Delete a empty resource\n");
return false;
}
if (!tracer_create_trace(new_tag, owner, name, type)) {
return false;
assert(target->meta->parent == owner->meta);
doubleListDel(&target->meta->list_node);
// delete name
if (target->meta->name != NULL) {
slab_free(&sys_tracer.node_name_allocator, target->meta->name);
}
bool ret = tracer_write_trace(new_tag, (char*)&p_resource, 0, sizeof(void*)) == sizeof(void*);
return ret;
}
bool DeleteResource(struct TraceTag* target, struct TraceTag* owner)
{
return tracer_delete_trace(target, owner);
// delete all children
/// @attention currently donot allow multilevel resource deletion
if (target->meta->type == TRACER_OWNER) {
assert(IS_DOUBLE_LIST_EMPTY(&target->meta->children_guard));
}
slab_free(&sys_tracer.node_allocator, target->meta);
target->meta = NULL;
return true;
}

View File

@ -30,8 +30,10 @@ Modification:
#include <stdbool.h>
#include <stdint.h>
#include "actracer_mem_chunk.h"
#include "spinlock.h"
#include "list.h"
#include "object_allocator.h"
#define TRACER_NODE_NAME_LEN 32
typedef enum {
TRACER_INVALID = 0,
@ -42,52 +44,32 @@ typedef enum {
TRACER_MEM_FROM_BUDDY_AC_RESOURCE,
} tracemeta_ac_type;
typedef uint16_t tracer_mem_chunk_idx_t;
#define TRACEMETA_NR_DIRECT 5
#define TRACEMETA_NR_INDIRECT 4
#define NR_ADDR_PER_MEM_CHUNK TRACER_MEM_CHUNK_SIZE / sizeof(tracer_mem_chunk_idx_t)
#define VFS_FILE_MAXSIZE (TRACEMETA_NR_DIRECT + (TRACEMETA_NR_INDIRECT * NR_ADDR_PER_MEM_CHUNK))
struct TraceMeta {
uint32_t size;
tracemeta_ac_type type; // TRACER_OWNER, etc.
uintptr_t reserved; // fast path to store pointer if content is a pointer
uint16_t index;
tracer_mem_chunk_idx_t addr[TRACEMETA_NR_DIRECT + TRACEMETA_NR_INDIRECT]; // 指向data mem_chunks, TRACER_OWNER 用于存放 dir entries, VT_FS用于存放bind
} __attribute__((aligned(32)));
typedef struct TracerNode {
tracemeta_ac_type type;
char* name;
union {
struct double_list_node children_guard;
void* p_resource;
};
struct TracerNode* parent;
struct double_list_node list_node;
} TracerNode;
/// @brief tag for other module to reference trace meta
struct TraceTag {
struct TraceMeta* meta;
short type; // TRACER_OWNER, etc.
};
#define RESOURCE_NAME_SIZE 14
struct TraceResourceEntry {
uint16_t index;
char name[RESOURCE_NAME_SIZE];
};
typedef struct TraceTag {
TracerNode* meta;
} TraceTag;
struct SysTracer {
#define NR_TRACER_MEM_CHUNKS 256
#define TRACER_MEM_CHUNK_SIZE sizeof(struct TraceMeta)
#define BITS_MEM_CHUNK_BITMAP (NR_TRACER_MEM_CHUNKS / 32)
uint32_t mem_chunks_bit_map[BITS_MEM_CHUNK_BITMAP];
struct spinlock mem_chunk_bitmap_lock;
#define NR_MAX_TRACEMETA 128
#define BITS_TRACEMETA_BITMAP (NR_MAX_TRACEMETA / 32)
uint32_t trace_meta_bit_map[BITS_TRACEMETA_BITMAP];
struct spinlock trace_meta_bitmap_lock;
struct TraceMeta trace_meta_poll[NR_MAX_TRACEMETA];
TracerNode root_node;
TraceTag sys_tracer_tag;
struct slab_allocator node_allocator;
struct slab_allocator node_name_allocator;
};
void tracer_init(void);
extern struct SysTracer sys_tracer;
extern struct TraceTag root_tracetag;
struct TraceTag* const RequireRootTag();
void sys_tracer_init();
TraceTag* const RequireRootTag();
bool AchieveResourceTag(struct TraceTag* target, struct TraceTag* owner, char* name);
void* AchieveResource(struct TraceTag* target);
bool CreateResourceTag(struct TraceTag* new_tag, struct TraceTag* owner, char* name, tracemeta_ac_type type, void* p_resource);
bool DeleteResource(struct TraceTag* target, struct TraceTag* owner);
bool DeleteResource(struct TraceTag* target, struct TraceTag* owner);

View File

@ -1,184 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file actracer_mem_chunk.c
* @brief tracer mem chunk implementation
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: actracer_mem_chunk.c
Description: tracer mem chunk implementation
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#include <string.h>
#include "assert.h"
#include "spinlock.h"
#include "actracer.h"
#include "actracer_mem_chunk.h"
/// @brief to assert that a mem_chunk of memory will only write by one object
struct mem_chunk_synchronizer {
uintptr_t mem_chunk_base;
uint32_t mem_chunk_size;
uint32_t nr_mem_chunks;
struct spinlock lock;
struct tracer_mem_chunk mem_chunk_access_list[NR_MEM_CHUNK_CACHE];
struct double_list_node head;
};
static struct mem_chunk_synchronizer tracer_mem_chunk_syner;
/// @brief Bind a busy cache entry's data pointer to its backing chunk
/// and mark the entry valid. Panics on a non-busy entry or an
/// out-of-range chunk id.
static void tracer_mem_chunk_sync(struct tracer_mem_chunk* chunk)
{
    if ((chunk->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
        panic("mem_chunk_sync: buf not busy");
    }
    if (chunk->chunk_id >= tracer_mem_chunk_syner.nr_mem_chunks) {
        panic("mem_chunk_sync: sector out of range");
    }
    uintptr_t offset = (uintptr_t)chunk->chunk_id * tracer_mem_chunk_syner.mem_chunk_size;
    chunk->data = (uint8_t*)(tracer_mem_chunk_syner.mem_chunk_base + offset);
    chunk->flag |= TRACER_MEM_CHUNK_VALID;
}
void mem_chunk_synchronizer_init(uintptr_t mem_chunk_base, uint32_t mem_chunk_size, uint32_t nr_mem_chunks)
{
tracer_mem_chunk_syner.mem_chunk_base = mem_chunk_base;
tracer_mem_chunk_syner.mem_chunk_size = mem_chunk_size;
tracer_mem_chunk_syner.nr_mem_chunks = nr_mem_chunks;
// Create linked list of buffers
doubleListNodeInit(&tracer_mem_chunk_syner.head);
for (struct tracer_mem_chunk* b = tracer_mem_chunk_syner.mem_chunk_access_list; b < tracer_mem_chunk_syner.mem_chunk_access_list + NR_MEM_CHUNK_CACHE; b++) {
doubleListNodeInit(&b->list_node);
doubleListAddOnHead(&b->list_node, &tracer_mem_chunk_syner.head);
}
}
/// @brief Find a cache slot for the given chunk id and mark it busy.
/// First pass: reuse an already-cached, non-busy entry with a matching id.
/// Second pass (tail first): recycle any non-busy entry, dropping its old
/// contents. Panics when every cache entry is busy.
static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
{
    // cached mem_chunk cache
    struct tracer_mem_chunk* b;
    DOUBLE_LIST_FOR_EACH_ENTRY(b, &tracer_mem_chunk_syner.head, list_node)
    {
        if (b->chunk_id == chunk_id) {
            if (!(b->flag & TRACER_MEM_CHUNK_BUSY)) {
                b->flag |= TRACER_MEM_CHUNK_BUSY;
                return b;
            }
            // NOTE(review): a busy entry with a matching id falls through to
            // the recycle scan below, which can hand out a second cache entry
            // for the same chunk id — confirm this aliasing is intended.
        }
    }
    // Non-cached mem_chunk cache
    DOUBLE_LIST_FOR_EACH_ENTRY_REVERSE(b, &tracer_mem_chunk_syner.head, list_node)
    {
        if ((b->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
            b->chunk_id = chunk_id;
            // plain assignment drops any stale VALID bit, so the caller will
            // re-sync the data pointer before use
            b->flag = TRACER_MEM_CHUNK_BUSY;
            return b;
        }
    }
    panic("tracer_get_mem_chunk_cache: no cache");
    return NULL;
}
// Return a TRACER_MEM_CHUNK_BUSY buf with the contents of the indicated disk sector.
// Return a TRACER_MEM_CHUNK_BUSY buf with the contents of the indicated disk sector.
struct tracer_mem_chunk* tracer_mem_chunk_read(uint32_t chunk_id)
{
    struct tracer_mem_chunk* chunk = tracer_get_mem_chunk_cache(chunk_id);
    if (chunk == NULL) {
        return NULL;
    }
    // lazily map the entry onto its backing chunk on first use
    if ((chunk->flag & TRACER_MEM_CHUNK_VALID) == 0) {
        tracer_mem_chunk_sync(chunk);
        chunk->flag |= TRACER_MEM_CHUNK_VALID;
    }
    return chunk;
}
/// @brief Publish a busy cache entry back to its chunk; panics when the
/// entry was never acquired (not busy).
void tracer_mem_chunk_write(struct tracer_mem_chunk* chunk)
{
    if ((chunk->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
        panic("tracer mem_chunk write a no busy mem_chunk");
    }
    tracer_mem_chunk_sync(chunk);
}
/// @brief Release a busy cache entry: move it to the head of the cache
/// list (most recently used) and clear its busy bit.
void tracer_mem_chunk_release(struct tracer_mem_chunk* chunk)
{
    if ((chunk->flag & TRACER_MEM_CHUNK_BUSY) == 0) {
        panic("tracer mem_chunk release but it's not busy occupied");
    }
    // move mem_chunk that just used to the head of cache list
    doubleListDel(&chunk->list_node);
    doubleListAddOnHead(&chunk->list_node, &tracer_mem_chunk_syner.head);
    chunk->flag &= ~TRACER_MEM_CHUNK_BUSY;
}
/// @brief Zero-fill the given chunk through the cache, then write it back
/// and release the cache entry.
static void tracer_mem_chunk_zero(uint32_t chunk_id)
{
    // chunk_id is unsigned, so only the upper bound needs checking; the
    // original `chunk_id >= 0 && ...` left half was always true.
    assert(chunk_id < tracer_mem_chunk_syner.nr_mem_chunks);
    struct tracer_mem_chunk* tracer_mem_chunk = tracer_mem_chunk_read(chunk_id);
    if (tracer_mem_chunk == NULL) {
        return;
    }
    memset(tracer_mem_chunk->data, 0, tracer_mem_chunk_syner.mem_chunk_size);
    tracer_mem_chunk_write(tracer_mem_chunk);
    tracer_mem_chunk_release(tracer_mem_chunk);
}
/// @return mem_chunk_idx in bit_map
/// @brief Scan the chunk bitmap for a free slot, claim it, and return an index.
/// @return mem_chunk_idx in bit_map (never 0; see review notes below)
static uint32_t find_first_free_mem_chunk()
{
    /// @todo another mem_chunk
    for (uint32_t idx = 0; idx < BITS_MEM_CHUNK_BITMAP; idx++) {
        if (sys_tracer.mem_chunks_bit_map[idx] == 0xFFFFFFFF) {
            continue;
        }
        // __builtin_ffs is 1-based: returns 1..32 for the lowest free bit
        uint32_t position = __builtin_ffs(~sys_tracer.mem_chunks_bit_map[idx]);
        if (position != 32) {
            // NOTE(review): the claimed bit is (position - 1) but the returned
            // index is idx * 32 + position — presumably so index 0 can serve as
            // a "not allocated" sentinel, yet this off-by-one mapping disagrees
            // with tracer_mem_chunk_free's bit math; confirm.
            sys_tracer.mem_chunks_bit_map[idx] |= (1 << (position - 1));
            return idx * 32 + position;
        }
        // NOTE(review): when only bit 31 of a word is free, position == 32 and
        // the word is skipped, so that chunk is never handed out — confirm intended.
    }
    panic("Tracer no enough space.");
    return 0;
}
/// @brief Allocate a fresh chunk: claim a free index from the bitmap and
/// zero its contents before handing it out.
/// @return the new chunk index (narrowed through tracer_mem_chunk_idx_t,
///         i.e. 16 bits — fine while NR_TRACER_MEM_CHUNKS fits in 16 bits)
uint32_t tracer_mem_chunk_alloc()
{
    tracer_mem_chunk_idx_t idx = find_first_free_mem_chunk();
    tracer_mem_chunk_zero(idx);
    return idx;
}
/// @brief Return a chunk to the bitmap by clearing its allocation bit.
/// @param chunk_id index of the chunk to free
void tracer_mem_chunk_free(uint32_t chunk_id)
{
    // chunk_id is unsigned, so the `>= 0` half of the old assert was a no-op
    assert(chunk_id < NR_TRACER_MEM_CHUNKS);
    // The original computed idx = chunk_id % 32 (used as the WORD index) and
    // inner bit = chunk_id / 32 — swapped relative to the allocator's layout,
    // so the wrong bit was cleared for any chunk_id >= 32.
    uint32_t word = chunk_id / 32;
    uint32_t bit = chunk_id % 32;
    // NOTE(review): the allocator returns 1-based indices within each word;
    // if callers pass those ids here the mapping is still off by one — confirm.
    // assert mem_chunk is allocated
    assert((sys_tracer.mem_chunks_bit_map[word] & (1 << bit)) != 0);
    sys_tracer.mem_chunks_bit_map[word] &= (uint32_t)(~(1 << bit));
}

View File

@ -1,54 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file actracer_mem_chunk.h
* @brief tracer mem chunk header
* @version 3.0
* @author AIIT XUOS Lab
* @date 2023.08.25
*/
/*************************************************
File name: actracer_mem_chunk.h
Description: tracer mem chunk header
Others:
History:
1. Date: 2023-08-28
Author: AIIT XUOS Lab
Modification:
1. first version
*************************************************/
#pragma once
#include <stdint.h>
#include "list.h"
// number of cache entries held by the mem-chunk synchronizer
#define NR_MEM_CHUNK_CACHE 128
// cache-entry state bits
typedef enum {
    TRACER_MEM_CHUNK_BUSY = 0x1, // entry handed out to a user; not recyclable
    TRACER_MEM_CHUNK_VALID = 0x2, // entry's data pointer maps its chunk
} tracer_mem_chunk_flag;
// one cache entry: a window onto a single backing chunk
struct tracer_mem_chunk {
    tracer_mem_chunk_flag flag;
    uint32_t chunk_id; // which backing chunk this entry maps
    struct double_list_node list_node; // position in the synchronizer's cache list
    uint8_t* data; // points into the backing region once synced
};
// initialize the synchronizer over a raw memory region
void mem_chunk_synchronizer_init(uintptr_t mem_chunk_base, uint32_t mem_chunk_size, uint32_t nr_mem_chunks);
// acquire a busy cache entry mapping chunk_id (caller must release)
struct tracer_mem_chunk* tracer_mem_chunk_read(uint32_t chunk_id);
// publish a busy entry's contents back to its chunk
void tracer_mem_chunk_write(struct tracer_mem_chunk* b);
// release a busy entry back to the cache list
void tracer_mem_chunk_release(struct tracer_mem_chunk* b);
// claim and zero a free chunk; returns its index
uint32_t tracer_mem_chunk_alloc();
// return a chunk's index to the free bitmap
void tracer_mem_chunk_free(uint32_t chunk_id);

View File

@ -57,7 +57,7 @@ static int InodeFreeRecursive(struct Inode* dp);
static char* PathElementExtract(char* path, char* name);
static uint32_t InodeBlockMapping(struct Inode* inode, uint32_t block_num);
#define MAX_SUPPORT_FD 1024
#define MAX_SUPPORT_FD 2048
static struct FileDescriptor fd_table[MAX_SUPPORT_FD];
struct MemFsRange MemFsRange;
@ -486,7 +486,7 @@ void FreeFileDescriptor(int fd)
printf("fd invlid.\n");
return;
}
fd_table[fd].data = 0;
fd_table[fd].data = NULL;
return;
}
@ -495,8 +495,9 @@ int AllocFileDescriptor(void)
int free_idx = -1;
for (int i = 0; i < MAX_SUPPORT_FD; i++) {
// found free fd
if (free_idx == -1 && fd_table[i].data == 0) {
if (free_idx == -1 && fd_table[i].data == NULL) {
free_idx = i;
break;
}
}
if (free_idx == -1) {

View File

@ -23,7 +23,7 @@ struct CwdPair {
struct Inode* Inode;
};
#define MAX_SUPPORT_SESSION 1024
#define MAX_SUPPORT_SESSION 2048
static struct CwdPair session_cwd[MAX_SUPPORT_SESSION];
static struct CwdPair* get_session_cwd(void)

View File

@ -57,25 +57,25 @@ Modification:
struct KPage {
struct double_list_node node;
union {
uint32_t order;
uintptr_t order;
struct KPage* page_node;
};
uintptr_t mapped_addr;
};
struct KFreeList {
uint32_t n_free_pages;
uintptr_t n_free_pages;
struct double_list_node list_head;
};
struct KBuddy {
uint32_t n_pages;
uint32_t use_lock;
uintptr_t n_pages;
uintptr_t use_lock;
struct spinlock lock;
struct KFreeList free_list[MAX_BUDDY_ORDER];
struct KPage* first_page;
uint32_t mem_start;
uint32_t mem_end;
uintptr_t mem_start;
uintptr_t mem_end;
struct KPage* pages;
};
@ -88,15 +88,15 @@ struct KBuddy {
* @param mem_end free memory region end
* @return void
*/
bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end);
void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end);
bool KBuddyInit(struct KBuddy* pbuddy, uintptr_t mem_start, uintptr_t mem_end);
void KBuddySysInit(struct KBuddy* pbuddy, uintptr_t mem_start, uintptr_t mem_end);
/*
* Continuous pages alloc by size
* @param sizeuint32_t size of need alloc
* @param sizeuintptr_t size of need alloc
* @return NULL or v_addr (char*) return NULL if alloc failed, or return virtual page's addr
*/
char* KBuddyAlloc(struct KBuddy* pbuddy, uint32_t size);
char* KBuddyAlloc(struct KBuddy* pbuddy, uintptr_t size);
/*
* Continuous pages free from vaddr

View File

@ -42,8 +42,11 @@ Modification:
#include <stdint.h>
#include "memlayout.h"
#define ELF_MAGIC 0x464C457FU // "\x7FELF" in little endian
#if (ARCH_BIT == 32)
// File header
struct elfhdr {
uint32_t magic; // must equal ELF_MAGIC
@ -51,9 +54,9 @@ struct elfhdr {
uint16_t type;
uint16_t machine;
uint32_t version;
uint32_t entry;
uint32_t phoff;
uint32_t shoff;
uintptr_t entry;
uintptr_t phoff;
uintptr_t shoff;
uint32_t flags;
uint16_t ehsize;
uint16_t phentsize;
@ -74,6 +77,37 @@ struct proghdr {
uint32_t flags;
uint32_t align;
};
#elif (ARCH_BIT == 64)
struct elfhdr {
uint magic; // must equal ELF_MAGIC
uchar elf[12];
ushort type;
ushort machine;
uint version;
uint64 entry;
uint64 phoff;
uint64 shoff;
uint flags;
ushort ehsize;
ushort phentsize;
ushort phnum;
ushort shentsize;
ushort shnum;
ushort shstrndx;
};
// Program section header
struct proghdr {
uint32 type;
uint32 flags;
uint64 off;
uint64 vaddr;
uint64 paddr;
uint64 filesz;
uint64 memsz;
uint64 align;
};
#endif
// Values for Proghdr type
#define ELF_PROG_LOAD 1

View File

@ -32,10 +32,10 @@ Modification:
#include "pagetable.h"
bool module_phymem_init();
char* kalloc(uint32_t size);
char* kalloc(size_t size);
bool kfree(char* vaddr);
char* raw_alloc(uint32_t size);
char* raw_alloc(size_t size);
bool raw_free(char* paddr);
void show_phymem_info();

View File

@ -29,6 +29,7 @@ Modification:
*************************************************/
#pragma once
#include <stddef.h>
#include <stdint.h>
struct slab_state {

View File

@ -39,8 +39,8 @@ Modification:
#include "mmu_common.h"
// clang-format off
#define ALIGNUP(sz, al) (((uint32_t)(sz) + (uint32_t)(al) - 1) & ~((uint32_t)(al) - 1))
#define ALIGNDOWN(sz, al) ((uint32_t)(sz) & ~((uint32_t)(al) - 1))
#define ALIGNUP(sz, al) (((uintptr_t)(sz) + (uintptr_t)(al) - 1) & ~((uintptr_t)(al) - 1))
#define ALIGNDOWN(sz, al) ((uintptr_t)(sz) & ~((uintptr_t)(al) - 1))
#define LEVEL4_PTE_IDX(v) (((uintptr_t)(v) >> LEVEL4_PTE_SHIFT) & (NUM_LEVEL4_PTE - 1))
#define LEVEL4_PTE_ADDR(v) ALIGNDOWN(v, LEVEL4_PTE_SIZE)

View File

@ -40,8 +40,6 @@ bool softkernel_init(struct TraceTag* _hardkernel_tag, struct TraceTag* _softker
CreateResourceTag(&server_identifier_owner, _softkernel_tag, "server-identifier", TRACER_OWNER, NULL);
/* init soft kernel */
module_phymem_init(); // init buddy management system
struct PagerRightGroup pager_rights;
AchieveResourceTag(&pager_rights.mmu_driver_tag, _hardkernel_tag, "mmu-ac-resource");
module_pager_init(&pager_rights);

View File

@ -33,22 +33,26 @@ Modification:
#include "multicores.h"
#include "assert.h"
#include "kalloc.h"
#include "task.h"
struct spinlock whole_kernel_lock;
extern uint32_t _binary_init_start[], _binary_default_fs_start[];
extern uintptr_t _binary_init_start[], _binary_default_fs_start[];
extern int sys_spawn(char* img_start, char* name, char** argv);
static struct TraceTag hardkernel_tag, softkernel_tag;
static volatile int core_init_done = 0;
__attribute__((optimize("O0"))) int main(void)
int main(void)
{
/* init tracer */
uint32_t cpu_id = cur_cpuid();
if (cpu_id == 0) {
tracer_init(); // init tracer system
/* init memory management first */
module_phymem_init(); // init buddy management system
/* init tracer system */
sys_tracer_init();
if (!CreateResourceTag(&hardkernel_tag, RequireRootTag(), "hardkernel", TRACER_OWNER, NULL) || //
!CreateResourceTag(&softkernel_tag, RequireRootTag(), "softkernel", TRACER_OWNER, NULL)) {
ERROR("Failed to create hardkernel owner and softkernel owner.\n");

View File

@ -32,9 +32,9 @@ Modification:
#include "kalloc.h"
#include "log.h"
static void _buddy_split_page(struct KPage* page, uint32_t low_order, uint32_t high_order, struct KFreeList* list)
static void _buddy_split_page(struct KPage* page, uintptr_t low_order, uintptr_t high_order, struct KFreeList* list)
{
uint32_t size = (1 << high_order);
uintptr_t size = (1 << high_order);
while (high_order > low_order) {
list--;
size >>= 1;
@ -57,14 +57,14 @@ __attribute__((always_inline)) static void inline _buddy_set_pages_order(struct
__attribute__((always_inline)) static void inline _buddy_page_to_vaddr(struct KBuddy* pbuddy, struct KPage* page, char** vaddr)
{
uint32_t offset = page - pbuddy->pages;
uintptr_t offset = page - pbuddy->pages;
*vaddr = (char*)(pbuddy->mem_start + (offset << LEVEL4_PTE_SHIFT));
return;
}
__attribute__((always_inline)) static void inline _buddy_vaddr_to_page(struct KBuddy* pbuddy, struct KPage** page, char* vaddr)
{
uint32_t offset = (uint32_t)vaddr - pbuddy->mem_start;
uintptr_t offset = (uintptr_t)vaddr - pbuddy->mem_start;
*page = (struct KPage*)(pbuddy->pages + (offset >> LEVEL4_PTE_SHIFT));
return;
}
@ -109,9 +109,9 @@ static struct KPage* KBuddyPagesAlloc(struct KBuddy* pbuddy, int nPages)
static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
{
struct KPage* buddy = NULL;
uint32_t order = (page->order >= MAX_BUDDY_ORDER) ? 0 : page->order;
uint32_t buddy_idx = 0, new_buddy_idx = 0;
uint32_t page_idx = page - pbuddy->pages;
uintptr_t order = (page->order >= MAX_BUDDY_ORDER) ? 0 : page->order;
uintptr_t buddy_idx = 0, new_buddy_idx = 0;
uintptr_t page_idx = page - pbuddy->pages;
for (; order < MAX_BUDDY_ORDER - 1; order++) {
// find and delete buddy to combine
@ -139,7 +139,7 @@ static void KBuddyPagesFree(struct KBuddy* pbuddy, struct KPage* page)
return;
}
bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
bool KBuddyInit(struct KBuddy* pbuddy, uintptr_t mem_start, uintptr_t mem_end)
{
if (pbuddy->pages == NULL) {
if ((pbuddy->pages = (struct KPage*)kalloc(((mem_end - mem_start) >> LEVEL4_PTE_SHIFT) * sizeof(struct KPage))) == NULL) {
@ -148,7 +148,7 @@ bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
}
}
uint32_t i = 0;
uintptr_t i = 0;
struct KPage* page = NULL;
struct KFreeList* free_list = NULL;
@ -160,7 +160,7 @@ bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
pbuddy->mem_start = ALIGNUP(pbuddy->mem_start, 4 * PAGE_SIZE);
// total number of free pages
pbuddy->n_pages = (pbuddy->mem_end - (uint32_t)pbuddy->mem_start) >> LEVEL4_PTE_SHIFT;
pbuddy->n_pages = (pbuddy->mem_end - (uintptr_t)pbuddy->mem_start) >> LEVEL4_PTE_SHIFT;
memset(pbuddy->pages, 0, pbuddy->n_pages);
@ -185,7 +185,7 @@ bool KBuddyInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
return true;
}
void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
void KBuddySysInit(struct KBuddy* pbuddy, uintptr_t mem_start, uintptr_t mem_end)
{
#define MAX_NR_PAGES MAX_NR_FREE_PAGES
static struct KPage kern_free_pages[MAX_NR_PAGES];
@ -193,7 +193,7 @@ void KBuddySysInit(struct KBuddy* pbuddy, uint32_t mem_start, uint32_t mem_end)
KBuddyInit(pbuddy, mem_start, mem_end);
}
char* KBuddyAlloc(struct KBuddy* pbuddy, uint32_t size)
char* KBuddyAlloc(struct KBuddy* pbuddy, uintptr_t size)
{
int nPages = CALCULATE_NPAGES(size);
struct KPage* page = KBuddyPagesAlloc(pbuddy, nPages);
@ -210,15 +210,15 @@ bool KBuddyFree(struct KBuddy* pbuddy, char* vaddr)
{
struct KPage* page = NULL;
if ((uint32_t)vaddr % (PAGE_SIZE)) {
if ((uintptr_t)vaddr % (PAGE_SIZE)) {
ERROR("kbuddyfree - unaligned: %x\n", vaddr);
return false;
}
if ((uint32_t)vaddr < pbuddy->mem_start) {
if ((uintptr_t)vaddr < pbuddy->mem_start) {
ERROR("kbuddyfree - under buddy free page address: %x\n", vaddr);
return false;
}
if ((uint32_t)vaddr >= pbuddy->mem_end) {
if ((uintptr_t)vaddr >= pbuddy->mem_end) {
ERROR("kbuddyfree - over buddy free page address: %x\n", vaddr);
return false;
}

View File

@ -36,21 +36,19 @@ Modification:
struct KBuddy kern_virtmem_buddy;
struct KBuddy user_phy_freemem_buddy;
extern uint32_t kernel_data_end[];
extern uintptr_t kernel_data_end[];
bool module_phymem_init()
{
LOG_PRINTF("Organizing free memory...\n");
uint32_t kern_freemem_start = V2P(&kernel_data_end);
uint32_t kern_freemem_end = PHY_USER_FREEMEM_BASE;
uint32_t user_freemem_start = PHY_USER_FREEMEM_BASE;
uint32_t user_freemem_end = PHY_MEM_STOP;
uintptr_t kern_freemem_start = V2P(&kernel_data_end);
uintptr_t kern_freemem_end = PHY_USER_FREEMEM_BASE;
uintptr_t user_freemem_start = PHY_USER_FREEMEM_BASE;
uintptr_t user_freemem_end = PHY_MEM_STOP;
KBuddySysInit(&kern_virtmem_buddy, kern_freemem_start, kern_freemem_end);
KBuddyInit(&user_phy_freemem_buddy, user_freemem_start, user_freemem_end);
LOG_PRINTF("Free memory organized done.\n");
return true;
}
char* kalloc(uint32_t size)
char* kalloc(size_t size)
{
char* mem_alloc = KBuddyAlloc(&kern_virtmem_buddy, size);
if (mem_alloc == NULL) {
@ -69,7 +67,7 @@ bool kfree(char* vaddr)
return KBuddyFree(&kern_virtmem_buddy, V2P_WO(vaddr));
}
char* raw_alloc(uint32_t size)
char* raw_alloc(size_t size)
{
char* mem_alloc = KBuddyAlloc(&user_phy_freemem_buddy, size);
if (mem_alloc == NULL) {

View File

@ -53,7 +53,7 @@ uintptr_t* _page_walk(uintptr_t* pgdir, uintptr_t vaddr, bool alloc)
pgtbl_vaddr = (uintptr_t*)P2V(pgtbl_paddr);
} else {
if (!alloc || !(pgtbl_vaddr = (uintptr_t*)kalloc(sizeof(uintptr_t) * NUM_LEVEL4_PTE))) {
return 0;
return NULL;
}
memset(pgtbl_vaddr, 0, sizeof(uintptr_t) * NUM_LEVEL4_PTE);

View File

@ -57,7 +57,7 @@ static struct slab_allocator* SessionAllocator()
static inline bool check_pages_unmapped(struct TaskMicroDescriptor* task, uintptr_t vaddr, int nr_pages)
{
static uintptr_t paddr = UINT32_MAX;
for (uint32_t i = 0; i < nr_pages; i++) {
for (uintptr_t i = 0; i < nr_pages; i++) {
if ((paddr = xizi_pager.address_translate(&task->pgdir, vaddr)) != 0) {
return false;
}

View File

@ -67,7 +67,7 @@ int sys_connect_session(char* path, int capacity, struct Session* user_session)
if (!AchieveResourceTag(&server_identifier_owner, RequireRootTag(), "softkernel/server-identifier")) {
panic("Server identifier not initialized.\b");
}
assert(server_identifier_owner.meta != NULL || server_identifier_owner.type == TRACER_OWNER);
assert(server_identifier_owner.meta != NULL);
struct TraceTag server_tag;
if (!AchieveResourceTag(&server_tag, &server_identifier_owner, path)) {

View File

@ -44,7 +44,7 @@ int sys_register_as_server(char* name)
if (!AchieveResourceTag(&server_identifier_set_tag, RequireRootTag(), "softkernel/server-identifier")) {
panic("Server identifier not initialized.\b");
}
assert(server_identifier_set_tag.meta != NULL || server_identifier_set_tag.type == TRACER_OWNER);
assert(server_identifier_set_tag.meta != NULL);
if (!CreateResourceTag(&server->server_identifier, &server_identifier_set_tag, name, TRACER_SERVER_IDENTITY_AC_RESOURCE, server)) {
return -1;

View File

@ -41,7 +41,7 @@ Modification:
extern uint8_t _binary_fs_img_start[], _binary_fs_img_end[];
#define SHOWINFO_BORDER_LINE() LOG_PRINTF("******************************************************\n");
#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-4d %-16s %-4d 0x%x(%-d)\n", task->pid, task->name, task->priority, task->mem_size >> 10, task->mem_size >> 10)
#define SHOWTASK_TASK_BASE_INFO(task) LOG_PRINTF(" %-6d %-16s %-4d 0x%x(%-d)\n", task->pid, task->name, task->priority, task->mem_size >> 10, task->mem_size >> 10)
void show_tasks(void)
{
@ -51,7 +51,7 @@ void show_tasks(void)
LOG_PRINTF("CPU %-2d: %s\n", i, (global_cpus[i].task == NULL ? "NULL" : global_cpus[i].task->name));
}
SHOWINFO_BORDER_LINE();
LOG_PRINTF("%-8s %-4s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
LOG_PRINTF("%-8s %-6s %-16s %-4s %-8s\n", "STAT", "ID", "TASK", "PRI", "MEM(KB)");
DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_running_list_head, node)
{
LOG_PRINTF("%-8s", "RUNNING");
@ -97,7 +97,7 @@ void show_tasks(void)
extern struct KBuddy user_phy_freemem_buddy;
extern struct KBuddy kern_virtmem_buddy;
extern uint32_t kernel_data_end[];
extern uintptr_t kernel_data_end[];
void show_mem(void)
{
SHOWINFO_BORDER_LINE();