forked from xuos/xiuos
Enhance optimization and warning level.
This commit is contained in:
parent 077dcd66ac
commit 3c6e8ce109
@@ -1,6 +1,6 @@
 export CROSS_COMPILE ?= arm-none-eabi-
 export DEVICE = -march=armv7-a -mtune=cortex-a9 -mfpu=vfpv3-d16 -ftree-vectorize -ffast-math -mfloat-abi=softfp
-export CFLAGS := $(DEVICE) -Wall -O0 -g -gdwarf-2
+export CFLAGS := $(DEVICE) -Wall -O2 -g -gdwarf-2 -Wnull-dereference -Waddress -Warray-bounds -Wchar-subscripts -Wimplicit-int -Wimplicit-function-declaration -Wcomment -Wformat -Wmissing-braces -Wnonnull -Wparentheses -Wpointer-sign -Wreturn-type -Wsequence-point -Wstrict-aliasing -Wstrict-overflow=1 -Wswitch -Wtrigraphs -Wuninitialized -Wunknown-pragmas -Wunused-function -Wunused-label -Wunused-value -Wunused-variable -Wunused-function
 export AFLAGS := -c $(DEVICE) -x assembler-with-cpp -D__ASSEMBLY__ -gdwarf-2
 # export LFLAGS := $(DEVICE) -Wl,-Map=XiZi-imx6q-sabrelite.map,-cref,-u,_boot_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/nxp_imx6q_sabrelite.lds
 export LFLAGS := $(DEVICE) --specs=nosys.specs -Wl,-Map=XiZi-imx6q-sabrelite.map,-cref,-u,_boot_start -T $(KERNEL_ROOT)/hardkernel/arch/arm/armv7-a/cortex-a9/preboot_for_imx6q-sabrelite/nxp_imx6q_sabrelite.lds

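Editor's note on the -O0 to -O2 bump: GCC's -Wnull-dereference is largely a no-op without optimization, since it is only active when -fdelete-null-pointer-checks is, which GCC ties to optimization on most targets, so raising the optimization level is what actually arms the new warning. Below is a minimal, hypothetical sketch (not project code) of the bug shape the new flags target, the same shape as the trace_locate_inner and _scheduler fixes further down; depending on the GCC version it warns at -O2 and stays silent at -O0:

    /* demo.c -- hypothetical example, not project code.
     * arm-none-eabi-gcc -O2 -Wall -Wnull-dereference -c demo.c   // expected to warn
     * arm-none-eabi-gcc -O0 -Wall -Wnull-dereference -c demo.c   // silent
     */
    #include <stddef.h>
    #include <stdio.h>

    struct meta { int type; };

    int lookup_type(const struct meta* m)
    {
        if (m == NULL) {
            printf("not found\n");
            /* missing "return -1;", exactly the hole the commit plugs below */
        }
        return m->type; /* on the NULL path this dereferences a null pointer */
    }
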
@@ -125,20 +125,22 @@ void iabort_reason(struct trapframe* r)
 void handle_undefined_instruction(struct trapframe* tf)
 {
     // unimplemented trap handler
-    xizi_enter_kernel();
     ERROR("undefined instruction at %x\n", tf->pc);
+    xizi_enter_kernel();
     panic("");
 }

 void handle_reserved(void)
 {
     // unimplemented trap handler
+    ERROR("Unimplemented Reserved\n");
     xizi_enter_kernel();
-    panic("Unimplemented Reserved\n");
+    panic("");
 }

 void handle_fiq(void)
 {
+    ERROR("Unimplemented FIQ\n");
     xizi_enter_kernel();
-    panic("Unimplemented FIQ\n");
+    panic("");
 }

@@ -367,8 +367,9 @@ static void trace_locate_inner(struct TraceTag* target, struct TraceTag* const p
     // p_trace_meta: TRACER_OWNER, VT_FS or other.
     // TRACER_OWNER: path: "", name: "dir name"
     // other: path: "", name: "file name"
-    if (!p_trace_meta) {
+    if (p_trace_meta == NULL) {
         DEBUG("trace_locate, not found\n");
+        return;
     }
     target->type = p_trace_meta->type;
     target->meta = p_trace_meta;

@@ -105,6 +105,9 @@ static struct tracer_mem_chunk* tracer_get_mem_chunk_cache(uint32_t chunk_id)
 struct tracer_mem_chunk* tracer_mem_chunk_read(uint32_t chunk_id)
 {
     struct tracer_mem_chunk* b = tracer_get_mem_chunk_cache(chunk_id);
+    if (b == NULL) {
+        return NULL;
+    }
     if (!(b->flag & TRACER_MEM_CHUNK_VALID)) {
         tracer_mem_chunk_sync(b);
         b->flag |= TRACER_MEM_CHUNK_VALID;

@@ -137,6 +140,9 @@ static void tracer_mem_chunk_zero(uint32_t chunk_id)
     assert(chunk_id >= 0 && chunk_id < tracer_mem_chunk_syner.nr_mem_chunks);
     struct tracer_mem_chunk* tracer_mem_chunk = NULL;
     tracer_mem_chunk = tracer_mem_chunk_read(chunk_id);
+    if (tracer_mem_chunk == NULL) {
+        return;
+    }
     memset(tracer_mem_chunk->data, 0, tracer_mem_chunk_syner.mem_chunk_size);
     tracer_mem_chunk_write(tracer_mem_chunk);
     tracer_mem_chunk_release(tracer_mem_chunk);

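Both tracer hunks above add the same caller-side contract: tracer_mem_chunk_read() can now return NULL (apparently when the underlying cache lookup fails), and callers are expected to bail out before touching the chunk. A self-contained toy version of that contract, with stand-in types and a made-up four-slot cache rather than the real tracer structures:

    #include <stddef.h>
    #include <string.h>

    /* stand-in chunk type and a tiny cache; the real kernel structures differ */
    struct chunk { unsigned id; unsigned char data[64]; int in_use; };
    static struct chunk cache[4];

    static struct chunk* chunk_read(unsigned id)
    {
        for (size_t i = 0; i < 4; i++) {
            if (!cache[i].in_use) {   /* grab a free slot */
                cache[i].in_use = 1;
                cache[i].id = id;
                return &cache[i];
            }
        }
        return NULL;                  /* cache exhausted: the caller must cope */
    }

    static void chunk_release(struct chunk* c) { c->in_use = 0; }

    static void chunk_zero(unsigned id)
    {
        struct chunk* c = chunk_read(id);
        if (c == NULL) {
            return;                   /* mirrors the new check in tracer_mem_chunk_zero */
        }
        memset(c->data, 0, sizeof(c->data));
        chunk_release(c);
    }
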
@@ -1,12 +1,12 @@
 ifeq ($(BOARD), imx6q-sabrelite)
 toolchain ?= arm-none-eabi-
 user_ldflags = --specs=nosys.specs -Wl,-Map=user.map,-cref -N
-cflags = -std=c11 -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=soft -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -ggdb -Wno-unused -Werror -fno-omit-frame-pointer -fno-stack-protector -fno-pie -no-pie
+cflags = -std=c11 -O2 -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=soft -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -ggdb -Wno-unused -Werror -fno-omit-frame-pointer -fno-stack-protector -fno-pie -no-pie
 endif
 ifeq ($(BOARD), zynq7000-zc702)
 toolchain ?= arm-xilinx-eabi-
 user_ldflags = -Wl,--start-group,-lgcc,-lc,--end-group -N
-cflags = -std=c11 -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=soft -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -ggdb -Wno-unused -Werror -fno-omit-frame-pointer -fno-stack-protector -fno-pie
+cflags = -std=c11 -O2 -march=armv7-a -mtune=cortex-a9 -nostdlib -nodefaultlibs -mfloat-abi=soft -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -ggdb -Wno-unused -Werror -fno-omit-frame-pointer -fno-stack-protector -fno-pie
 board_specs = stub.o
 #cflags = -Wall -g -std=c11
 endif

@@ -75,6 +75,9 @@ int sys_close_session(struct TaskMicroDescriptor* cur_task, struct Session* sess
     }

     /* close this session */
+    if (session_backend == NULL) {
+        return -1;
+    }
     if (UNLIKELY(session_backend->client_side.closed && session_backend->server_side.closed) && LIKELY(session_valid)) {
         xizi_share_page_manager.delete_share_pages(session_backend);
     }

@@ -233,7 +233,7 @@ static void _scheduler(struct SchedulerRightGroup right_group)
     next_task = NULL;
     /* find next runnable task */
     assert(cur_cpu()->task == NULL);
-    if (next_task_emergency != NULL && next_task->state == READY) {
+    if (next_task_emergency != NULL && next_task_emergency->state == READY) {
         next_task = next_task_emergency;
     } else {
         next_task = xizi_task_manager.next_runnable_task();

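The scheduler hunk is the clearest payoff of the stricter flags: next_task is assigned NULL three lines earlier, so the old condition read next_task->state through a pointer that is provably NULL whenever an emergency task is queued; the fix tests next_task_emergency->state, the pointer that was actually meant. A hypothetical, self-contained reduction of the same pattern (the names are stand-ins, not the kernel's):

    #include <stddef.h>

    enum task_state { INIT, READY, RUNNING };
    struct task { enum task_state state; };

    struct task* pick_next(struct task* emergency, struct task* (*next_runnable)(void))
    {
        struct task* next = NULL;

        /* old form: "next->state == READY" here reads through the NULL assigned
         * above whenever "emergency" is non-NULL; checking emergency->state,
         * as the commit does, is the intended test */
        if (emergency != NULL && emergency->state == READY) {
            next = emergency;
        } else {
            next = next_runnable();
        }
        return next;
    }
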
@@ -58,11 +58,11 @@ __attribute__((optimize("O0"))) void dabort_handler(struct trapframe* r)
         panic("data abort exception\n");
     }

-    xizi_enter_kernel();
     struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
     ERROR("dabort in user space: %s\n", cur_task->name);
     dabort_reason(r);

+    xizi_enter_kernel();
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
     context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);

@@ -78,11 +78,11 @@ __attribute__((optimize("O0"))) void iabort_handler(struct trapframe* r)
         panic("kernel prefetch abort exception\n");
     }

-    xizi_enter_kernel();
     struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
     ERROR("iabort in user space: %s\n", cur_task->name);
     iabort_reason(r);

+    xizi_enter_kernel();
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
     context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);

@@ -95,13 +95,13 @@ intr_leave_interrupt:
     xizi_leave_kernel();
 }

-void xizi_enter_kernel()
+__attribute__((always_inline)) inline void xizi_enter_kernel()
 {
     /// @warning trampoline is responsible for closing interrupt
     spinlock_lock(&whole_kernel_lock);
 }

-inline bool xizi_try_enter_kernel()
+__attribute__((always_inline)) inline bool xizi_try_enter_kernel()
 {
     /// @warning trampoline is responsible for closing interrupt
     if (spinlock_try_lock(&whole_kernel_lock)) {

@@ -111,7 +111,7 @@ inline bool xizi_try_enter_kernel()
     return false;
 }

-inline void xizi_leave_kernel()
+__attribute__((always_inline)) inline void xizi_leave_kernel()
 {
     /// @warning trampoline is responsible for eabling interrupt by using user's state register
     spinlock_unlock(&whole_kernel_lock);

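On the always_inline additions (an editor's sketch, not the project's code): a plain inline in C is only a hint, and GCC performs no inlining at -O0, so the kernel-lock wrappers previously cost a real call in debug builds; __attribute__((always_inline)) forces the body into every caller regardless of the optimization level. A standalone illustration using GCC's atomic builtins:

    /* hypothetical sketch, not the kernel's spinlock implementation */
    #include <stdbool.h>

    static volatile bool demo_lock;

    static inline __attribute__((always_inline)) void lock_enter(volatile bool* lk)
    {
        /* spin until the flag was observed clear and is now set atomically */
        while (__atomic_test_and_set(lk, __ATOMIC_ACQUIRE)) {
            /* busy wait */
        }
    }

    static inline __attribute__((always_inline)) void lock_leave(volatile bool* lk)
    {
        __atomic_clear(lk, __ATOMIC_RELEASE);
    }

    void with_lock(void (*work)(void))
    {
        lock_enter(&demo_lock);   /* expanded in place even at -O0 */
        work();
        lock_leave(&demo_lock);
    }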