kernel: separate the shared arm code into armv6-m and armv7-m

This commit is contained in:
Gujinyu 2021-08-30 21:04:32 +08:00
parent 6fe9a4f5a8
commit e68b68458c
8 changed files with 430 additions and 4 deletions

View File

@ -1,16 +1,28 @@
# Common part shared by all ARM boards
SRC_DIR := shared
# The following three platforms support compatible instructions.
# Each ARMv7-M board pulls in the shared armv7m context-switch code plus its
# own cortex-mX specifics.
# NOTE(review): each ifeq below lists its cortex-mX directory twice (once as
# "SRC_DIR +=cortex-mX", once as "SRC_DIR += cortex-mX") — this looks like
# leftover text from the previous revision; confirm the duplicates are intended.
ifeq ($(CONFIG_BOARD_CORTEX_M3_EVB),y)
SRC_DIR +=cortex-m3
SRC_DIR += armv7m
SRC_DIR += cortex-m3
endif
ifeq ($(CONFIG_BOARD_STM32F407_EVB),y)
SRC_DIR +=cortex-m4
SRC_DIR += armv7m
SRC_DIR += cortex-m4
endif
ifeq ($(CONFIG_BOARD_CORTEX_M7_EVB),y)
SRC_DIR +=cortex-m7
SRC_DIR += armv7m
SRC_DIR += cortex-m7
endif
# cortex-m0 is ARMv6-m
ifeq ($(CONFIG_BOARD_CORTEX_M0_EVB),y)
SRC_DIR += armv6m
SRC_DIR += cortex-m0
endif
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,3 @@
# Context-switch sources for this architecture variant:
# pendsv.S (PendSV switch handler) and arm32_switch.c (C switch helpers).
SRC_FILES := pendsv.S arm32_switch.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -0,0 +1,249 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiUOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include <xs_base.h>
#include <xs_ktask.h>
/* Cortex-M system register addresses and masks. Kept as C strings so they
 * can be concatenated directly into the inline-asm "LDR rX, =..." literals
 * below. */
#define SCB_VTOR "0xE000ED08"        /* SCB->VTOR: vector table offset */
#define NVIC_INT_CTRL "0xE000ED04"   /* SCB->ICSR: interrupt control/state */
#define NVIC_SYSPRI2 "0xE000ED20"    /* SCB->SHPR3: PendSV/SysTick priority */
#define NVIC_PENDSV_PRI "0x00FF0000" /* PendSV priority = 0xFF (lowest) */
#define NVIC_PENDSVSET "0x10000000"  /* ICSR.PENDSVSET: pend PendSV */
/* We replaced instructions that were not supported in thumb mode. */
/*
 * Request a task context switch from interrupt context (ARMv6-M variant).
 *
 * Register contract (AAPCS): r0 = from (sp-slot address of the outgoing
 * task), r1 = to, r2 = to_task, r3 = context; all must reach
 * Arm32SwitchReswitch untouched, so only r4/r5 are used as scratch and are
 * preserved with PUSH/POP pairs.
 *
 * If KtaskSwitchInterruptFlag is already 1 a switch is pending and only the
 * target is (re)recorded; otherwise the flag is set and InterruptFromKtask
 * is filled with r0. Either path branches to Arm32SwitchReswitch.
 *
 * Naked: no compiler prologue/epilogue; body must stay pure inline asm.
 */
void __attribute__((naked)) HwInterruptcontextSwitch(x_ubase from, x_ubase to, struct TaskDescriptor *to_task, void *context)
{
// KPrintf("%s\n", __func__);
asm volatile ("PUSH {R4}");
asm volatile ("PUSH {R5}");
asm volatile ("LDR r4, =KtaskSwitchInterruptFlag");
asm volatile ("LDR r5, [r4]");
asm volatile ("CMP r5, #1"); /* is a switch already pending? */
asm volatile ("POP {R5}"); /* POP leaves the flags alone; CMP result survives */
asm volatile ("POP {R4}");
asm volatile ("BEQ Arm32SwitchReswitch");
/* first request: mark a switch pending and record the outgoing task slot */
asm volatile ("PUSH {R4}");
asm volatile ("PUSH {R5}");
asm volatile ("LDR r4, =KtaskSwitchInterruptFlag");
asm volatile ("MOV r5, #1");
asm volatile ("STR r5, [r4]");
asm volatile ("LDR r4, =InterruptFromKtask");
asm volatile ("STR r0, [r4]"); /* InterruptFromKtask = from */
asm volatile ("POP {R5}");
asm volatile ("POP {R4}");
asm volatile ("B Arm32SwitchReswitch");
}
/*
 * Record the switch destination and pend PendSV.
 *
 * Expects r1 = to (sp-slot address of the incoming task) and r2 = to_task,
 * passed through unchanged from HwInterruptcontextSwitch /
 * SwitchKtaskContext. The actual register save/restore happens later in
 * PendSV_Handler; here we only write PENDSVSET into ICSR.
 */
void __attribute__((naked)) Arm32SwitchReswitch()
{
// KPrintf("%s\n", __func__);
asm volatile ("PUSH {R4}");
asm volatile ("LDR r4, =InterruptToKtask");
asm volatile ("STR r1, [r4]"); /* InterruptToKtask = to */
asm volatile ("LDR r4, =InterruptToKtaskDescriptor");
asm volatile ("STR r2, [r4]"); /* InterruptToKtaskDescriptor = to_task */
asm volatile ("LDR r0, =" NVIC_INT_CTRL);
asm volatile ("LDR r1, =" NVIC_PENDSVSET);
asm volatile ("STR r1, [r0]"); /* trigger PendSV */
asm volatile ("POP {R4}");
asm volatile ("BX LR");
}
/*
 * Task-level context switch entry: identical register contract to
 * HwInterruptcontextSwitch (r0 = from, r1 = to, r2 = to_task), so it simply
 * tail-branches there. Naked, so the argument registers and lr pass through
 * untouched.
 */
void __attribute__((naked)) SwitchKtaskContext(x_ubase from, x_ubase to, struct TaskDescriptor *to_task)
{
// KPrintf("%s\n", __func__);
asm volatile("B HwInterruptcontextSwitch");
}
/*
 * Start the first task (r0 = to sp-slot, r1 = to_task): record the switch
 * target, clear InterruptFromKtask so PendSV skips the save phase, give
 * PendSV the lowest exception priority, pend it, reset MSP to the initial
 * stack taken from vector table entry 0 (via SCB->VTOR), and re-enable
 * interrupts; the pended PendSV then restores the target task's context.
 *
 * NOTE(review): unlike the other switch entry points this function is NOT
 * __attribute__((naked)); it relies on the compiler prologue not disturbing
 * r0/r1 before the first asm statement — confirm at the optimization level
 * used to build this file.
 */
void SwitchKtaskContextTo(x_ubase to, struct TaskDescriptor *to_task)
{
// KPrintf("%s\n", __func__);
asm volatile ("LDR r2, =InterruptToKtask");
asm volatile ("STR r0, [r2]"); /* InterruptToKtask = to */
asm volatile ("LDR r2, =InterruptToKtaskDescriptor");
asm volatile ("STR r1, [r2]"); /* InterruptToKtaskDescriptor = to_task */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* VFP builds only (never compiled for ARMv6-M, which has no FPU):
 * clear CONTROL bit2 (FPCA) so no FP context is active at the switch */
asm volatile ("MRS r2, CONTROL");
asm volatile ("BIC r2, #0x04");
asm volatile ("MSR CONTROL, r2");
#endif
asm volatile ("LDR r1, =InterruptFromKtask");
asm volatile ("MOV r0, #0x0");
asm volatile ("STR r0, [r1]"); /* no outgoing task: PendSV skips the save */
asm volatile ("LDR r1, =KtaskSwitchInterruptFlag");
asm volatile ("MOV r0, #1");
asm volatile ("STR r0, [r1]"); /* mark a switch as pending */
/* SHPR3 |= 0x00FF0000: make PendSV the lowest-priority exception */
asm volatile ("LDR r0, =" NVIC_SYSPRI2);
asm volatile ("LDR r1, =" NVIC_PENDSV_PRI);
// asm volatile ("LDR.W r2, [r0,#0x00]");
asm volatile ("LDR r2, [r0,#0x00]");
asm volatile ("ORR r1,r1,r2");
asm volatile ("STR r1, [r0]");
/* pend PendSV; it runs once interrupts are re-enabled below */
asm volatile ("LDR r0, =" NVIC_INT_CTRL);
asm volatile ("LDR r1, =" NVIC_PENDSVSET);
asm volatile ("STR r1, [r0]");
/* reload MSP with the initial stack pointer: vector table entry 0 */
asm volatile ("LDR r0, =" SCB_VTOR);
asm volatile ("LDR r0, [r0]"); /* r0 = vector table base */
asm volatile ("LDR r0, [r0]"); /* r0 = initial MSP value */
asm volatile ("NOP");
asm volatile ("MSR msp, r0");
/* NOTE(review): FAULTMASK (CPSIE F) is ARMv7-M only — verify this
 * assembles for ARMv6-M targets */
asm volatile ("CPSIE F");
asm volatile ("CPSIE I"); /* clear PRIMASK: the pended PendSV fires now */
asm volatile ("BX lr");
}
/*
 * HardFault entry: select the stack the exception frame was pushed to.
 * EXC_RETURN (in lr) bit2 (0x04): set -> frame on PSP, clear -> on MSP.
 * Thumb-1 has no TST-with-immediate, so the mask is staged through r2.
 * Falls through to Arm32SwitchGetSpDone with r0 = faulting stack pointer.
 */
void __attribute__((naked)) HardFaultHandler()
{
// KPrintf("%s\n", __func__);
asm volatile ("MRS r0, msp"); /* default: frame on the main stack */
// asm volatile ("TST lr, #0x04");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x04");
asm volatile ("TST r1, r2");
asm volatile ("BEQ Arm32SwitchGetSpDone");
asm volatile ("MRS r0, psp"); /* bit2 set: frame on the process stack */
asm volatile ("B Arm32SwitchGetSpDone");
}
/*
 * Continuation of HardFaultHandler: build the extended fault context below
 * the hardware-stacked exception frame, then dispatch to the stack-pointer
 * write-back stubs.
 *
 * In: r0 = faulting stack pointer (MSP or PSP, chosen by the caller).
 * Saved below the hardware frame: lr (EXC_RETURN), then {primask, r4-r11}.
 * ARMv6-M has no STMDB/STMFD, so full-descending stores are emulated with
 * SUB + STMIA, staging the high registers through r3-r6.
 */
void __attribute__((naked)) Arm32SwitchGetSpDone()
{
// KPrintf("%s\n", __func__);
asm volatile ("MRS r3, primask");
/* STMFD r0!, {r3 - r11} replacement: reserve 9 words, store low then high */
asm volatile ("SUB r0, r0, #0x24");
asm volatile ("STMIA r0!, {r3 - r7}");
asm volatile ("MOV r3, r8");
asm volatile ("MOV r4, r9");
asm volatile ("MOV r5, r10");
asm volatile ("MOV r6, r11");
asm volatile ("STMIA r0!, {r3 - r6}");
asm volatile ("SUB r0, r0, #0x24");
/* STMFD r0!, {lr} replacement.
 * BUGFIX: the previous code did "MOV r0, lr", overwriting the context
 * pointer with EXC_RETURN instead of storing lr below it (so the later
 * MSR psp/msp and the handler argument received garbage). Store lr via a
 * low register and keep r0 pointing at the saved context. */
asm volatile ("SUB r0, r0, #0x4");
asm volatile ("MOV r1, lr");
asm volatile ("STR r1, [r0]");
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* FPU-context flag word (VFP builds only; never compiled for ARMv6-M) */
asm volatile ("MOV r4, #0x00");
// asm volatile ("TST lr, #0x10");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x10");
asm volatile ("TST r1, r2");
asm volatile ("MOVEQ r4, #0x01");
asm volatile ("STMFD r0!, {r4}");
#endif
/* EXC_RETURN bit2 selects which stack pointer receives the new top */
// asm volatile ("TST lr, #0x04");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x04");
asm volatile ("TST r1, r2");
asm volatile ("BEQ Arm32SwitchUpdateMsp");
asm volatile ("MSR psp, r0");
asm volatile ("B Arm32SwitchUpdateDone");
/* (removed an unreachable "B Arm32SwitchUpdateMsp" that followed the
 * unconditional branch above) */
}
/*
 * Write the updated context pointer back to MSP (the fault occurred on the
 * main stack), then continue to the common hard-fault tail.
 */
void __attribute__((naked)) Arm32SwitchUpdateMsp()
{
// KPrintf("%s\n", __func__);
asm volatile ("MSR msp, r0");
asm volatile ("B Arm32SwitchUpdateDone");
}
/*
 * Common hard-fault tail: call the C handler (r0 = saved-context pointer is
 * its first argument), then set EXC_RETURN bit2 (0x04) so the exception
 * return uses the process stack, and return from the exception.
 * Thumb-1 has no POP {LR} or ORR-with-immediate, hence the r1/r2 shuffles;
 * the MOV lr,r1 / MOV r1,lr pair is redundant but harmless.
 */
void __attribute__((naked)) Arm32SwitchUpdateDone()
{
// KPrintf("%s\n", __func__);
asm volatile ("PUSH {LR}"); /* BL clobbers lr */
asm volatile ("BL HwHardFaultException");
// asm volatile ("POP {LR}");
asm volatile ("POP {R1}"); /* POP {LR} is not encodable in Thumb-1 */
asm volatile ("MOV lr, r1");
// asm volatile ("ORR lr, lr, #0x04");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x04");
asm volatile ("ORR r1, r2");
asm volatile ("MOV lr, r1"); /* EXC_RETURN bit2: return on PSP */
asm volatile ("BX lr");
}
/*
 * Memory-fault entry: same stack-selection preamble as HardFaultHandler
 * (EXC_RETURN bit2 picks MSP vs PSP), then falls through to Arm32Switch1
 * with r0 = faulting stack pointer.
 * NOTE(review): ARMv6-M has no separate MemManage exception (such faults
 * escalate to HardFault) — confirm how this handler is wired into the
 * vector table for v6-M targets.
 */
void __attribute__((naked)) MemFaultHandler()
{
// KPrintf("%s\n", __func__);
asm volatile ("MRS r0, msp"); /* default: frame on the main stack */
// asm volatile ("TST lr, #0x04");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x04");
asm volatile ("TST r1, r2");
asm volatile ("BEQ Arm32Switch1");
asm volatile ("MRS r0, psp"); /* bit2 set: frame on the process stack */
asm volatile ("B Arm32Switch1");
}
/*
 * Continuation of MemFaultHandler: save the extended context below the
 * hardware exception frame and call the C memory-fault handler, mirroring
 * Arm32SwitchGetSpDone / Arm32SwitchUpdateDone.
 *
 * In: r0 = faulting stack pointer. ARMv6-M has no STMDB/STMFD, so the
 * full-descending stores are emulated with SUB + STMIA, staging the high
 * registers through r3-r6.
 */
void __attribute__((naked)) Arm32Switch1()
{
// KPrintf("%s\n", __func__);
asm volatile ("MRS r3, primask");
/* STMFD r0!, {r3 - r11} replacement for Thumb-1 */
asm volatile ("SUB r0, r0, #0x24");
asm volatile ("STMIA r0!, {r3 - r7}");
asm volatile ("MOV r3, r8");
asm volatile ("MOV r4, r9");
asm volatile ("MOV r5, r10");
asm volatile ("MOV r6, r11");
asm volatile ("STMIA r0!, {r3 - r6}");
asm volatile ("SUB r0, r0, #0x24");
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* FPU-context flag word (VFP builds only; never compiled for ARMv6-M) */
asm volatile ("MOV r4, #0x00");
// asm volatile ("TST lr, #0x10");
asm volatile ("MOV r1, lr");
asm volatile ("MOV r2, #0x10");
asm volatile ("TST r1, r2");
asm volatile ("MOV lr, r1");
asm volatile ("MOVEQ r4, #0x01");
asm volatile ("STMFD r0!, {r4}");
#endif
/* STMFD r0!, {lr} replacement.
 * BUGFIX: the previous code did "MOV r0, lr", overwriting the context
 * pointer with EXC_RETURN instead of storing lr below it. Store lr via a
 * low register and keep r0 pointing at the saved context for the handler. */
asm volatile ("SUB r0, r0, #0x4");
asm volatile ("MOV r1, lr");
asm volatile ("STR r1, [r0]");
asm volatile ("PUSH {LR}"); /* BL clobbers lr */
asm volatile ("BL MemFaultHandle");
// asm volatile ("POP {LR}");
asm volatile ("POP {R5}"); /* POP {LR} is not encodable in Thumb-1 */
asm volatile ("MOV lr, r5");
// asm volatile ("ORR lr, lr, #0x04");
asm volatile ("MOV r5, lr");
asm volatile ("MOV r6, #0x04");
asm volatile ("ORR r5, r6");
asm volatile ("MOV lr, r5"); /* EXC_RETURN bit2: return on PSP */
asm volatile ("BX lr");
}

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard first version
* 2012-01-01 aozima support context switch load/store FPU register.
* 2013-06-18 aozima add restore MSP feature.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhancement hard fault exception handler.
*/
/*************************************************
File name: pendsv.S
Description: PendSV interrupt handler
Others: take RT-Thread v4.0.2/libcpu/arm/cortex-m4/context_gcc.S for references
https://github.com/RT-Thread/rt-thread/tree/v4.0.2
History:
1. Date: 2021-04-25
Author: AIIT XUOS Lab
*************************************************/
#include <xsconfig.h>
/*
 * PendSV_Handler — ARMv6-M (Cortex-M0) task context switch.
 *
 * Flow:
 *   1. If KtaskSwitchInterruptFlag == 0 there is nothing to do -> exit.
 *   2. If InterruptFromKtask != 0, save {primask, r4-r11} below the
 *      hardware-stacked frame on the outgoing task's PSP and store the
 *      resulting sp through the InterruptFromKtask pointer.
 *   3. Update the running task, load the incoming task's saved sp from
 *      InterruptToKtask, restore {primask, r4-r11}, write PSP.
 *   4. Return with EXC_RETURN bit2 set so thread code resumes on PSP.
 *
 * ARMv6-M notes: STMDB/LDMDB, CBZ and most immediate-form data ops are not
 * available, so full-descending pushes are emulated with SUBS + STMIA and
 * high registers are staged through r3-r6.
 */
.cpu cortex-m0
.syntax unified
.thumb
.text

/* SCB / NVIC register addresses and masks */
.equ SCB_VTOR, 0xE000ED08
.equ NVIC_INT_CTRL, 0xE000ED04
.equ NVIC_SYSPRI2, 0xE000ED20
.equ NVIC_PENDSV_PRI, 0x00FF0000
.equ NVIC_PENDSVSET, 0x10000000

.globl PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    MRS r3, PRIMASK                 /* preserve interrupt mask in r3 */
    CPSID I                         /* interrupts off during the switch */
    LDR r0, =KtaskSwitchInterruptFlag
    LDR r1, [r0]
    /*CBZ r1, switch_to_task*/      /* CBZ is ARMv7-M only */
    CMP r1, #0
    BEQ pendsv_exit                 /* no switch requested */
    MOVS r1, #0x00
    STR r1, [r0]                    /* consume the request */
    LDR r0, =InterruptFromKtask
    LDR r1, [r0]
    /*CBZ r1, switch_to_task*/
    CMP r1, #0
    BEQ switch_to_task              /* no outgoing task: skip the save */

    /* save {r3(primask), r4-r11} below the hardware-stacked frame */
    MRS r1, psp
    /*STMFD r1!, {r3 - r11}*/
    SUBS r1, #0x24
    STMIA r1!, {r3 - r7}
    MOV r3, r8
    MOV r4, r9
    MOV r5, r10
    MOV r6, r11
    STMIA r1!, {r3 - r6}
    SUBS r1, #0x24
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* FPU flag word — dead on v6-M (no FPU); TST/MOVEQ are v7-M forms */
    MOV r4, #0x00
    TST lr, #0x10
    MOVEQ r4, #0x01
    /*STMFD r1!, {r4}*/
    SUBS r1, #0x4
    STMIA r1!, {r4}
    SUBS r1, #0x4
#endif
    LDR r0, [r0]                    /* r0 = InterruptFromKtask (sp slot) */
    STR r1, [r0]                    /* store outgoing task's new sp */

switch_to_task:
    PUSH {lr}                       /* BL clobbers lr; keep EXC_RETURN */
    BL UpdateRunningTask
    POP {r0}                        /* POP {lr} is not encodable in Thumb-1 */
    MOV lr, r0
#ifdef TASK_ISOLATION
    PUSH {lr}
    BL GetTaskPrivilege             /* r0 = privilege of the incoming task */
    /*POP {lr}*/
    POP {r1}                        /* BUGFIX: was POP {r0}, which destroyed
                                       the privilege value just returned in
                                       r0 before the CMP below */
    MOV lr, r1
#endif
    LDR r1, =InterruptToKtask
    LDR r1, [r1]
    LDR r1, [r1]                    /* r1 = incoming task's saved sp */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD r1!, {r2}
#endif
    /*LDMFD r1!, {r3 - r11}*/
    /* restore the high registers first (0x14 above sp), then the low block,
     * because v6-M LDM can only target low registers */
    ADDS r1, #0x14
    LDMFD r1!, {r3 - r6}
    MOV r8, r3
    MOV r9, r4
    MOV r10, r5
    MOV r11, r6
    SUBS r1, #0x24
    LDMFD r1!, {r3 - r7}            /* r3 = saved primask, r4-r7 restored */
    ADDS r1, #0x10
    MSR psp, r1                     /* incoming task's stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /*ORR lr, lr, #0x10*/
    MOV r2, lr
    MOVS r3, #0x10
    ORRS r2, r3
    MOV lr, r2
    CMP r2, #0
    BICNE lr, lr, #0x10
#endif
    MRS r2, control
#ifdef TASK_ISOLATION
    CMP r0, #1
    BEQ unprivilege
privilege:
    /* BUGFIX: BIC-with-immediate does not exist on ARMv6-M; stage the mask
     * in a register, as the non-isolation path below already does */
    MOVS r1, #0x01
    BICS r2, r1                     /* CONTROL.nPRIV = 0: privileged */
    B exit
unprivilege:
    /*ORR r2, r2, #0x01*/
    MOVS r1, #0x01
    ORRS r2, r1                     /* CONTROL.nPRIV = 1: unprivileged */
#else
    /*BIC r2, r2, #0x01*/
    MOVS r0, #0x01
    BICS r2, r0                     /* always run threads privileged */
#endif
exit:
    MSR control, r2
pendsv_exit:
    /*ORR lr, lr, #0x04*/
    MOV r0, lr
    MOVS r1, #0x04
    ORRS r0, r1                     /* EXC_RETURN bit2: return on PSP */
    MOV lr, r0
    MSR PRIMASK, r3                 /* restore interrupt mask */
    BX lr

View File

@ -0,0 +1,3 @@
# Context-switch sources shared by the boards of this architecture variant:
# pendsv.S (PendSV switch handler) and arm32_switch.c (C switch helpers).
SRC_FILES := pendsv.S arm32_switch.c
include $(KERNEL_ROOT)/compiler.mk

View File

@ -1,4 +1,4 @@
# pendsv.S and arm32_switch.c moved to the shared armv6m/armv7m directories;
# only the board-specific stack preparation remains here.
# NOTE(review): the first SRC_FILES line is immediately overwritten by the
# second (":=" reassignment) — it appears to be leftover text from the
# previous revision; confirm it can be dropped.
SRC_FILES := pendsv.S prepare_ahwstack.c arm32_switch.c
SRC_FILES := prepare_ahwstack.c
include $(KERNEL_ROOT)/compiler.mk