fix: remove redundant comments and fix macro definitions that were incompatible with the non-SMP build

sherecho 2024-10-12 17:27:28 +08:00
parent 98aa34d99a
commit 1347fb0cdf
7 changed files with 22 additions and 36 deletions

View File

@@ -21,28 +21,13 @@
 #define configASSERT(x) assert(x)
 #define INVALID_PRIMARY_CORE_NUM 0xffu
 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
+// typedef uint32_t BaseType_t;
+#if(USE_SMP==1u)
 /* Multi-core */
 #define portMAX_CORE_COUNT 2
-/* Requires for SMP */
-#define portCRITICAL_NESTING_IN_TCB 1
 // Get the core ID
 #define port_GET_CORE_ID() get_core_num()
-// These macros are used by the RTOS implementation to control interrupts and task switching
-#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
-#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
-#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
-// Decide whether a task switch is required
-#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
-typedef uint32_t BaseType_t;
 #define portRTOS_SPINLOCK_COUNT 2
-#define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : )
-/* Note this is a single method with uxAcquire parameter since we have
- * static vars, the method is always called with a compile time constant for
- * uxAcquire, and the compiler should do the right thing! */
 extern uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ];
 extern uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];
 static void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) {
@@ -85,12 +70,7 @@ extern uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];
 #define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), (BaseType_t)0)
 #define portGET_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), (BaseType_t)1)
 #define portRELEASE_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), (BaseType_t) 0)
+#endif
-#define portCHECK_IF_IN_ISR() ({ \
-uint32_t ulIPSR; \
-__asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \
-((uint8_t)ulIPSR)>0;})
 #endif
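The body of vPortRecursiveLock is collapsed in this diff. For readers of the port, the sketch below shows how such a per-core recursive lock is typically built on the pico-sdk spin-lock primitives (spin_lock_unsafe_blocking, spin_unlock_unsafe, get_core_num), using the same ucOwnedByCore / ucRecursionCountByLock bookkeeping declared above. The body itself is an assumption for illustration, not the port's actual code.

/* Sketch only: an assumed implementation of a per-core recursive spin lock,
 * not the port's actual vPortRecursiveLock body. */
#include <stdint.h>
#include "hardware/sync.h"   /* spin_lock_t, spin_lock_unsafe_blocking(), spin_unlock_unsafe() */
#include "pico/platform.h"   /* get_core_num() */

#define portMAX_CORE_COUNT      2
#define portRTOS_SPINLOCK_COUNT 2

typedef uint32_t BaseType_t;  /* provided elsewhere in the real port */

uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ];
uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];

static void vPortRecursiveLock( uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire )
{
    uint32_t ulCoreNum = get_core_num();
    uint8_t ucLockBit = ( uint8_t ) ( 1u << ulLockNum );

    if( uxAcquire )
    {
        /* Take the hardware lock only if this core does not already hold it;
         * otherwise just bump the recursion count. */
        if( ( ucOwnedByCore[ ulCoreNum ] & ucLockBit ) == 0u )
        {
            spin_lock_unsafe_blocking( pxSpinLock );
            ucOwnedByCore[ ulCoreNum ] |= ucLockBit;
        }
        ucRecursionCountByLock[ ulLockNum ]++;
    }
    else
    {
        /* Drop the hardware lock only when the outermost nesting level exits. */
        if( --ucRecursionCountByLock[ ulLockNum ] == 0u )
        {
            ucOwnedByCore[ ulCoreNum ] &= ( uint8_t ) ~ucLockBit;
            spin_unlock_unsafe( pxSpinLock );
        }
    }
}

With a body of this shape, the portGET_TASK_LOCK()/portRELEASE_TASK_LOCK() macros above reduce to balanced acquire/release calls on spin lock 1, and nested calls from the same core only touch the recursion counter.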

View File

@@ -156,8 +156,6 @@ __PORT__ __STATIC__ void prvFIFOInterruptHandler(void)
 /* And explicitly clear any other IRQ flags */
 multicore_fifo_clear_irq();
 #if (USE_SMP == 1u)
-//portYIELD_FROM_ISR(1);
-//port_context_switch();
 knl_sched();
 #endif /* portRUNNING_ON_BOTH_CORES */
 }
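Only the tail of the handler is visible in this hunk. The sketch below illustrates the usual cross-core "doorbell" pattern such a handler implements, assuming the pico-sdk multicore FIFO API (multicore_fifo_rvalid, multicore_fifo_pop_blocking, multicore_fifo_clear_irq); only the last few lines correspond to what the diff actually shows, the rest is an assumption.

/* Sketch (assumption): core-to-core interrupt handler that drains the SIO
 * FIFO doorbell, clears the IRQ flags and asks the kernel to reschedule. */
#include "pico/multicore.h"

#define USE_SMP 1u                 /* build configuration; normally set in the port config */
extern void knl_sched(void);       /* TencentOS-tiny scheduler entry */

static void prvFIFOInterruptHandler(void)
{
    /* Drain any doorbell values pushed by the other core. */
    while (multicore_fifo_rvalid()) {
        (void)multicore_fifo_pop_blocking();
    }

    /* And explicitly clear any other IRQ flags. */
    multicore_fifo_clear_irq();

#if (USE_SMP == 1u)
    /* Let the scheduler pick the next task for this core. */
    knl_sched();
#endif
}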

View File

@@ -1,4 +1,4 @@
-.equ SMP, 1
+@.equ SMP, 1
 .global port_int_disable
 .global port_int_enable

View File

@@ -36,9 +36,12 @@ extern readyqueue_t k_rdyq;
 extern k_tick_t k_tick_count;
 /* current task */
-extern k_task_t *k_curr_task;
 extern k_task_t *k_curr_tasks[configNUM_CORES];
+#if(USE_SMP==1u)
 #define k_curr_task TaskGetCurrentKernelTaskHandle()
+#else
+extern k_task_t *k_curr_task;
+#endif
 k_task_t * TaskGetCurrentKernelTaskHandle(void);
 /* next task to run */
 extern k_task_t *k_next_task;
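The effect of this guard is that call sites keep using the single name k_curr_task in both builds: under SMP it expands to a function call that resolves the current core's task, while the single-core build keeps the plain global. A hypothetical caller (demo name only) to illustrate:

#include "tos_k.h"   /* assumed umbrella header providing k_task_t and the guard above */

/* Illustration only: the caller is written once and compiles in both builds.
 * With USE_SMP == 1u the read below becomes TaskGetCurrentKernelTaskHandle();
 * otherwise it is a direct read of the extern variable. */
int demo_task_is_current(k_task_t *task)
{
    return task == k_curr_task;
}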

View File

@@ -132,10 +132,10 @@ struct k_task_st {
 by tos_event_pend to the caller */
 #endif
 #if (USE_SMP == 1u )
-BaseType_t RunningOnCore;
-BaseType_t IsIDLE;
+BaseType_t RunningOnCore; /**< core ID while the task is running on a core; set to -1 when the task is not running */
+BaseType_t IsIDLE; /**< 1 if this is the idle task, 0 otherwise */
 #if ( configUSE_CORE_AFFINITY == 1u )
-BaseType_t CoreAffinityMask;
+BaseType_t CoreAffinityMask; /**< the mask of cores this task is bound to */
 #endif
 #endif
 };
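The new field comments describe the intent; the small helper below sketches how a scheduler might consume CoreAffinityMask, read as one bit per core as the comment suggests. The helper name and the bit-per-core interpretation are assumptions for illustration, not code from this repository.

#include <stdint.h>
#include "tos_k.h"   /* assumed umbrella header providing k_task_t and configNUM_CORES */

/* Sketch: treat CoreAffinityMask as one bit per core, so bit n set means the
 * task may be scheduled on core n. */
#define AFFINITY_ALL_CORES   ( ( 1u << configNUM_CORES ) - 1u )

static inline int task_can_run_on_core(const k_task_t *task, uint32_t core_id)
{
#if ( configUSE_CORE_AFFINITY == 1u )
    return ( (uint32_t)task->CoreAffinityMask & ( 1u << core_id ) ) != 0u;
#else
    (void)task;
    (void)core_id;
    return 1;   /* no affinity support: any core may run the task */
#endif
}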

View File

@@ -26,9 +26,7 @@ knl_state_t k_knl_states[configNUM_CORES] = {KNL_STATE
 readyqueue_t k_rdyq;
 k_tick_t k_tick_count = (k_tick_t)0u;
-//k_task_t *k_curr_task = K_NULL;
-k_task_t * k_curr_tasks[configNUM_CORES] = {K_NULL};
+#if(USE_SMP==1u)
 k_task_t * TaskGetCurrentKernelTaskHandle(void){
 k_task_t * xReturn;
 TOS_CPU_CPSR_ALLOC();
@@ -39,6 +37,10 @@ k_task_t * TaskGetCurrentKernelTaskHandle(void){
 TOS_CPU_INT_ENABLE();
 return xReturn;
 }
+#else
+k_task_t *k_curr_task = K_NULL;
+#endif
+k_task_t * k_curr_tasks[configNUM_CORES] = {K_NULL};
 k_task_t *k_next_task = K_NULL;
 k_task_t *k_next_tasks[configNUM_CORES] = {K_NULL};
 k_task_t k_idle_tasks[configNUM_CORES] ;
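The middle of TaskGetCurrentKernelTaskHandle() falls between the two hunks and is not shown. The sketch below is an assumed shape of such an accessor, using the port_GET_CORE_ID() macro from the port header and the per-core k_curr_tasks[] slot: the read happens with interrupts masked so the lookup cannot race with a context switch. Only the enter/exit lines appear in the diff; the rest is an assumption.

#include "tos_k.h"   /* assumed umbrella header: k_task_t, TOS_CPU_* macros, configNUM_CORES */

extern k_task_t *k_curr_tasks[configNUM_CORES];

/* Assumed shape of the per-core current-task accessor. */
k_task_t *TaskGetCurrentKernelTaskHandle(void)
{
    k_task_t *xReturn;
    TOS_CPU_CPSR_ALLOC();

    TOS_CPU_INT_DISABLE();
    /* Read this core's slot atomically with respect to the scheduler. */
    xReturn = k_curr_tasks[ port_GET_CORE_ID() ];
    TOS_CPU_INT_ENABLE();

    return xReturn;
}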

View File

@@ -260,7 +260,7 @@ __API__ k_err_t tos_knl_sched_unlock(void)
 knl_sched();
 return K_ERR_NONE;
 }
+#if USE_SMP ==1u
 __API__ k_err_t tos_knl_start_smp(void)
 {
 // Start the application core
@@ -271,6 +271,7 @@ __API__ k_err_t tos_knl_start_smp(void)
 }
 return K_ERR_NONE;
 }
+#endif
 __API__ k_err_t tos_knl_start(void)
 {
 if (unlikely(tos_knl_is_running())) {
@@ -427,9 +428,11 @@ __STATIC__ void knl_idle_entry(void *arg)
 #if TOS_CFG_PWR_MGR_EN > 0u
 pm_power_manager();
 #endif
+#if(USE_SMP==1u)
 if(k_rdyq.highest_prio<K_TASK_PRIO_IDLE){
 knl_sched();
 }
+#endif
 }
 }
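In context, the added guard sits inside the idle loop: under SMP a core parked in idle polls the ready queue and yields as soon as any task with a better (numerically lower) priority than idle becomes ready, since nothing else forces an idle core to reschedule. A sketch of the surrounding loop, with assumed includes, follows; only the #if block is what this commit adds.

#include "tos_k.h"   /* assumed umbrella header: readyqueue_t, K_TASK_PRIO_IDLE, knl_sched(), pm_power_manager() */

extern readyqueue_t k_rdyq;

/* Sketch of the idle entry; the loop body outside the #if block is assumed. */
__STATIC__ void knl_idle_entry(void *arg)
{
    (void)arg;

    while (1) {
#if TOS_CFG_PWR_MGR_EN > 0u
        pm_power_manager();
#endif

#if (USE_SMP == 1u)
        /* Lower number == higher priority: anything better than idle is ready,
         * so hand the core back to the scheduler instead of spinning here. */
        if (k_rdyq.highest_prio < K_TASK_PRIO_IDLE) {
            knl_sched();
        }
#endif
    }
}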