From 9acf24a83e9e610ce222890e33caf68d06332b0a Mon Sep 17 00:00:00 2001
From: sherecho <2443397610@qq.com>
Date: Thu, 10 Oct 2024 16:16:38 +0800
Subject: [PATCH] Add SMP support: inter-core mutual exclusion, core affinity,
 and SMP-aware scheduling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The new SMP branch supports both the existing robin scheduling and the
preemptive priority scheduling. SMP support is implemented for all existing
features and is fully compatible with the existing API: a single-core
application needs no API changes, it only has to define the USE_SMP macro as
1 to run on multiple cores in an SMP environment. To enable core-affinity
support, additionally define the configUSE_CORE_AFFINITY macro as 1.

---
 arch/arm/arm-v7m/cortex-m0+/gcc/port_c.c | 2 +
 arch/arm/pico-RP2040/gcc/picoPortMacro.h | 99 +++++++++
 arch/arm/pico-RP2040/gcc/port.h | 81 ++++++++
 arch/arm/pico-RP2040/gcc/port_c.c | 178 ++++++++++++++++
 arch/arm/pico-RP2040/gcc/port_config.h | 30 +++
 arch/arm/pico-RP2040/gcc/port_s.S | 213 ++++++++++++++++++++
 core/include/tos_global.h | 10 +-
 core/include/tos_k.h | 7 +-
 core/include/tos_sched.h | 1 +
 core/include/tos_sys.h | 14 ++
 core/include/tos_task.h | 16 +-
 core/tos_barrier.c | 11 +
 core/tos_binary_heap.c | 12 ++
 core/tos_char_fifo.c | 7 +-
 core/tos_completion.c | 18 ++
 core/tos_countdownlatch.c | 13 ++
 core/tos_event.c | 11 +
 core/tos_global.c | 21 +-
 core/tos_mail_queue.c | 19 +-
 core/tos_message_queue.c | 15 ++
 core/tos_mmblk.c | 6 +
 core/tos_mutex.c | 16 ++
 core/tos_priority_mail_queue.c | 16 +-
 core/tos_priority_message_queue.c | 15 ++
 core/tos_priority_queue.c | 10 +
 core/tos_ring_queue.c | 12 ++
 core/tos_robin.c | 52 ++++-
 core/tos_sched.c | 136 ++++++++++++-
 core/tos_sem.c | 13 ++
 core/tos_sys.c | 245 +++++++++++++++++++++--
 core/tos_task.c | 130 ++++++++++--
 core/tos_tick.c | 24 ++-
 core/tos_time.c | 4 +
 core/tos_timer.c | 6 +
 osal/cmsis_os/cmsis_os.c | 10 +-
 osal/cmsis_os/cmsis_os2.c | 24 ++-
 osal/posix/mqueue_prv.c | 7 +
 osal/posix/pthread_prv.c | 10 +
 osal/posix/timer_prv.c | 7 +
 pm/tos_pm.c | 2 +
 pm/tos_tickless.c | 5 +
 41 files changed, 1470 insertions(+), 58 deletions(-)
 create mode 100644 arch/arm/pico-RP2040/gcc/picoPortMacro.h
 create mode 100644 arch/arm/pico-RP2040/gcc/port.h
 create mode 100644 arch/arm/pico-RP2040/gcc/port_c.c
 create mode 100644 arch/arm/pico-RP2040/gcc/port_config.h
 create mode 100644 arch/arm/pico-RP2040/gcc/port_s.S
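As described above, an existing single-core application opts in with two configuration macros. The sketch below is illustrative only and is not part of this patch: USE_SMP, configUSE_CORE_AFFINITY, tos_knl_smp_init(), tos_knl_start_smp() and tos_TaskCoreAffinitySet() are taken from the patch, while the file placement, task names, priority, stack size and the exact call order are assumptions.

/* Assumed to live in the project's tos_config.h so the whole kernel build sees them. */
#define USE_SMP                  1    /* compile the kernel with SMP support      */
#define configUSE_CORE_AFFINITY  1    /* optional: enable the core-affinity API   */

/* Assumed application code. */
#include "tos_k.h"

#define DEMO_TASK_STK_SIZE  512

static k_task_t  demo_task;
static k_stack_t demo_task_stk[DEMO_TASK_STK_SIZE];

static void demo_entry(void *arg)
{
    (void)arg;
    for (;;) {
        tos_task_delay(100);                       /* ordinary single-core API, unchanged */
    }
}

void demo_smp_setup(void)                          /* illustrative entry point */
{
    tos_knl_smp_init();                            /* assumed call order: SMP-aware kernel init */
    (void)tos_task_create(&demo_task, "demo", demo_entry, K_NULL,
                          4u, demo_task_stk, DEMO_TASK_STK_SIZE, 0u);
#if (configUSE_CORE_AFFINITY == 1u)
    tos_TaskCoreAffinitySet(&demo_task, 1u << 1);  /* pin this task to core 1 only */
#endif
    tos_knl_start_smp();                           /* start SMP scheduling on both cores */
}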
diff --git a/arch/arm/arm-v7m/cortex-m0+/gcc/port_c.c b/arch/arm/arm-v7m/cortex-m0+/gcc/port_c.c
index 40f810f..c65e19c 100644
--- a/arch/arm/arm-v7m/cortex-m0+/gcc/port_c.c
+++ b/arch/arm/arm-v7m/cortex-m0+/gcc/port_c.c
@@ -16,8 +16,10 @@
  *---------------------------------------------------------------------------*/
 
 #include "tos_k.h"
+
 #include "core_cm0plus.h"
+
 __PORT__ void port_cpu_reset(void)
 {
     NVIC_SystemReset();
diff --git a/arch/arm/pico-RP2040/gcc/picoPortMacro.h b/arch/arm/pico-RP2040/gcc/picoPortMacro.h
new file mode 100644
index 0000000..720c653
--- /dev/null
+++ b/arch/arm/pico-RP2040/gcc/picoPortMacro.h
@@ -0,0 +1,99 @@
+#ifndef _PICOPORTMACRO_
+#define _PICOPORTMACRO_
+#include "hardware/sync.h"
+#include "hardware/exception.h" // header used to install/override exception handlers
+#include "pico/multicore.h"
+#include "hardware/clocks.h"
+#include "hardware/exception.h"
+#include "hardware/irq.h"
+#include "pico.h"
+ #include
+ #include
+ #ifndef configSMP_SPINLOCK_0
+ #define configSMP_SPINLOCK_0 PICO_SPINLOCK_ID_OS1
+ #endif
+
+ #ifndef configSMP_SPINLOCK_1
+ #define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2
+ #endif
+
+ /* Define to trap errors during development. */
+ #define configASSERT(x) assert(x)
+ #define INVALID_PRIMARY_CORE_NUM 0xffu
+ #define portMIN_INTERRUPT_PRIORITY ( 255UL )
+//
+/* Multi-core */
+ #define portMAX_CORE_COUNT 2
+/* Requires for SMP */
+ #define portCRITICAL_NESTING_IN_TCB 1
+
+// get the ID of the core we are currently running on
+#define port_GET_CORE_ID() get_core_num()
+// these macros are used by the RTOS to control interrupts and request task switches
+#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
+#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
+#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
+// decide whether a task switch is required
+#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
+
+typedef uint32_t BaseType_t;
+#define portRTOS_SPINLOCK_COUNT 2
+#define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : )
+
+ /* Note this is a single method with uxAcquire parameter since we have
+ * static vars, the method is always called with a compile time constant for
+ * uxAcquire, and the compiler should do the right thing!
*/ +extern uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; +extern uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; + static void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { + + configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); + uint32_t ulCoreNum = get_core_num(); + + uint32_t ulLockBit = 1u << ulLockNum; + configASSERT(ulLockBit < 256u ); + if( uxAcquire ) + { + //printf("Acquire spinlock core :!%d \r\n", ulCoreNum); + if( __builtin_expect( !*pxSpinLock, 0 ) ) + { + if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + { + configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); + ucRecursionCountByLock[ulLockNum]++; + return; + } + while ( __builtin_expect( !*pxSpinLock, 0 ) ); + } + __mem_fence_acquire(); + configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); + ucRecursionCountByLock[ulLockNum] = 1; + ucOwnedByCore[ulCoreNum] |= ulLockBit; + } else { + //printf("Release spinlock core :!%d \r\n", ulCoreNum); + configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); + configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); + if( !--ucRecursionCountByLock[ulLockNum] ) + { + ucOwnedByCore[ulCoreNum] &= ~ulLockBit; + __mem_fence_release(); + *pxSpinLock = 1; + } + } + } +#define portGET_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), (BaseType_t)1) +#define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), (BaseType_t)0) +#define portGET_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), (BaseType_t)1) +#define portRELEASE_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), (BaseType_t) 0) + +#define portCHECK_IF_IN_ISR() ({ \ + uint32_t ulIPSR; \ + __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ + ((uint8_t)ulIPSR)>0;}) + + +#endif + + + + diff --git a/arch/arm/pico-RP2040/gcc/port.h b/arch/arm/pico-RP2040/gcc/port.h new file mode 100644 index 0000000..cbb5d36 --- /dev/null +++ b/arch/arm/pico-RP2040/gcc/port.h @@ -0,0 +1,81 @@ +/*---------------------------------------------------------------------------- + * Tencent is pleased to support the open source community by making TencentOS + * available. + * + * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. + * If you have downloaded a copy of the TencentOS binary from Tencent, please + * note that the TencentOS binary is licensed under the BSD 3-Clause License. + * + * If you have downloaded a copy of the TencentOS source code from Tencent, + * please note that TencentOS source code is licensed under the BSD 3-Clause + * License, except for the third-party components listed below which are + * subject to different license terms. Your integration of TencentOS into your + * own projects may require compliance with the BSD 3-Clause License, as well + * as the other licenses applicable to the third-party components included + * within TencentOS. 
+ *---------------------------------------------------------------------------*/ + +#ifndef _PORT_H_ +#define _PORT_H_ +#include "picoPortMacro.h" +__PORT__ void port_int_disable(void); + +__PORT__ void port_int_enable(void); + +__PORT__ cpu_cpsr_t port_cpsr_save(void); + +__PORT__ void port_cpsr_restore(cpu_cpsr_t cpsr); + +__PORT__ void port_cpu_reset(void); + +__PORT__ void port_sched_start(void) __NO_RETURN__; + +__PORT__ void port_context_switch(void); + +__PORT__ void port_irq_context_switch(void); + +__PORT__ void port_systick_config(uint32_t cycle_per_tick); + +__PORT__ void port_systick_priority_set(uint32_t prio); + +#if TOS_CFG_TICKLESS_EN > 0u + +__PORT__ void port_systick_resume(void); + +__PORT__ void port_systick_suspend(void); + +__PORT__ void port_systick_reload(uint32_t cycle_per_tick); + +__PORT__ void port_systick_pending_reset(void); + +__PORT__ k_time_t port_systick_max_delay_millisecond(void); + +#endif + +#if TOS_CFG_PWR_MGR_EN > 0u + +__PORT__ void port_sleep_mode_enter(void); + +__PORT__ void port_stop_mode_enter(void); + +__PORT__ void port_standby_mode_enter(void); + +#endif + + +#if TOS_CFG_FAULT_BACKTRACE_EN > 0u +__PORT__ void HardFault_Handler(void); + +__PORT__ void port_fault_diagnosis(void); +#endif + +#if USE_SMP > 0u +__PORT__ void portSCHED_CORE (int xCoreID ); +__PORT__ static void prvFIFOInterruptHandler(void); +__PORT__ void port_smp_init_kernel(void); +#define port_multicore_launch(x) multicore_launch_core1(x) +extern uint8_t ucPrimaryCoreNum ; +#endif + +#endif /* _PORT_H_ */ + diff --git a/arch/arm/pico-RP2040/gcc/port_c.c b/arch/arm/pico-RP2040/gcc/port_c.c new file mode 100644 index 0000000..156427e --- /dev/null +++ b/arch/arm/pico-RP2040/gcc/port_c.c @@ -0,0 +1,178 @@ +/*---------------------------------------------------------------------------- + * Tencent is pleased to support the open source community by making TencentOS + * available. + * + * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. + * If you have downloaded a copy of the TencentOS binary from Tencent, please + * note that the TencentOS binary is licensed under the BSD 3-Clause License. + * + * If you have downloaded a copy of the TencentOS source code from Tencent, + * please note that TencentOS source code is licensed under the BSD 3-Clause + * License, except for the third-party components listed below which are + * subject to different license terms. Your integration of TencentOS into your + * own projects may require compliance with the BSD 3-Clause License, as well + * as the other licenses applicable to the third-party components included + * within TencentOS. 
+ *---------------------------------------------------------------------------*/ + +#include "tos_k.h" + +#include "core_cm0plus.h" + +extern __KNL__ void knl_sched(void); + +__PORT__ void port_cpu_reset(void) +{ + NVIC_SystemReset(); +} + +__PORT__ void port_systick_config(uint32_t cycle_per_tick) +{ + (void)SysTick_Config(cycle_per_tick); +} + +__PORT__ void port_systick_priority_set(uint32_t prio) +{ + NVIC_SetPriority(SysTick_IRQn, prio); +} + +#if TOS_CFG_TICKLESS_EN > 0u + +__PORT__ k_time_t port_systick_max_delay_millisecond(void) +{ + k_time_t max_millisecond; + uint32_t max_cycle; + + max_cycle = SysTick_LOAD_RELOAD_Msk; // 24 bit + max_millisecond = (k_time_t)((uint64_t)max_cycle * K_TIME_MILLISEC_PER_SEC / TOS_CFG_CPU_CLOCK); // CLOCK: cycle per second + return max_millisecond; +} + +__PORT__ void port_systick_resume(void) +{ + SysTick->CTRL |= SysTick_CTRL_TICKINT_Msk; + SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk; +} + +__PORT__ void port_systick_suspend(void) +{ + SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk; + SysTick->CTRL &= ~SysTick_CTRL_TICKINT_Msk; +} + +__PORT__ void port_systick_reload(uint32_t cycle_per_tick) +{ + port_systick_config(cycle_per_tick); +} + +__PORT__ void port_systick_pending_reset(void) +{ + SCB->ICSR |= SCB_ICSR_PENDSTCLR_Msk; +} + +#endif + +#if TOS_CFG_PWR_MGR_EN > 0u + +__PORT__ void port_sleep_mode_enter(void) +{ +#if 1 + HAL_PWR_EnterSLEEPMode(PWR_LOWPOWERREGULATOR_ON, PWR_SLEEPENTRY_WFI); +#else + HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI); +#endif +} + +__PORT__ void port_stop_mode_enter(void) +{ + HAL_PWR_EnterSTOPMode(PWR_LOWPOWERREGULATOR_ON, PWR_STOPENTRY_WFI); +} + +__PORT__ void port_standby_mode_enter(void) +{ + HAL_PWR_EnterSTANDBYMode(); +} + +#endif + +#if TOS_CFG_FAULT_BACKTRACE_EN > 0u +__PORT__ void port_fault_diagnosis(void) +{ + k_fault_log_writer("fault diagnosis is not supported in CORTEX M0+\n"); +} + +/*------------------ RealView Compiler -----------------*/ +/* V5 */ +#if defined(__CC_ARM) + +__PORT__ __ASM__ void HardFault_Handler(void) +{ + IMPORT fault_backtrace + + MOV r0, lr + TST lr, #0x04 + ITE EQ + MRSEQ r1, MSP + MRSNE r1, PSP + BL fault_backtrace +} + +/*------------------ ARM Compiler V6 -------------------*/ +#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + +__PORT__ void __NAKED__ HardFault_Handler(void) +{ + __ASM__ __VOLATILE__ ( + "MOV r0, lr\n\t" + "TST lr, #0x04\n\t" + "ITE EQ\n\t" + "MRSEQ r1, MSP\n\t" + "MRSNE r1, PSP\n\t" + "BL fault_backtrace\n\t" + ); +} + +#endif /* ARMCC VERSION */ + +#endif /* TOS_CFG_FAULT_BACKTRACE_EN */ + +#if USE_SMP ==1u +#define INVALID_PRIMARY_CORE_NUM 0xffu +uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM; +__PORT__ void portSCHED_CORE (int xCoreID ){ + assert(xCoreID != port_GET_CORE_ID()); + #if USE_SMP>0u + /* Non blocking, will cause interrupt on other core if the queue isn't already full, + in which case an IRQ must be pending */ + sio_hw->fifo_wr = 0; + #endif +} + +__PORT__ __STATIC__ void prvFIFOInterruptHandler(void) +{ + /* We must remove the contents (which we don't care about) + * to clear the IRQ */ + multicore_fifo_drain(); + /* And explicitly clear any other IRQ flags */ + multicore_fifo_clear_irq(); + #if (USE_SMP == 1u) + //portYIELD_FROM_ISR(1); + //port_context_switch(); + knl_sched(); + #endif /* portRUNNING_ON_BOTH_CORES */ +} + +__PORT__ void port_smp_init_kernel(void){ + //设置PENDSV_EXCEPTION中断处理函数,用于切换上下文 + //exception_set_exclusive_handler( PENDSV_EXCEPTION, PendSV_smp_Handler); + /* Install FIFO handler to 
receive interrupt from other core */ + multicore_fifo_clear_irq(); + multicore_fifo_drain(); + uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num(); + irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY ); + irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler ); + irq_set_enabled( ulIRQNum, 1 ); +} +uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]={0}; +uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]={0}; +#endif diff --git a/arch/arm/pico-RP2040/gcc/port_config.h b/arch/arm/pico-RP2040/gcc/port_config.h new file mode 100644 index 0000000..cc3d1e7 --- /dev/null +++ b/arch/arm/pico-RP2040/gcc/port_config.h @@ -0,0 +1,30 @@ +/*---------------------------------------------------------------------------- + * Tencent is pleased to support the open source community by making TencentOS + * available. + * + * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. + * If you have downloaded a copy of the TencentOS binary from Tencent, please + * note that the TencentOS binary is licensed under the BSD 3-Clause License. + * + * If you have downloaded a copy of the TencentOS source code from Tencent, + * please note that TencentOS source code is licensed under the BSD 3-Clause + * License, except for the third-party components listed below which are + * subject to different license terms. Your integration of TencentOS into your + * own projects may require compliance with the BSD 3-Clause License, as well + * as the other licenses applicable to the third-party components included + * within TencentOS. + *---------------------------------------------------------------------------*/ + +#ifndef _PORT_CONFIG_H_ +#define _PORT_CONFIG_H_ + +#define TOS_CFG_CPU_ADDR_SIZE CPU_WORD_SIZE_32 +#define TOS_CFG_CPU_DATA_SIZE CPU_WORD_SIZE_32 +#define TOS_CFG_CPU_STK_GROWTH CPU_STK_GROWTH_DESCENDING +// #define TOS_CFG_CPU_HRTIMER_SIZE CPU_WORD_SIZE_32 +#define TOS_CFG_CPU_HRTIMER_EN 0u +#define TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT 0u +#define TOS_CFG_CPU_BYTE_ORDER CPU_BYTE_ORDER_LITTLE_ENDIAN + +#endif /* _PORT_CONFIG_H_ */ + diff --git a/arch/arm/pico-RP2040/gcc/port_s.S b/arch/arm/pico-RP2040/gcc/port_s.S new file mode 100644 index 0000000..5b6d330 --- /dev/null +++ b/arch/arm/pico-RP2040/gcc/port_s.S @@ -0,0 +1,213 @@ + .equ SMP, 1 + .global port_int_disable + .global port_int_enable + + .global port_cpsr_save + .global port_cpsr_restore + + .global port_sched_start + .global port_context_switch + .global port_irq_context_switch + .ifdef SMP + .global PendSV_smp_Handler + .global k_curr_tasks + .global k_next_tasks + .else + .global PendSV_Handler + .global k_curr_task + .global k_next_task + .endif + + + +.equ NVIC_INT_CTRL , 0xE000ED04 @Interrupt control state register. +.equ NVIC_SYSPRI14 , 0xE000ED20 @System priority register (priority 14). +.equ NVIC_PENDSV_PRI , 0x00FF0000 @ PendSV priority value (lowest). +.equ NVIC_PENDSVSET , 0x10000000 @ Value to trigger PendSV exception. 
+.equ SIO_ID , 0xD0000000 @ code ID + + .text + .align 2 + .thumb + .syntax unified + +.type port_int_disable, %function +port_int_disable: + CPSID I + BX LR + + +.type port_int_enable, %function +port_int_enable: + CPSIE I + BX LR + + +.type port_cpsr_save, %function +port_cpsr_save: + MRS R0, PRIMASK + CPSID I + BX LR + + +.type port_cpsr_restore, %function +port_cpsr_restore: + MSR PRIMASK, R0 + BX LR + + +.thumb_func +.type port_sched_start, %function +port_sched_start: + LDR R0, =NVIC_SYSPRI14 + + + LDR R1, =NVIC_PENDSV_PRI + + + STR R1, [R0] + + MOVS R0, #0 + MSR PSP, R0 + + LDR R0, =NVIC_INT_CTRL + LDR R1, =NVIC_PENDSVSET + STR R1, [R0] + + CPSIE I + +__unreachable: + B __unreachable + + +.thumb_func +.type port_context_switch, %function +port_context_switch: + LDR R0, =NVIC_INT_CTRL + LDR R1, =NVIC_PENDSVSET + STR R1, [R0] + BX LR + + +.thumb_func +.type port_irq_context_switch, %function +port_irq_context_switch: + LDR R0, =NVIC_INT_CTRL + LDR R1, =NVIC_PENDSVSET + STR R1, [R0] + BX LR + +.ifdef SMP + +.thumb_func +.type PendSV_smp_Handler, %function +PendSV_smp_Handler: + CPSID I + MRS R0, PSP + CMP R0, #0 + BEQ PendSVHandler_smp_nosave + + SUBS R0, R0, #0x20 + STMIA R0!, {R4 - R7} + MOV R4, R8 + MOV R5, R9 + MOV R6, R10 + MOV R7, R11 + STMIA R0!, {R4-R7} + SUBS R0, R0, #0x20 + LDR R1, =k_curr_tasks + LDR R2, = SIO_ID + LDR R2, [R2] + LSLS R2 ,R2 ,#2 + ADDS R1, R2 + LDR R1, [R1] + STR R0, [R1] + +PendSVHandler_smp_nosave: + LDR R0, =k_curr_tasks + LDR R1, =k_next_tasks + @R3= Core number + LDR R3, = SIO_ID + LDR R3, [R3] + LSLS R3 ,R3 ,#2 + @R0=&k_curr_tasks[Core number] + ADDS R0, R3 + @R1=&k_next_tasks[Core number] + ADDS R1, R3 + @R2=k_next_tasks[Core number] + LDR R2, [R1] + @k_curr_tasks[Core number]=k_next_tasks[Core number] + STR R2, [R0] + + LDR R0, [R2] + + LDMIA R0!, {R4 - R7} + LDMIA R0!, {R2 - R3} + MOV R8, R2 + MOV R9, R3 + LDMIA R0!, {R2 - R3} + MOV R10, R2 + MOV R11, R3 + MSR PSP, R0 + + MOV R0, R14 + MOVS R1, #0x04 + ORRS R0, R1 + MOV R14, R0 + + CPSIE I + + BX LR + +.else + +.thumb_func +.type PendSV_Handler, %function +PendSV_Handler: + CPSID I + MRS R0, PSP + CMP R0, #0 + BEQ PendSVHandler_nosave + + SUBS R0, R0, #0x20 + STMIA R0!, {R4 - R7} + MOV R4, R8 + MOV R5, R9 + MOV R6, R10 + MOV R7, R11 + STMIA R0!, {R4-R7} + SUBS R0, R0, #0x20 + LDR R1, =k_curr_task + LDR R1, [R1] + STR R0, [R1] + +PendSVHandler_nosave: + LDR R0, =k_curr_task + LDR R1, =k_next_task + LDR R2, [R1] + STR R2, [R0] + + LDR R0, [R2] + + LDMIA R0!, {R4 - R7} + LDMIA R0!, {R2 - R3} + MOV R8, R2 + MOV R9, R3 + LDMIA R0!, {R2 - R3} + MOV R10, R2 + MOV R11, R3 + MSR PSP, R0 + + MOV R0, R14 + MOVS R1, #0x04 + ORRS R0, R1 + MOV R14, R0 + + CPSIE I + + BX LR + +.endif + +.end + diff --git a/core/include/tos_global.h b/core/include/tos_global.h index f8d5641..84e831a 100644 --- a/core/include/tos_global.h +++ b/core/include/tos_global.h @@ -20,12 +20,14 @@ /* interrupt nesting count */ extern k_nesting_t k_irq_nest_cnt; +extern k_nesting_t k_irq_nest_cnts[configNUM_CORES]; /* schedule lock nesting count */ extern k_nesting_t k_sched_lock_nest_cnt; /* kernel running state */ extern knl_state_t k_knl_state; +extern knl_state_t k_knl_states[configNUM_CORES]; /* ready queue of tasks */ extern readyqueue_t k_rdyq; @@ -35,15 +37,21 @@ extern k_tick_t k_tick_count; /* current task */ extern k_task_t *k_curr_task; +extern k_task_t *k_curr_tasks[configNUM_CORES]; +#define k_curr_task TaskGetCurrentKernelTaskHandle() +k_task_t * TaskGetCurrentKernelTaskHandle(void); /* next task to run */ extern k_task_t 
*k_next_task; - +extern k_task_t *k_next_tasks[configNUM_CORES]; /* idle task related stuff */ extern k_task_t k_idle_task; +extern k_task_t k_idle_tasks[configNUM_CORES]; extern k_stack_t k_idle_task_stk[]; extern k_stack_t *const k_idle_task_stk_addr; extern size_t const k_idle_task_stk_size; +extern k_stack_t k_idle_core1_task_stk[TOS_CFG_IDLE_TASK_STK_SIZE]; +extern k_stack_t *const k_idle_task_stk_addrs[configNUM_CORES]; #if TOS_CFG_OBJ_DYNAMIC_CREATE_EN > 0u /* list to hold all the destroyed dynamic created tasks */ extern k_list_t k_dead_task_list; diff --git a/core/include/tos_k.h b/core/include/tos_k.h index a9584ee..69ac2ed 100644 --- a/core/include/tos_k.h +++ b/core/include/tos_k.h @@ -68,6 +68,11 @@ #endif #include #include - +// #ifndef portGET_TASK_LOCK() +// #define portGET_TASK_LOCK() +// #endif +// #ifndef portRELEASE_TASK_LOCK() +// #define portRELEASE_TASK_LOCK() +// #endif #endif /* _TOS_K_H_ */ diff --git a/core/include/tos_sched.h b/core/include/tos_sched.h index 090c569..50540bb 100644 --- a/core/include/tos_sched.h +++ b/core/include/tos_sched.h @@ -37,6 +37,7 @@ __KNL__ void readyqueue_init(void); __KNL__ int readyqueue_is_prio_onlyone(k_prio_t prio); __KNL__ k_task_t *readyqueue_first_task_get(k_prio_t prio); +__KNL__ k_task_t *readyqueue_first_task_get_smp(k_prio_t prio,int corenum); __KNL__ k_task_t *readyqueue_highest_ready_task_get(void); diff --git a/core/include/tos_sys.h b/core/include/tos_sys.h index 6dbbe3a..8af26d5 100644 --- a/core/include/tos_sys.h +++ b/core/include/tos_sys.h @@ -19,6 +19,8 @@ #define _TOS_SYS_H_ __CDECLS_BEGIN +#define configMAX_TASK_NAME_LEN 16 +#define configIDLE_TASK_NAME "idle" #define K_NESTING_LIMIT_IRQ (k_nesting_t)250u #define K_NESTING_LIMIT_SCHED_LOCK (k_nesting_t)250u @@ -165,6 +167,18 @@ __API__ k_err_t tos_knl_sched_lock(void); * @retval K_ERR_NONE return successfully. 
*/ __API__ k_err_t tos_knl_sched_unlock(void); +/////////////////new for smp////////////////////////////////////////// +#if (USE_SMP ==1u) +__API__ static void prvDisableInterruptsAndPortStartSchedulerOnCore( void ); +__API__ void smp_init_core0(void); +__API__ k_err_t tos_knl_smp_init(void); +__KNL__ int knl_is_idle_pre(k_task_t *task); +__API__ k_err_t tos_knl_start_smp(void); +#if ( configUSE_CORE_AFFINITY == 1u ) +__API__ void tos_TaskCoreAffinitySet( k_task_t * Task, BaseType_t CoreAffinityMask ); +__API__ BaseType_t vTaskCoreAffinityGet( const k_task_t * Task ); +#endif +#endif #if TOS_CFG_TICKLESS_EN > 0u __KNL__ k_tick_t knl_next_expires_get(void); diff --git a/core/include/tos_task.h b/core/include/tos_task.h index 5b1c970..1f2762a 100644 --- a/core/include/tos_task.h +++ b/core/include/tos_task.h @@ -131,6 +131,13 @@ struct k_task_st { k_event_flag_t *flag_match; /**< if we pend an event successfully, flag_match will be set by the event poster, and will be returned by tos_event_pend to the caller */ #endif +#if (USE_SMP == 1u ) + BaseType_t RunningOnCore; + BaseType_t IsIDLE; + #if ( configUSE_CORE_AFFINITY == 1u ) + BaseType_t CoreAffinityMask; + #endif +#endif }; /** @@ -434,7 +441,13 @@ __DEBUG__ __STATIC_INLINE__ void task_default_walker(k_task_t *task) state_str = state_str; tos_kprintln("tsk name: %s", task->name); - + #if(USE_SMP==1u) + if(task->RunningOnCore!=-1){ + state_str = "RUNNING"; + } + else + #endif + { if (tos_task_curr_task_get() == task) { state_str = "RUNNING"; } else if (task->state == K_TASK_STATE_PENDTIMEOUT_SUSPENDED) { @@ -454,6 +467,7 @@ __DEBUG__ __STATIC_INLINE__ void task_default_walker(k_task_t *task) } else if (task->state == K_TASK_STATE_READY) { state_str = "READY"; } + } tos_kprintln("tsk stat: %s", state_str); tos_kprintln("stk size: %d", task->stk_size); diff --git a/core/tos_barrier.c b/core/tos_barrier.c index f0933f2..420af60 100644 --- a/core/tos_barrier.c +++ b/core/tos_barrier.c @@ -43,12 +43,16 @@ __API__ k_err_t tos_barrier_destroy(k_barrier_t *barrier) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + pend_wakeup_all(&barrier->pend_obj, PEND_STATE_DESTROY); pend_object_deinit(&barrier->pend_obj); TOS_OBJ_DEINIT(barrier); + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); knl_sched(); @@ -65,7 +69,9 @@ __API__ k_err_t tos_barrier_pend(k_barrier_t *barrier) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (barrier->count == 0u) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_BARRIER_OVERFLOW; } @@ -74,11 +80,13 @@ __API__ k_err_t tos_barrier_pend(k_barrier_t *barrier) barrier->count = (k_barrier_cnt_t)0u; pend_wakeup_all(&barrier->pend_obj, PEND_STATE_POST); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } @@ -86,6 +94,7 @@ __API__ k_err_t tos_barrier_pend(k_barrier_t *barrier) --barrier->count; pend_task_block(k_curr_task, &barrier->pend_obj, TOS_TIME_FOREVER); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -100,7 +109,9 @@ __API__ k_err_t tos_barrier_reset(k_barrier_t *barrier, k_barrier_cnt_t count) TOS_OBJ_VERIFY(barrier, KNL_OBJ_TYPE_BARRIER); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); barrier->count = count; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; diff --git a/core/tos_binary_heap.c b/core/tos_binary_heap.c index 4c4e6a2..bb21c53 100644 --- a/core/tos_binary_heap.c +++ b/core/tos_binary_heap.c @@ -218,10 +218,12 @@ __API__ k_err_t 
tos_bin_heap_push(k_bin_heap_t *bin_heap, void *item, size_t ite TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); bin_heap_item_copy_from(bin_heap, item); bin_heap_percolate_up(bin_heap, item); bin_heap_item_increase(bin_heap); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -235,6 +237,7 @@ __API__ k_err_t tos_bin_heap_pop(k_bin_heap_t *bin_heap, void *item, size_t *ite TOS_OBJ_VERIFY(bin_heap, KNL_OBJ_TYPE_BINARY_HEAP); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_bin_heap_is_empty(bin_heap)) { TOS_CPU_INT_ENABLE(); @@ -245,6 +248,7 @@ __API__ k_err_t tos_bin_heap_pop(k_bin_heap_t *bin_heap, void *item, size_t *ite bin_heap_item_decrease(bin_heap); bin_heap_percolate_down(bin_heap); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; @@ -258,7 +262,11 @@ __API__ k_err_t tos_bin_heap_flush(k_bin_heap_t *bin_heap) TOS_OBJ_VERIFY(bin_heap, KNL_OBJ_TYPE_BINARY_HEAP); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + bin_heap->total = 0; + + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; @@ -273,7 +281,9 @@ __API__ int tos_bin_heap_is_empty(k_bin_heap_t *bin_heap) TOS_OBJ_VERIFY_RC(bin_heap, KNL_OBJ_TYPE_BINARY_HEAP, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_empty = (bin_heap->total == 0); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_empty; @@ -288,7 +298,9 @@ __API__ int tos_bin_heap_is_full(k_bin_heap_t *bin_heap) TOS_OBJ_VERIFY_RC(bin_heap, KNL_OBJ_TYPE_BINARY_HEAP, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_full = (bin_heap->total == bin_heap->item_cnt ? K_TRUE : K_FALSE); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_full; diff --git a/core/tos_char_fifo.c b/core/tos_char_fifo.c index 7dc3bc4..abeecfa 100644 --- a/core/tos_char_fifo.c +++ b/core/tos_char_fifo.c @@ -114,16 +114,18 @@ __API__ int tos_chr_fifo_push_stream(k_chr_fifo_t *chr_fifo, uint8_t *stream, si TOS_OBJ_VERIFY_RC(chr_fifo, KNL_OBJ_TYPE_CHAR_FIFO, 0); TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); while (i < size) { err = tos_ring_q_enqueue(&chr_fifo->ring_q, &stream[i], sizeof(uint8_t)); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return i; } ++i; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return i; } @@ -147,14 +149,17 @@ __API__ int tos_chr_fifo_pop_stream(k_chr_fifo_t *chr_fifo, uint8_t *buffer, siz TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); while (i < size) { if (tos_ring_q_dequeue(&chr_fifo->ring_q, &data, K_NULL) != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return i; } buffer[i++] = data; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return i; } diff --git a/core/tos_completion.c b/core/tos_completion.c index 6564227..d9db890 100644 --- a/core/tos_completion.c +++ b/core/tos_completion.c @@ -39,12 +39,15 @@ __API__ k_err_t tos_completion_destroy(k_completion_t *completion) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + pend_wakeup_all(&completion->pend_obj, PEND_STATE_DESTROY); pend_object_deinit(&completion->pend_obj); TOS_OBJ_DEINIT(completion); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -60,24 +63,30 @@ __API__ k_err_t tos_completion_pend_timed(k_completion_t *completion, k_tick_t t TOS_OBJ_VERIFY(completion, KNL_OBJ_TYPE_COMPLETION); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (completion->done > (completion_done_t)0u) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately + portRELEASE_TASK_LOCK(); 
TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } pend_task_block(k_curr_task, &completion->pend_obj, timeout); + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); knl_sched(); @@ -98,7 +107,10 @@ __STATIC__ k_err_t completion_do_post(k_completion_t *completion, opt_post_t opt TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + if (completion->done == (completion_done_t)-1) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_COMPLETION_OVERFLOW; } @@ -106,12 +118,14 @@ __STATIC__ k_err_t completion_do_post(k_completion_t *completion, opt_post_t opt ++completion->done; if (pend_is_nopending(&completion->pend_obj)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } pend_wakeup(&completion->pend_obj, PEND_STATE_POST, opt); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -136,7 +150,9 @@ __API__ k_err_t tos_completion_reset(k_completion_t *completion) TOS_OBJ_VERIFY(completion, KNL_OBJ_TYPE_COMPLETION); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); completion->done = (completion_done_t)0u; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; @@ -151,7 +167,9 @@ __API__ int tos_completion_is_done(k_completion_t *completion) TOS_OBJ_VERIFY_RC(completion, KNL_OBJ_TYPE_COMPLETION, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_done = (completion->done > (completion_done_t)0u ? K_TRUE : K_FALSE); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_done; diff --git a/core/tos_countdownlatch.c b/core/tos_countdownlatch.c index cdb0186..0144f99 100644 --- a/core/tos_countdownlatch.c +++ b/core/tos_countdownlatch.c @@ -39,12 +39,14 @@ __API__ k_err_t tos_countdownlatch_destroy(k_countdownlatch_t *countdownlatch) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&countdownlatch->pend_obj, PEND_STATE_DESTROY); pend_object_deinit(&countdownlatch->pend_obj); TOS_OBJ_DEINIT(countdownlatch); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -61,23 +63,28 @@ __API__ k_err_t tos_countdownlatch_pend_timed(k_countdownlatch_t *countdownlatch TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (countdownlatch->count == (k_countdownlatch_cnt_t)0u) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } pend_task_block(k_curr_task, &countdownlatch->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -97,8 +104,10 @@ __API__ k_err_t tos_countdownlatch_post(k_countdownlatch_t *countdownlatch) TOS_OBJ_VERIFY(countdownlatch, KNL_OBJ_TYPE_COUNTDOWNLATCH); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (countdownlatch->count == (k_countdownlatch_cnt_t)0) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_COUNTDOWNLATCH_OVERFLOW; } @@ -106,12 +115,14 @@ __API__ k_err_t tos_countdownlatch_post(k_countdownlatch_t *countdownlatch) --countdownlatch->count; if (countdownlatch->count > (k_countdownlatch_cnt_t)0) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } pend_wakeup_one(&countdownlatch->pend_obj, PEND_STATE_POST); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -126,7 +137,9 @@ __API__ k_err_t tos_countdownlatch_reset(k_countdownlatch_t *countdownlatch, k_c 
TOS_OBJ_VERIFY(countdownlatch, KNL_OBJ_TYPE_COUNTDOWNLATCH); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); countdownlatch->count = count; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; diff --git a/core/tos_event.c b/core/tos_event.c index a901819..fd35808 100644 --- a/core/tos_event.c +++ b/core/tos_event.c @@ -49,6 +49,7 @@ __API__ k_err_t tos_event_destroy(k_event_t *event) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&event->pend_obj, PEND_STATE_DESTROY); event->flag = (k_event_flag_t)0u; @@ -61,6 +62,7 @@ __API__ k_err_t tos_event_destroy(k_event_t *event) knl_object_alloc_reset(&event->knl_obj); #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -105,6 +107,7 @@ __API__ k_err_t tos_event_destroy_dyn(k_event_t *event) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&event->pend_obj, PEND_STATE_DESTROY); event->flag = (k_event_flag_t)0u; @@ -115,6 +118,7 @@ __API__ k_err_t tos_event_destroy_dyn(k_event_t *event) tos_mmheap_free(event); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -158,20 +162,24 @@ __API__ k_err_t tos_event_pend(k_event_t *event, k_event_flag_t flag_expect, k_e TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (event_is_match(event->flag, flag_expect, flag_match, opt_pend)) { if (opt_pend & TOS_OPT_EVENT_PEND_CLR) { // destroy the bridge after get across the river event->flag = (k_event_flag_t)0u; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } @@ -182,6 +190,7 @@ __API__ k_err_t tos_event_pend(k_event_t *event, k_event_flag_t flag_expect, k_e pend_task_block(k_curr_task, &event->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -207,6 +216,7 @@ __STATIC__ k_err_t event_do_post(k_event_t *event, k_event_flag_t flag, opt_even } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY_SAFE(task, tmp, k_task_t, pend_list, &event->pend_obj.list) { if (event_is_match(event->flag, task->flag_expect, task->flag_match, task->opt_event_pend)) { @@ -220,6 +230,7 @@ __STATIC__ k_err_t event_do_post(k_event_t *event, k_event_flag_t flag, opt_even } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_global.c b/core/tos_global.c index 672ffd2..b42b027 100644 --- a/core/tos_global.c +++ b/core/tos_global.c @@ -18,17 +18,34 @@ #include k_nesting_t k_irq_nest_cnt = (k_nesting_t)0; +k_nesting_t k_irq_nest_cnts[configNUM_CORES] = {(k_nesting_t)0}; k_nesting_t k_sched_lock_nest_cnt = (k_nesting_t)0; knl_state_t k_knl_state = KNL_STATE_STOPPED; +knl_state_t k_knl_states[configNUM_CORES] = {KNL_STATE_STOPPED}; readyqueue_t k_rdyq; k_tick_t k_tick_count = (k_tick_t)0u; -k_task_t *k_curr_task = K_NULL; -k_task_t *k_next_task = K_NULL; +//k_task_t *k_curr_task = K_NULL; +k_task_t * k_curr_tasks[configNUM_CORES] = {K_NULL}; +k_task_t * TaskGetCurrentKernelTaskHandle(void){ + k_task_t * xReturn; + TOS_CPU_CPSR_ALLOC(); + TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + xReturn = k_curr_tasks[ port_GET_CORE_ID() ]; + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + return xReturn; +} +k_task_t *k_next_task = K_NULL; +k_task_t *k_next_tasks[configNUM_CORES] = {K_NULL}; +k_task_t k_idle_tasks[configNUM_CORES] ; k_task_t k_idle_task; k_stack_t 
k_idle_task_stk[TOS_CFG_IDLE_TASK_STK_SIZE]; +k_stack_t k_idle_core1_task_stk[TOS_CFG_IDLE_TASK_STK_SIZE]; +k_stack_t *const k_idle_task_stk_addrs[configNUM_CORES]={&k_idle_task_stk[0],&k_idle_core1_task_stk[0]}; k_stack_t *const k_idle_task_stk_addr = &k_idle_task_stk[0]; size_t const k_idle_task_stk_size = TOS_CFG_IDLE_TASK_STK_SIZE; diff --git a/core/tos_mail_queue.c b/core/tos_mail_queue.c index 033d9b4..40664f2 100644 --- a/core/tos_mail_queue.c +++ b/core/tos_mail_queue.c @@ -52,8 +52,10 @@ __API__ k_err_t tos_mail_q_destroy(k_mail_q_t *mail_q) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_ring_q_destroy(&mail_q->ring_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -65,6 +67,7 @@ __API__ k_err_t tos_mail_q_destroy(k_mail_q_t *mail_q) TOS_OBJ_DEINIT(mail_q); knl_object_alloc_reset(&mail_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -103,9 +106,10 @@ __API__ k_err_t tos_mail_q_destroy_dyn(k_mail_q_t *mail_q) } TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); err = tos_ring_q_destroy_dyn(&mail_q->ring_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -117,6 +121,7 @@ __API__ k_err_t tos_mail_q_destroy_dyn(k_mail_q_t *mail_q) TOS_OBJ_DEINIT(mail_q); knl_object_alloc_reset(&mail_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -142,19 +147,22 @@ __API__ k_err_t tos_mail_q_pend(k_mail_q_t *mail_q, void *mail_buf, size_t *mail TOS_OBJ_VERIFY(mail_q, KNL_OBJ_TYPE_MAIL_QUEUE); TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); if (tos_ring_q_dequeue(&mail_q->ring_q, mail_buf, mail_size) == K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { *mail_size = 0; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } @@ -162,6 +170,7 @@ __API__ k_err_t tos_mail_q_pend(k_mail_q_t *mail_q, void *mail_buf, size_t *mail k_curr_task->mail = mail_buf; pend_task_block(k_curr_task, &mail_q->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -193,13 +202,15 @@ __STATIC__ k_err_t mail_q_do_post(k_mail_q_t *mail_q, void *mail_buf, size_t mai TOS_OBJ_VERIFY(mail_q, KNL_OBJ_TYPE_MAIL_QUEUE); TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); if (pend_is_nopending(&mail_q->pend_obj)) { err = tos_ring_q_enqueue(&mail_q->ring_q, mail_buf, mail_size); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -212,7 +223,7 @@ __STATIC__ k_err_t mail_q_do_post(k_mail_q_t *mail_q, void *mail_buf, size_t mai mail_task_recv(task, mail_buf, mail_size); } } - + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_message_queue.c b/core/tos_message_queue.c index 136a5b1..5e5c810 100644 --- a/core/tos_message_queue.c +++ b/core/tos_message_queue.c @@ -58,8 +58,10 @@ __API__ k_err_t tos_msg_q_destroy(k_msg_q_t *msg_q) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_ring_q_destroy(&msg_q->ring_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -74,6 +76,7 @@ __API__ k_err_t tos_msg_q_destroy(k_msg_q_t *msg_q) knl_object_alloc_reset(&msg_q->knl_obj); #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -113,8 +116,10 @@ __API__ k_err_t 
tos_msg_q_destroy_dyn(k_msg_q_t *msg_q) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_ring_q_destroy_dyn(&msg_q->ring_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -126,6 +131,7 @@ __API__ k_err_t tos_msg_q_destroy_dyn(k_msg_q_t *msg_q) TOS_OBJ_DEINIT(msg_q); knl_object_alloc_reset(&msg_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -151,25 +157,30 @@ __API__ k_err_t tos_msg_q_pend(k_msg_q_t *msg_q, void **msg_ptr, k_tick_t timeou TOS_OBJ_VERIFY(msg_q, KNL_OBJ_TYPE_MESSAGE_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_ring_q_dequeue(&msg_q->ring_q, msg_ptr, K_NULL) == K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { *msg_ptr = K_NULL; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } pend_task_block(k_curr_task, &msg_q->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -198,13 +209,16 @@ __STATIC__ k_err_t msg_q_do_post(k_msg_q_t *msg_q, void *msg_ptr, opt_post_t opt TOS_OBJ_VERIFY(msg_q, KNL_OBJ_TYPE_MESSAGE_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (pend_is_nopending(&msg_q->pend_obj)) { err = tos_ring_q_enqueue(&msg_q->ring_q, &msg_ptr, sizeof(void*)); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -217,6 +231,7 @@ __STATIC__ k_err_t msg_q_do_post(k_msg_q_t *msg_q, void *msg_ptr, opt_post_t opt } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_mmblk.c b/core/tos_mmblk.c index a518920..7dc7c83 100644 --- a/core/tos_mmblk.c +++ b/core/tos_mmblk.c @@ -165,7 +165,9 @@ __API__ k_err_t tos_mmblk_alloc(k_mmblk_pool_t *mbp, void **blk) TOS_OBJ_VERIFY(mbp, KNL_OBJ_TYPE_MMBLK_POOL); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (mbp->blk_free == 0) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); *blk = K_NULL; return K_ERR_MMBLK_POOL_EMPTY; @@ -173,6 +175,7 @@ __API__ k_err_t tos_mmblk_alloc(k_mmblk_pool_t *mbp, void **blk) *blk = mbp->free_list; mbp->free_list = *(void **)mbp->free_list; --mbp->blk_free; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -186,7 +189,9 @@ __API__ k_err_t tos_mmblk_free(k_mmblk_pool_t *mbp, void *blk) TOS_OBJ_VERIFY(mbp, KNL_OBJ_TYPE_MMBLK_POOL); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (mbp->blk_free >= mbp->blk_max) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_MMBLK_POOL_FULL; } @@ -194,6 +199,7 @@ __API__ k_err_t tos_mmblk_free(k_mmblk_pool_t *mbp, void *blk) *(void **)blk = mbp->free_list; mbp->free_list = blk; ++mbp->blk_free; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } diff --git a/core/tos_mutex.c b/core/tos_mutex.c index 8756e94..6271c5d 100644 --- a/core/tos_mutex.c +++ b/core/tos_mutex.c @@ -102,6 +102,7 @@ __API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex) #endif TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&mutex->pend_obj, PEND_STATE_DESTROY); @@ -117,6 +118,7 @@ __API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex) knl_object_alloc_reset(&mutex->knl_obj); #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -164,6 +166,7 @@ __API__ k_err_t tos_mutex_destroy_dyn(k_mutex_t *mutex) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); 
pend_wakeup_all(&mutex->pend_obj, PEND_STATE_DESTROY); @@ -177,6 +180,7 @@ __API__ k_err_t tos_mutex_destroy_dyn(k_mutex_t *mutex) tos_mmheap_free(mutex); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -194,28 +198,34 @@ __API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout) TOS_OBJ_VERIFY(mutex, KNL_OBJ_TYPE_MUTEX); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (mutex->pend_nesting == (k_nesting_t)0u) { // first come mutex_fresh_owner_mark(mutex, k_curr_task); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (knl_is_self(mutex->owner)) { // come again if (mutex->pend_nesting == (k_nesting_t)-1) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_MUTEX_NESTING_OVERFLOW; } ++mutex->pend_nesting; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_MUTEX_NESTING; } if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } @@ -229,6 +239,7 @@ __API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout) pend_task_block(k_curr_task, &mutex->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -250,13 +261,16 @@ __API__ k_err_t tos_mutex_post(k_mutex_t *mutex) TOS_OBJ_VERIFY(mutex, KNL_OBJ_TYPE_MUTEX); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (!knl_is_self(mutex->owner)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_MUTEX_NOT_OWNER; } --mutex->pend_nesting; if (mutex->pend_nesting > (k_nesting_t)0u) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_MUTEX_NESTING; } @@ -264,6 +278,7 @@ __API__ k_err_t tos_mutex_post(k_mutex_t *mutex) mutex_old_owner_release(mutex); if (pend_is_nopending(&mutex->pend_obj)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -277,6 +292,7 @@ __API__ k_err_t tos_mutex_post(k_mutex_t *mutex) mutex_new_owner_mark(mutex, pending_task); pend_wakeup_one(&mutex->pend_obj, PEND_STATE_POST); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_priority_mail_queue.c b/core/tos_priority_mail_queue.c index cba5ecc..054abff 100644 --- a/core/tos_priority_mail_queue.c +++ b/core/tos_priority_mail_queue.c @@ -59,9 +59,11 @@ __API__ k_err_t tos_prio_mail_q_destroy(k_prio_mail_q_t *prio_mail_q) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_prio_q_destroy(&prio_mail_q->prio_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -76,6 +78,7 @@ __API__ k_err_t tos_prio_mail_q_destroy(k_prio_mail_q_t *prio_mail_q) TOS_OBJ_DEINIT(prio_mail_q); knl_object_alloc_reset(&prio_mail_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -114,9 +117,10 @@ __API__ k_err_t tos_prio_mail_q_destroy_dyn(k_prio_mail_q_t *prio_mail_q) } TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); err = tos_prio_q_destroy_dyn(&prio_mail_q->prio_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -128,6 +132,7 @@ __API__ k_err_t tos_prio_mail_q_destroy_dyn(k_prio_mail_q_t *prio_mail_q) TOS_OBJ_DEINIT(prio_mail_q); knl_object_alloc_reset(&prio_mail_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -153,19 +158,23 @@ __API__ k_err_t tos_prio_mail_q_pend(k_prio_mail_q_t *prio_mail_q, void *mail_bu TOS_OBJ_VERIFY(prio_mail_q, KNL_OBJ_TYPE_PRIORITY_MAIL_QUEUE); 
TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_prio_q_dequeue(&prio_mail_q->prio_q, mail_buf, mail_size, K_NULL) == K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { *mail_size = 0; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } @@ -173,6 +182,7 @@ __API__ k_err_t tos_prio_mail_q_pend(k_prio_mail_q_t *prio_mail_q, void *mail_bu k_curr_task->mail = mail_buf; pend_task_block(k_curr_task, &prio_mail_q->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -204,13 +214,16 @@ __STATIC__ k_err_t prio_mail_q_do_post(k_prio_mail_q_t *prio_mail_q, void *mail_ TOS_OBJ_VERIFY(prio_mail_q, KNL_OBJ_TYPE_PRIORITY_MAIL_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (pend_is_nopending(&prio_mail_q->pend_obj)) { err = tos_prio_q_enqueue(&prio_mail_q->prio_q, mail_buf, mail_size, prio); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -224,6 +237,7 @@ __STATIC__ k_err_t prio_mail_q_do_post(k_prio_mail_q_t *prio_mail_q, void *mail_ } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_priority_message_queue.c b/core/tos_priority_message_queue.c index 07925f0..a706768 100644 --- a/core/tos_priority_message_queue.c +++ b/core/tos_priority_message_queue.c @@ -59,9 +59,11 @@ __API__ k_err_t tos_prio_msg_q_destroy(k_prio_msg_q_t *prio_msg_q) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_prio_q_destroy(&prio_msg_q->prio_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -76,6 +78,7 @@ __API__ k_err_t tos_prio_msg_q_destroy(k_prio_msg_q_t *prio_msg_q) TOS_OBJ_DEINIT(prio_msg_q); knl_object_alloc_reset(&prio_msg_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -114,9 +117,11 @@ __API__ k_err_t tos_prio_msg_q_destroy_dyn(k_prio_msg_q_t *prio_msg_q) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); err = tos_prio_q_destroy_dyn(&prio_msg_q->prio_q); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -131,6 +136,7 @@ __API__ k_err_t tos_prio_msg_q_destroy_dyn(k_prio_msg_q_t *prio_msg_q) TOS_OBJ_DEINIT(prio_msg_q); knl_object_alloc_reset(&prio_msg_q->knl_obj); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -156,25 +162,30 @@ __API__ k_err_t tos_prio_msg_q_pend(k_prio_msg_q_t *prio_msg_q, void **msg_ptr, TOS_OBJ_VERIFY(prio_msg_q, KNL_OBJ_TYPE_PRIORITY_MESSAGE_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_prio_q_dequeue(&prio_msg_q->prio_q, msg_ptr, K_NULL, K_NULL) == K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { *msg_ptr = K_NULL; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } pend_task_block(k_curr_task, &prio_msg_q->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -203,13 +214,16 @@ __STATIC__ k_err_t prio_msg_q_do_post(k_prio_msg_q_t *prio_msg_q, void *msg_ptr, TOS_OBJ_VERIFY(prio_msg_q, KNL_OBJ_TYPE_PRIORITY_MESSAGE_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (pend_is_nopending(&prio_msg_q->pend_obj)) { err = 
tos_prio_q_enqueue(&prio_msg_q->prio_q, &msg_ptr, sizeof(void *), prio); if (err != K_ERR_NONE) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -222,6 +236,7 @@ __STATIC__ k_err_t prio_msg_q_do_post(k_prio_msg_q_t *prio_msg_q, void *msg_ptr, } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_priority_queue.c b/core/tos_priority_queue.c index e6dc6b6..61022ed 100644 --- a/core/tos_priority_queue.c +++ b/core/tos_priority_queue.c @@ -290,11 +290,13 @@ __API__ k_err_t tos_prio_q_enqueue(k_prio_q_t *prio_q, void *item, size_t item_s } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); the_slot = prio_q_pool_mgr_slot_alloc(&prio_q->pool_mgr); TOS_ASSERT(the_slot != PRIO_Q_POOL_SLOT_INVALID); prio_q_do_enqueue(prio_q, item, the_slot, prio); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -320,10 +322,12 @@ __API__ k_err_t tos_prio_q_dequeue(k_prio_q_t *prio_q, void *item, size_t *item_ } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); the_slot = prio_q_prio_mgr_slot_dequeue(&prio_q->prio_mgr, prio); prio_q_do_dequeue(prio_q, item, item_size, the_slot); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -336,11 +340,13 @@ __API__ k_err_t tos_prio_q_flush(k_prio_q_t *prio_q) TOS_OBJ_VERIFY(prio_q, KNL_OBJ_TYPE_PRIORITY_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); prio_q_pool_mgr_reset(&prio_q->pool_mgr, prio_q->item_cnt); prio_q_prio_mgr_reset(&prio_q->prio_mgr); prio_q->total = 0; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -354,7 +360,9 @@ __API__ int tos_prio_q_is_empty(k_prio_q_t *prio_q) TOS_OBJ_VERIFY_RC(prio_q, KNL_OBJ_TYPE_PRIORITY_QUEUE, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_empty = (prio_q->total == 0); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_empty; @@ -369,7 +377,9 @@ __API__ int tos_prio_q_is_full(k_prio_q_t *prio_q) TOS_OBJ_VERIFY_RC(prio_q, KNL_OBJ_TYPE_PRIORITY_QUEUE, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_full = (prio_q->total == prio_q->item_cnt); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_full; diff --git a/core/tos_ring_queue.c b/core/tos_ring_queue.c index fd3f8b7..4b5e6f5 100644 --- a/core/tos_ring_queue.c +++ b/core/tos_ring_queue.c @@ -147,8 +147,10 @@ __API__ k_err_t tos_ring_q_enqueue(k_ring_q_t *ring_q, void *item, size_t item_s } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_ring_q_is_full(ring_q)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_RING_Q_FULL; } @@ -156,6 +158,7 @@ __API__ k_err_t tos_ring_q_enqueue(k_ring_q_t *ring_q, void *item, size_t item_s ring_q_item_copy_from(ring_q, item); ring_q_item_increase(ring_q); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -169,8 +172,10 @@ __API__ k_err_t tos_ring_q_dequeue(k_ring_q_t *ring_q, void *item, size_t *item_ TOS_OBJ_VERIFY(ring_q, KNL_OBJ_TYPE_RING_QUEUE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (tos_ring_q_is_empty(ring_q)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_RING_Q_EMPTY; } @@ -178,6 +183,7 @@ __API__ k_err_t tos_ring_q_dequeue(k_ring_q_t *ring_q, void *item, size_t *item_ ring_q_item_copy_to(ring_q, item, item_size); ring_q_item_decrease(ring_q); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; @@ -191,11 +197,13 @@ __API__ k_err_t tos_ring_q_flush(k_ring_q_t *ring_q) TOS_OBJ_VERIFY(ring_q, KNL_OBJ_TYPE_RING_QUEUE); 
TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); ring_q->head = 0u; ring_q->tail = 0u; ring_q->total = 0; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; @@ -210,7 +218,9 @@ __API__ int tos_ring_q_is_empty(k_ring_q_t *ring_q) TOS_OBJ_VERIFY_RC(ring_q, KNL_OBJ_TYPE_RING_QUEUE, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_empty = (ring_q->total == 0 ? K_TRUE : K_FALSE); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_empty; @@ -225,7 +235,9 @@ __API__ int tos_ring_q_is_full(k_ring_q_t *ring_q) TOS_OBJ_VERIFY_RC(ring_q, KNL_OBJ_TYPE_RING_QUEUE, K_FALSE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); is_full = (ring_q->total == ring_q->item_cnt ? K_TRUE : K_FALSE); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return is_full; diff --git a/core/tos_robin.c b/core/tos_robin.c index 3ec31c4..8bbac4a 100644 --- a/core/tos_robin.c +++ b/core/tos_robin.c @@ -24,6 +24,7 @@ __API__ void tos_robin_default_timeslice_config(k_timeslice_t default_timeslice) TOS_CPU_CPSR_ALLOC(); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (default_timeslice > (k_timeslice_t)0u) { k_robin_default_timeslice = default_timeslice; @@ -31,6 +32,7 @@ __API__ void tos_robin_default_timeslice_config(k_timeslice_t default_timeslice) k_robin_default_timeslice = TOS_CFG_CPU_TICK_PER_SECOND / 10; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -43,6 +45,7 @@ __API__ void tos_robin_timeslice_set(k_task_t *task, k_timeslice_t timeslice) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (timeslice == (k_timeslice_t)0u) { task->timeslice_reload = k_robin_default_timeslice; @@ -53,6 +56,7 @@ __API__ void tos_robin_timeslice_set(k_task_t *task, k_timeslice_t timeslice) if (task->timeslice_reload > task->timeslice) { task->timeslice = task->timeslice_reload; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -60,15 +64,14 @@ __KNL__ void robin_sched(k_prio_t prio) { TOS_CPU_CPSR_ALLOC(); k_task_t *task; - + #if (USE_SMP ==0u) TOS_CPU_INT_DISABLE(); - task = readyqueue_first_task_get(prio); if (!task || knl_is_idle(task)) { TOS_CPU_INT_ENABLE(); return; } - + if (readyqueue_is_prio_onlyone(prio)) { TOS_CPU_INT_ENABLE(); return; @@ -96,9 +99,50 @@ __KNL__ void robin_sched(k_prio_t prio) } else { task->timeslice = task->timeslice_reload; } - + TOS_CPU_INT_ENABLE(); knl_sched(); + # else + TOS_CPU_INT_DISABLE(); + BaseType_t num=configNUM_CORES-1; + BaseType_t xCoreID; + BaseType_t x; + xCoreID = port_GET_CORE_ID(); + int issched=1; + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + //task = readyqueue_first_task_get_smp(prio,x-bias); + task=k_curr_tasks[x]; + if(task==K_NULL) + continue; + if (!task || knl_is_idle(task)) + continue; + if (knl_is_sched_locked()) { + TOS_CPU_INT_ENABLE(); + return; + } + if (task->timeslice > (k_timeslice_t)0u) { + --task->timeslice; + } + if (task->timeslice > (k_timeslice_t)0u) { + if(x==xCoreID) + issched=0; + continue; + } + readyqueue_remove(task); + readyqueue_add(task); + if (task->timeslice_reload == (k_timeslice_t)0u) { + task->timeslice = k_robin_default_timeslice; + } else { + task->timeslice = task->timeslice_reload; + } + if( x != xCoreID ){ + portSCHED_CORE (x); + } + } + TOS_CPU_INT_ENABLE(); + if(issched) + knl_sched(); + #endif } #endif diff --git a/core/tos_sched.c b/core/tos_sched.c index b6ea129..5aa66e5 100644 --- a/core/tos_sched.c +++ b/core/tos_sched.c @@ -72,15 +72,139 @@ __KNL__ k_task_t *readyqueue_first_task_get(k_prio_t prio) task_list = &k_rdyq.task_list_head[prio]; return 
TOS_LIST_FIRST_ENTRY_OR_NULL(task_list, k_task_t, pend_list); } - -__KNL__ k_task_t *readyqueue_highest_ready_task_get(void) +__KNL__ k_task_t *readyqueue_first_task_get_smp(k_prio_t prio,int corenum) { k_list_t *task_list; - - task_list = &k_rdyq.task_list_head[k_rdyq.highest_prio]; - return TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list); + + task_list = &k_rdyq.task_list_head[prio]; + if(corenum==0) + return TOS_LIST_FIRST_ENTRY_OR_NULL(task_list, k_task_t, pend_list); + k_list_t * Head=&k_rdyq.task_list_head[prio]; + task_list=task_list->next; + while(corenum>0){ + while(task_list->next!=Head&&corenum>0){ + task_list=task_list->next; + corenum--; + } + if(corenum>0){ + prio++; + task_list=&k_rdyq.task_list_head[prio]; + Head=&k_rdyq.task_list_head[prio]; + if(tos_list_empty(task_list)){ + return K_NULL; + } + else{ + corenum--; + if(corenum==0) + break; + task_list=task_list->next; + } + } + } + return TOS_LIST_ENTRY(task_list, k_task_t, pend_list); } + +__KNL__ k_task_t *readyqueue_highest_ready_task_get ( void ) +{ + k_list_t *task_list; + #if ( USE_SMP == 1 ) + #if ( configUSE_CORE_AFFINITY == 1u ) + k_task_t * prvtask=K_NULL; + #endif + const BaseType_t xCoreID= port_GET_CORE_ID(); + k_prio_t uxCurrentPriority= k_rdyq.highest_prio; + BaseType_t TaskScheduled = 0; + while(TaskScheduled==0){ + //BaseType_t xDecrementTopPriority=1; + k_list_t * cur_task_list = &k_rdyq.task_list_head[uxCurrentPriority]; + k_task_t * cur_task; + // whether the current linked list of the highest priority queue is empty + if(!tos_list_empty(cur_task_list)) { + /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority + * must not be decremented any further */ + k_list_t *headNode=cur_task_list; + //Traverse linked list + do{ + cur_task_list=cur_task_list->next; + cur_task=TOS_LIST_ENTRY(cur_task_list, k_task_t, pend_list); + if(cur_task->RunningOnCore==-1){ + + /* If the task is not being executed by any core swap it in */ + #if ( configUSE_CORE_AFFINITY == 1u ) + if( ( cur_task->CoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + { + #if ( configUSE_CORE_AFFINITY == 1u ) + prvtask=k_curr_tasks[xCoreID]; + #endif + k_curr_tasks[xCoreID]->RunningOnCore=-1; + cur_task->RunningOnCore=xCoreID; + TaskScheduled=1; + } + } + else if( cur_task== k_curr_tasks[ xCoreID ]){ + /* The task is already running on this core, mark it as scheduled */ + #if ( configUSE_CORE_AFFINITY == 1u ) + if( ( cur_task->CoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + { + cur_task->RunningOnCore=xCoreID; + TaskScheduled=1; + } + } + if( TaskScheduled != 0 ){ + /* Once a task has been selected to run on this core*/ + break; + } + }while(cur_task_list->next!=headNode); + } + if(TaskScheduled==1){ + return cur_task; + } + if(uxCurrentPriorityprio], k_task_t, pend_list)==prvtask){ + BaseType_t uxCoreMap = prvtask->CoreAffinityMask; + BaseType_t xLowestPriority = prvtask->prio; + BaseType_t xLowestPriorityCore = -1; + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + uxCoreMap &= ~( k_curr_tasks[ xCoreID ]->CoreAffinityMask ); + } + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + while( uxCoreMap != 0 ) + { + int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + + configASSERT( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ); + + uxCoreMap &= ~( 1 << uxCore ); + + BaseType_t xTaskPriority = ( BaseType_t ) k_curr_tasks[ uxCore ]->prio; + + if( ( xTaskPriority < xLowestPriority ) && ( k_curr_tasks[ uxCore ]->RunningOnCore != -1 ) ){ + xLowestPriority = xTaskPriority; + xLowestPriorityCore = 
uxCore; + } + } + if( ( 0 <= xLowestPriorityCore ) && ( xLowestPriorityCore < configNUM_CORES )){ + portSCHED_CORE(xLowestPriorityCore); + } + } + + #endif + #else + task_list = &k_rdyq.task_list_head[k_rdyq.highest_prio]; + return TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list); + #endif +} __KNL__ void readyqueue_init(void) { uint8_t i; @@ -100,7 +224,6 @@ __KNL__ void readyqueue_add_head(k_task_t *task) { k_prio_t task_prio; k_list_t *task_list; - task_prio = task->prio; task_list = &k_rdyq.task_list_head[task_prio]; @@ -118,7 +241,6 @@ __KNL__ void readyqueue_add_tail(k_task_t *task) task_prio = task->prio; task_list = &k_rdyq.task_list_head[task_prio]; - if (tos_list_empty(task_list)) { readyqueue_prio_mark(task_prio); } diff --git a/core/tos_sem.c b/core/tos_sem.c index 33debe0..23ac200 100644 --- a/core/tos_sem.c +++ b/core/tos_sem.c @@ -59,6 +59,7 @@ __API__ k_err_t tos_sem_destroy(k_sem_t *sem) #endif TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&sem->pend_obj, PEND_STATE_DESTROY); @@ -70,6 +71,7 @@ __API__ k_err_t tos_sem_destroy(k_sem_t *sem) knl_object_alloc_reset(&sem->knl_obj); #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -124,6 +126,7 @@ __API__ k_err_t tos_sem_destroy_dyn(k_sem_t *sem) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); pend_wakeup_all(&sem->pend_obj, PEND_STATE_DESTROY); @@ -133,6 +136,7 @@ __API__ k_err_t tos_sem_destroy_dyn(k_sem_t *sem) tos_mmheap_free(sem); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -149,20 +153,24 @@ __STATIC__ k_err_t sem_do_post(k_sem_t *sem, opt_post_t opt) TOS_OBJ_VERIFY(sem, KNL_OBJ_TYPE_SEMAPHORE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (sem->count == sem->count_max) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_SEM_OVERFLOW; } if (pend_is_nopending(&sem->pend_obj)) { ++sem->count; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } pend_wakeup(&sem->pend_obj, PEND_STATE_POST, opt); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -188,25 +196,30 @@ __API__ k_err_t tos_sem_pend(k_sem_t *sem, k_tick_t timeout) TOS_OBJ_VERIFY(sem, KNL_OBJ_TYPE_SEMAPHORE); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (sem->count > (k_sem_cnt_t)0u) { --sem->count; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_NOWAIT; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_PEND_SCHED_LOCKED; } pend_task_block(k_curr_task, &sem->pend_obj, timeout); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); diff --git a/core/tos_sys.c b/core/tos_sys.c index 20f09cf..2f8b5ad 100644 --- a/core/tos_sys.c +++ b/core/tos_sys.c @@ -16,7 +16,97 @@ *---------------------------------------------------------------------------*/ #include "tos_k.h" +#if (USE_SMP ==1u) +extern uint8_t ucPrimaryCoreNum ; +__API__ static void prvDisableInterruptsAndPortStartSchedulerOnCore( void ) + { + TOS_CPU_CPSR_ALLOC(); + TOS_CPU_INT_DISABLE(); + port_smp_init_kernel(); + tos_knl_start(); + } + +__API__ void smp_init_core0(void){ + /* No one else should use these! 
*/ + spin_lock_claim( configSMP_SPINLOCK_0 ); + spin_lock_claim( configSMP_SPINLOCK_1 ); + #if (USE_SMP == 1u) + ucPrimaryCoreNum = configTICK_CORE; + configASSERT( get_core_num() == 0) ; // we must be started on core 0 + #else + ucPrimaryCoreNum = get_core_num(); + #endif + //主核初始化 + port_smp_init_kernel(); +} + +__API__ k_err_t tos_knl_smp_init(void) + +{ + k_err_t err; + err=tos_knl_init(); + TOS_CPU_CPSR_ALLOC(); + //关全局中断 + TOS_CPU_INT_DISABLE(); + smp_init_core0(); + //开全局中断 + TOS_CPU_INT_ENABLE(); + return err; +} +#if ( configUSE_CORE_AFFINITY == 1u ) + +__API__ void tos_TaskCoreAffinitySet( k_task_t * Task, BaseType_t CoreAffinityMask ){ + + int flag=0; + //ensure the core is valid + CoreAffinityMask &= ( ( 1 << configNUM_CORES ) - 1 ); + TOS_CPU_CPSR_ALLOC(); + TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + Task->CoreAffinityMask=CoreAffinityMask; + if(!tos_knl_is_running()){ + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + return; + } + if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + return; + } + + BaseType_t xCoreID = ( BaseType_t ) Task->RunningOnCore; + if(xCoreID!=-1&&(CoreAffinityMask & ( 1 << xCoreID ) ) == 0){ + if(xCoreID==port_GET_CORE_ID()){ + flag=1; + } + else{ + portSCHED_CORE (xCoreID); + } + } + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + if(flag){ + knl_sched(); + } + return ; +} +__API__ BaseType_t vTaskCoreAffinityGet( const k_task_t * Task ) + { + + BaseType_t uxCoreAffinityMask; + TOS_CPU_CPSR_ALLOC(); + TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + uxCoreAffinityMask = Task->CoreAffinityMask; + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + return uxCoreAffinityMask; + } +#endif + +#endif __API__ k_err_t tos_knl_init(void) { k_err_t err; @@ -64,12 +154,18 @@ __API__ void tos_knl_irq_enter(void) if (!tos_knl_is_running()) { return; } - + #if (USE_SMP == 1u) + if (unlikely(k_irq_nest_cnts[ port_GET_CORE_ID()] >= K_NESTING_LIMIT_IRQ)) { + return; + } + ++k_irq_nest_cnts[ port_GET_CORE_ID()]; + #else if (unlikely(k_irq_nest_cnt >= K_NESTING_LIMIT_IRQ)) { return; } - + ++k_irq_nest_cnt; + #endif } __API__ void tos_knl_irq_leave(void) @@ -81,30 +177,41 @@ __API__ void tos_knl_irq_leave(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (!knl_is_inirq()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } - - --k_irq_nest_cnt; - + #if (USE_SMP == 1u) + --k_irq_nest_cnts[ port_GET_CORE_ID()]; + #else + --k_irq_nest_cnt; + #endif if (knl_is_inirq()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } if (knl_is_sched_locked()) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } - + k_next_task = readyqueue_highest_ready_task_get(); + #if ( USE_SMP == 1u) + k_next_tasks[port_GET_CORE_ID()]=k_next_task; + #endif if (knl_is_self(k_next_task)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } cpu_irq_context_switch(); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -123,7 +230,9 @@ __API__ k_err_t tos_knl_sched_lock(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); ++k_sched_lock_nest_cnt; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -143,31 +252,61 @@ __API__ k_err_t tos_knl_sched_unlock(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); --k_sched_lock_nest_cnt; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); - + knl_sched(); return K_ERR_NONE; } +__API__ k_err_t tos_knl_start_smp(void) +{ + //启动应用核心 + port_multicore_launch(prvDisableInterruptsAndPortStartSchedulerOnCore); + //启动主核心调度 + if(port_GET_CORE_ID()==ucPrimaryCoreNum){ + 
return tos_knl_start(); + } + return K_ERR_NONE; +} __API__ k_err_t tos_knl_start(void) { if (unlikely(tos_knl_is_running())) { return K_ERR_KNL_RUNNING; } + #if( USE_SMP == 1u ) + TOS_CPU_CPSR_ALLOC(); + //关全局中断 + TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); + k_next_task = readyqueue_highest_ready_task_get(); + k_next_tasks[port_GET_CORE_ID()]=k_next_task; + k_curr_tasks[port_GET_CORE_ID()]=k_next_task; + portRELEASE_TASK_LOCK(); + //开全局中断 + TOS_CPU_INT_ENABLE(); + k_knl_states[port_GET_CORE_ID()] = KNL_STATE_RUNNING; + #else + k_next_task = readyqueue_highest_ready_task_get(); k_curr_task = k_next_task; k_knl_state = KNL_STATE_RUNNING; + #endif cpu_sched_start(); - return K_ERR_NONE; } __API__ int tos_knl_is_running(void) { - return k_knl_state == KNL_STATE_RUNNING; + #if( USE_SMP == 1u ) + return k_knl_states[port_GET_CORE_ID()] == KNL_STATE_RUNNING; + #else + return k_knl_state == KNL_STATE_RUNNING; + #endif } #if TOS_CFG_TICKLESS_EN > 0u @@ -198,7 +337,6 @@ __KNL__ k_tick_t knl_next_expires_get(void) } #endif - __KNL__ void knl_sched(void) { TOS_CPU_CPSR_ALLOC(); @@ -216,13 +354,19 @@ __KNL__ void knl_sched(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ k_next_task = readyqueue_highest_ready_task_get(); + #if (USE_SMP == 1u ) + k_next_tasks[port_GET_CORE_ID()]=k_next_task; + #endif if (knl_is_self(k_next_task)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } - cpu_context_switch(); + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); } @@ -233,12 +377,32 @@ __KNL__ int knl_is_sched_locked(void) __KNL__ int knl_is_inirq(void) { - return k_irq_nest_cnt > 0u; + #if (USE_SMP == 1u ) + //return portCHECK_IF_IN_ISR() ; + return k_irq_nest_cnts[port_GET_CORE_ID()] > 0u; + #else + return k_irq_nest_cnt > 0u; + #endif +} + +__KNL__ int knl_is_idle_pre(k_task_t *task) +{ + int x=0; + for(x;xIsIDLE==1; + #else + return task == &k_idle_task; + #endif } __KNL__ int knl_is_self(k_task_t *task) @@ -246,28 +410,71 @@ __KNL__ int knl_is_self(k_task_t *task) return task == k_curr_task; } +__STATIC__ void knl_idle1_entry(void *arg) +{ + arg = arg; // make compiler happy + while (K_TRUE) { + knl_sched(); + } +} __STATIC__ void knl_idle_entry(void *arg) { arg = arg; // make compiler happy - while (K_TRUE) { -#if TOS_CFG_OBJ_DYNAMIC_CREATE_EN > 0u + #if TOS_CFG_OBJ_DYNAMIC_CREATE_EN > 0u task_free_all(); -#endif - -#if TOS_CFG_PWR_MGR_EN > 0u + #endif + #if TOS_CFG_PWR_MGR_EN > 0u pm_power_manager(); -#endif + #endif + if(k_rdyq.highest_priomail = K_NULL; task->mail_size = 0; #endif +#if (USE_SMP ==1u) + task->RunningOnCore=-1; + task->IsIDLE=0; + #if ( configUSE_CORE_AFFINITY == 1 ) + task->CoreAffinityMask=( 1 << configNUM_CORES ) - 1; + #endif +#endif TOS_OBJ_DEINIT(task); } @@ -102,10 +109,15 @@ __API__ k_err_t tos_task_create(k_task_t *task, if (unlikely(stk_size < K_TASK_STK_SIZE_MIN)) { return K_ERR_TASK_STK_SIZE_INVALID; } - + #if (USE_SMP == 1u ) + if (unlikely(prio == K_TASK_PRIO_IDLE && !knl_is_idle_pre(task))) { + return K_ERR_TASK_PRIO_INVALID; + } + #else if (unlikely(prio == K_TASK_PRIO_IDLE && !knl_is_idle(task))) { return K_ERR_TASK_PRIO_INVALID; } + #endif if (unlikely(prio > K_TASK_PRIO_IDLE)) { return K_ERR_TASK_PRIO_INVALID; @@ -138,14 +150,25 @@ __API__ k_err_t tos_task_create(k_task_t *task, #endif TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); task_state_set_ready(task); readyqueue_add_tail(task); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); - + #if (USE_SMP ==1u) + BaseType_t x; + BaseType_t xCoreID = 
port_GET_CORE_ID(); + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + if( x != xCoreID ){ + if( k_knl_states[x] == KNL_STATE_RUNNING) + portSCHED_CORE (x); + } + } + #endif if (tos_knl_is_running()) { knl_sched(); } - + return K_ERR_NONE; } @@ -158,6 +181,7 @@ __STATIC__ k_err_t task_do_destroy(k_task_t *task) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); #if TOS_CFG_MUTEX_EN > 0u // when we die, wakeup all the people in this land. @@ -177,13 +201,18 @@ __STATIC__ k_err_t task_do_destroy(k_task_t *task) } tos_list_del(&task->stat_list); + //if the destroyed task is running on other cores must be stopped + #if (USE_SMP ==1u) + if(task->RunningOnCore!= port_GET_CORE_ID()&&task->RunningOnCore!=-1){ + portSCHED_CORE (task->RunningOnCore); + } + #endif task_reset(task); - task_state_set_deleted(task); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); - return K_ERR_NONE; } @@ -224,12 +253,14 @@ __KNL__ void task_free_all(void) k_task_t *task, *tmp; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY_SAFE(task, tmp, k_task_t, dead_list, &k_dead_task_list) { tos_list_del(&task->dead_list); task_free(task); } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -263,10 +294,12 @@ __API__ k_err_t tos_task_create_dyn(k_task_t **task, TOS_CPU_CPSR_ALLOC(); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); the_task->stk_base = stk_base; err = tos_task_create(the_task, name, entry, arg, prio, stk_base, stk_size, timeslice); if (err != K_ERR_NONE) { task_free(the_task); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return err; } @@ -274,6 +307,7 @@ __API__ k_err_t tos_task_create_dyn(k_task_t **task, knl_object_alloc_set_dynamic(&the_task->knl_obj); *task = the_task; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } @@ -330,12 +364,14 @@ __API__ void tos_task_yield(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); readyqueue_remove(k_curr_task); readyqueue_add_tail(k_curr_task); - + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); + } __API__ k_err_t tos_task_prio_change(k_task_t *task, k_prio_t prio_new) @@ -354,8 +390,10 @@ __API__ k_err_t tos_task_prio_change(k_task_t *task, k_prio_t prio_new) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (task->prio == prio_new) { // just kidding + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); return K_ERR_NONE; @@ -391,7 +429,17 @@ __API__ k_err_t tos_task_prio_change(k_task_t *task, k_prio_t prio_new) } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); + #if (USE_SMP ==1u) + BaseType_t x; + BaseType_t xCoreID = port_GET_CORE_ID(); + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + if( x != xCoreID ){ + portSCHED_CORE (x); + } + } + #endif knl_sched(); return K_ERR_NONE; @@ -416,6 +464,7 @@ __API__ k_err_t tos_task_suspend(k_task_t *task) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (task_state_is_ready(task)) { // kill the good kid readyqueue_remove(task); @@ -427,7 +476,11 @@ __API__ k_err_t tos_task_suspend(k_task_t *task) tick_list_remove(task); } task_state_set_suspended(task); - + #if (USE_SMP == 1u) + if(task->RunningOnCore!= port_GET_CORE_ID()&&task->RunningOnCore!=-1) + portSCHED_CORE (task->RunningOnCore); + #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); @@ -446,8 +499,10 @@ __API__ k_err_t tos_task_resume(k_task_t *task) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (!task_state_is_suspended(task)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); knl_sched(); return K_ERR_NONE; @@ -458,7 
+513,17 @@ __API__ k_err_t tos_task_resume(k_task_t *task) readyqueue_add(task); } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); + #if (USE_SMP ==1u) + BaseType_t x; + BaseType_t xCoreID = port_GET_CORE_ID(); + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + if( x != xCoreID ){ + portSCHED_CORE (x); + } + } + #endif knl_sched(); return K_ERR_NONE; @@ -483,13 +548,29 @@ __API__ k_err_t tos_task_delay(k_tick_t delay) // if you wanna delay your task forever, why don't just suspend? return K_ERR_DELAY_FOREVER; } - TOS_CPU_INT_DISABLE(); - + portGET_TASK_LOCK(); + #if(USE_SMP==1u) + if(k_curr_task->RunningOnCore!= port_GET_CORE_ID()){ + portRELEASE_TASK_LOCK(); + TOS_CPU_INT_ENABLE(); + knl_sched(); + return K_ERR_NONE; + } + #endif tick_list_add(k_curr_task, delay); readyqueue_remove(k_curr_task); - + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); + #if (USE_SMP ==1u) + BaseType_t x; + BaseType_t xCoreID = port_GET_CORE_ID(); + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + if( x != xCoreID ){ + portSCHED_CORE (x); + } + } + #endif knl_sched(); return K_ERR_NONE; @@ -504,21 +585,33 @@ __API__ k_err_t tos_task_delay_abort(k_task_t *task) TOS_OBJ_VERIFY(task, KNL_OBJ_TYPE_TASK); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (knl_is_self(task) || !task_state_is_sleeping(task)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_TASK_NOT_DELAY; } if (task_state_is_suspended(task)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_TASK_SUSPENDED; } - tick_list_remove(task); readyqueue_add(task); - + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); + #if (USE_SMP ==1u) + BaseType_t x; + BaseType_t xCoreID = port_GET_CORE_ID(); + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ){ + if( x != xCoreID ){ + if( k_knl_states[x] == KNL_STATE_RUNNING) + portSCHED_CORE (x); + } + } + #endif knl_sched(); return K_ERR_NONE; @@ -530,9 +623,15 @@ __API__ k_task_t *tos_task_curr_task_get(void) k_task_t *curr_task = K_NULL; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (likely(tos_knl_is_running())) { + #if (USE_SMP ==1u) + curr_task = k_curr_tasks[port_GET_CORE_ID()]; + #else curr_task = k_curr_task; + #endif } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return curr_task; @@ -548,14 +647,17 @@ __API__ k_task_t *tos_task_find(const char *name) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY(task, k_task_t, stat_list, &k_stat_list) { if (strncmp(task->name, name, K_TASK_NAME_LEN_MAX) == 0) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return task; } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_NULL; } @@ -570,11 +672,13 @@ __API__ void tos_task_walkthru(k_task_walker_t walker) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY(task, k_task_t, stat_list, &k_stat_list) { walker(task); } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -599,7 +703,9 @@ __API__ k_err_t tos_task_stack_draught_depth(k_task_t *task, int *depth) TOS_OBJ_VERIFY(task, KNL_OBJ_TYPE_TASK); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); rc = cpu_task_stack_draught_depth(task->stk_base, task->stk_size, depth); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return rc; diff --git a/core/tos_tick.c b/core/tos_tick.c index 49feeb9..d310bda 100644 --- a/core/tos_tick.c +++ b/core/tos_tick.c @@ -24,6 +24,7 @@ __STATIC__ void tick_task_place(k_task_t *task, k_tick_t timeout) k_tick_t curr_expires, prev_expires = (k_tick_t)0u; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); 
task->tick_expires = timeout; @@ -45,6 +46,7 @@ __STATIC__ void tick_task_place(k_task_t *task, k_tick_t timeout) } tos_list_add_tail(&task->tick_list, &curr_task->tick_list); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -54,6 +56,7 @@ __STATIC__ void tick_task_takeoff(k_task_t *task) k_task_t *next; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); next = TOS_LIST_FIRST_ENTRY_OR_NULL(&task->tick_list, k_task_t, tick_list); if (next && task->tick_list.next != &k_tick_list) { // not the only one @@ -66,6 +69,7 @@ __STATIC__ void tick_task_takeoff(k_task_t *task) tos_list_del(&task->tick_list); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -87,9 +91,11 @@ __KNL__ void tick_update(k_tick_t tick) k_task_t *first, *task, *tmp; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); k_tick_count += tick; if (tos_list_empty(&k_tick_list)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } @@ -99,6 +105,7 @@ __KNL__ void tick_update(k_tick_t tick) first->tick_expires = (k_tick_t)0u; } else { first->tick_expires -= tick; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } @@ -112,6 +119,7 @@ __KNL__ void tick_update(k_tick_t tick) pend_task_wakeup(task, PEND_STATE_TIMEOUT); } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -122,10 +130,12 @@ __KNL__ k_tick_t tick_next_expires_get(void) k_task_t *first; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); first = TOS_LIST_FIRST_ENTRY_OR_NULL(&k_tick_list, k_task_t, tick_list); next_expires = first ? first->tick_expires : TOS_TIME_FOREVER; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return next_expires; } @@ -143,7 +153,19 @@ __API__ void tos_tick_handler(void) #endif #if TOS_CFG_ROUND_ROBIN_EN > 0u - robin_sched(k_curr_task->prio); + #if (USE_SMP ==1u) + if(k_curr_task->prio>k_rdyq.highest_prio){ + k_task_t *task; + task = readyqueue_first_task_get(k_rdyq.highest_prio); + if(task->RunningOnCore==-1) + tos_task_resume(task); + } + else + robin_sched(k_rdyq.highest_prio); + #else + robin_sched(k_curr_task->prio); + #endif + #endif } diff --git a/core/tos_time.c b/core/tos_time.c index bbf180c..3326392 100644 --- a/core/tos_time.c +++ b/core/tos_time.c @@ -23,7 +23,9 @@ __API__ k_tick_t tos_systick_get(void) k_tick_t tick; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); tick = k_tick_count; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return tick; } @@ -33,7 +35,9 @@ __API__ void tos_systick_set(k_tick_t tick) TOS_CPU_CPSR_ALLOC(); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); k_tick_count = tick; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } diff --git a/core/tos_timer.c b/core/tos_timer.c index 17b4592..cc11f0f 100644 --- a/core/tos_timer.c +++ b/core/tos_timer.c @@ -25,6 +25,7 @@ __STATIC__ void timer_place(k_timer_t *tmr) k_timer_t *iter = K_NULL; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); tmr->expires += k_tick_count; @@ -52,6 +53,7 @@ __STATIC__ void timer_place(k_timer_t *tmr) } #endif + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -61,6 +63,7 @@ __STATIC__ void timer_takeoff(k_timer_t *tmr) k_timer_t *first, *next; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); first = TOS_LIST_FIRST_ENTRY(&k_timer_ctl.list, k_timer_t, list); @@ -77,6 +80,7 @@ __STATIC__ void timer_takeoff(k_timer_t *tmr) } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -333,6 +337,7 @@ __KNL__ k_tick_t soft_timer_next_expires_get(void) k_tick_t next_expires; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); if (k_timer_ctl.next_expires == TOS_TIME_FOREVER) { next_expires = TOS_TIME_FOREVER; @@ -342,6 +347,7 @@ __KNL__ k_tick_t 
soft_timer_next_expires_get(void) next_expires = k_timer_ctl.next_expires - k_tick_count; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return next_expires; } diff --git a/osal/cmsis_os/cmsis_os.c b/osal/cmsis_os/cmsis_os.c index 6840745..456085c 100644 --- a/osal/cmsis_os/cmsis_os.c +++ b/osal/cmsis_os/cmsis_os.c @@ -23,7 +23,11 @@ static osStatus errno_knl2cmsis(k_err_t err) // ==== Kernel Control Functions ==== osStatus osKernelStart(void) { + #if( USE_SMP == 1u ) + return errno_knl2cmsis(tos_knl_start_smp()); + #else return errno_knl2cmsis(tos_knl_start()); + #endif } /** @@ -32,7 +36,11 @@ osStatus osKernelStart(void) */ osStatus osKernelInitialize(void) { - return errno_knl2cmsis(tos_knl_init()); + #if (USE_SMP ==1u) + return errno_knl2cmsis(tos_knl_smp_init()); + #else + return errno_knl2cmsis(tos_knl_init()); + #endif } /** diff --git a/osal/cmsis_os/cmsis_os2.c b/osal/cmsis_os/cmsis_os2.c index a6ea544..07301a2 100644 --- a/osal/cmsis_os/cmsis_os2.c +++ b/osal/cmsis_os/cmsis_os2.c @@ -42,7 +42,11 @@ static osStatus_t errno_knl2cmsis(k_err_t err) { /*---------------------------------------------------------------------------*/ osStatus_t osKernelInitialize(void) { - return errno_knl2cmsis(tos_knl_init()); + #if (USE_SMP ==1u) + return errno_knl2cmsis(tos_knl_smp_init()); + #else + return errno_knl2cmsis(tos_knl_init()); + #endif } osStatus_t osKernelGetInfo(osVersion_t* version, @@ -65,8 +69,14 @@ osStatus_t osKernelGetInfo(osVersion_t* version, osKernelState_t osKernelGetState(void) { osKernelState_t state; + knl_state_t k_state_core; + #if( USE_SMP == 1u ) + k_state_core=k_knl_state[port_GET_CORE_ID()]; + #else + k_state_core=k_knl_state; + #endif - switch (k_knl_state) { + switch (k_state_core) { case KNL_STATE_RUNNING: state = osKernelRunning; break; @@ -79,7 +89,11 @@ osKernelState_t osKernelGetState(void) { } osStatus_t osKernelStart(void) { - return errno_knl2cmsis(tos_knl_start()); + #if( USE_SMP == 1u ) + return errno_knl2cmsis(tos_knl_start_smp()); + #else + return errno_knl2cmsis(tos_knl_start()); + #endif } int32_t osKernelLock(void) { @@ -343,10 +357,12 @@ uint32_t osThreadGetCount(void) { TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY(task, k_task_t, stat_list, &k_stat_list) { count += 1; } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return count; @@ -360,6 +376,7 @@ uint32_t osThreadEnumerate(osThreadId_t* thread_array, uint32_t array_items) { k_task_t* task; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); TOS_LIST_FOR_EACH_ENTRY(task, k_task_t, stat_list, &k_stat_list) { while (count < array_items) { @@ -368,6 +385,7 @@ uint32_t osThreadEnumerate(osThreadId_t* thread_array, uint32_t array_items) { } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } diff --git a/osal/posix/mqueue_prv.c b/osal/posix/mqueue_prv.c index 249bf49..16d2415 100644 --- a/osal/posix/mqueue_prv.c +++ b/osal/posix/mqueue_prv.c @@ -33,9 +33,11 @@ __KNL__ int mqueue_id_add(mqd_t id, mqueue_ctl_t *mqueue_ctl) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); mqueue_ctl_table[id] = mqueue_ctl; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; @@ -47,14 +49,17 @@ __KNL__ mqd_t mqueue_id_alloc(void) int i = 0; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); for (i = 0; i < TOS_COUNT_OF(mqueue_ctl_table); ++i) { if (!mqueue_ctl_table[i]) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return (mqd_t)i; } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return -1; } @@ -70,9 +75,11 @@ __KNL__ int mqueue_id_free(mqd_t id) } TOS_CPU_INT_DISABLE(); + 
portGET_TASK_LOCK(); mqueue_ctl_table[id] = K_NULL; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; diff --git a/osal/posix/pthread_prv.c b/osal/posix/pthread_prv.c index 6c9aa55..d60c372 100644 --- a/osal/posix/pthread_prv.c +++ b/osal/posix/pthread_prv.c @@ -36,15 +36,18 @@ __KNL__ pthread_ctl_t *pthread_ctl_self(void) self_task = tos_task_curr_task_get(); TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); for (i = 0; i < TOS_COUNT_OF(thread_ctl_table); ++i) { the_info = thread_ctl_table[i]; if (the_info && the_info->the_ktask == self_task) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return the_info; } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_NULL; } @@ -81,9 +84,11 @@ __KNL__ int pthread_id_add(pthread_t id, pthread_ctl_t *info) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); thread_ctl_table[id] = info; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; @@ -95,14 +100,17 @@ __KNL__ pthread_t pthread_id_alloc(void) int i = 0; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); for (i = 0; i < TOS_COUNT_OF(thread_ctl_table); ++i) { if (!thread_ctl_table[i]) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return (pthread_t)i; } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return -1; } @@ -118,9 +126,11 @@ __KNL__ int pthread_id_free(pthread_t id) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); thread_ctl_table[id] = K_NULL; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; diff --git a/osal/posix/timer_prv.c b/osal/posix/timer_prv.c index a66e6ff..5f27e60 100644 --- a/osal/posix/timer_prv.c +++ b/osal/posix/timer_prv.c @@ -34,8 +34,10 @@ __KNL__ int timer_id_add(timer_t id, ptimer_ctl_t *ptimer_ctl) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); ptimer_ctl_table[id] = ptimer_ctl; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; @@ -47,14 +49,17 @@ __KNL__ timer_t timer_id_alloc(void) int i = 0; TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); for (i = 0; i < TOS_COUNT_OF(ptimer_ctl_table); ++i) { if (!ptimer_ctl_table[i]) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return (timer_t)i; } } + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return -1; } @@ -70,9 +75,11 @@ __KNL__ int timer_id_free(timer_t id) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); ptimer_ctl_table[id] = K_NULL; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return 0; diff --git a/pm/tos_pm.c b/pm/tos_pm.c index 66420d5..dd10af5 100644 --- a/pm/tos_pm.c +++ b/pm/tos_pm.c @@ -30,7 +30,9 @@ __API__ k_err_t tos_pm_cpu_lpwr_mode_set(k_cpu_lpwr_mode_t cpu_lpwr_mode) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); k_cpu_lpwr_mode = cpu_lpwr_mode; + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return K_ERR_NONE; } diff --git a/pm/tos_tickless.c b/pm/tos_tickless.c index 90d21d5..6a38d39 100644 --- a/pm/tos_tickless.c +++ b/pm/tos_tickless.c @@ -107,6 +107,7 @@ __STATIC__ void tickless_tick_fix(k_tick_t tick_sleep) TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); /* we wakeup from SLEEP mode, fix the system's tick & timer */ tick_update(tick_sleep); @@ -116,6 +117,7 @@ __STATIC__ void tickless_tick_fix(k_tick_t tick_sleep) tickless_tick_resume(); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); } @@ -149,14 +151,17 @@ __KNL__ void tickless_proc(void) } TOS_CPU_INT_DISABLE(); + portGET_TASK_LOCK(); time_sleep = tickless_cpu_sleep_time_get(lpwr_mode); /* in millisecond */ if (unlikely(time_sleep == (k_time_t)0)) { + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); return; } tickless_enter(); + portRELEASE_TASK_LOCK(); TOS_CPU_INT_ENABLE(); 
tickless_wkup_alarm_setup(lpwr_mode, time_sleep); pm_cpu_lpwr_mode_enter(lpwr_mode);
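
Enabling the SMP build is a configuration-level switch: the conditional code above compiles the multi-core paths when USE_SMP is 1u and the affinity API when configUSE_CORE_AFFINITY is 1u. A minimal configuration fragment as a sketch; the values and the exact header they live in (port_config.h in the new RP2040 port) are assumptions, not taken from the diff:

    /* port_config.h (RP2040 port) -- illustrative values, not from the diff */
    #define USE_SMP                    1u   /* compile the SMP scheduler paths        */
    #define configNUM_CORES            2u   /* RP2040: two Cortex-M0+ cores           */
    #define configUSE_CORE_AFFINITY    1u   /* optional: per-task core affinity masks */
    #define configTICK_CORE            0u   /* core that owns the system tick         */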
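
With that configuration in place, the new entry points tos_knl_smp_init() and tos_knl_start_smp() (core/tos_sys.c above) take the place of tos_knl_init()/tos_knl_start(): core 0 claims the two hardware spinlocks and initializes the port, and tos_knl_start_smp() launches the application core through port_multicore_launch() before starting the scheduler on the primary core. A bare-metal usage sketch, assuming the usual tos_task_create() signature; the stack size, priority and task body are placeholders:

    #include "tos_k.h"

    #define APP_STK_SIZE    1024u                 /* placeholder stack size */

    static k_task_t  app_task;
    static k_stack_t app_task_stk[APP_STK_SIZE];

    static void app_task_entry(void *arg)
    {
        (void)arg;
        while (K_TRUE) {
            /* application work */
            tos_task_delay(100);
        }
    }

    int main(void)
    {
        /* tos_knl_init() plus core-0 SMP setup (spinlock claim, port_smp_init_kernel) */
        tos_knl_smp_init();

        tos_task_create(&app_task, "app_task", app_task_entry, K_NULL,
                        4u, app_task_stk, sizeof(app_task_stk), 0u);

        /* launches the application core, then starts scheduling on the primary core */
        tos_knl_start_smp();

        return 0;   /* not reached once the scheduler is running */
    }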
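
When configUSE_CORE_AFFINITY is 1u, every TCB gains a CoreAffinityMask (initialized to all cores in task_reset()) together with the tos_TaskCoreAffinitySet()/vTaskCoreAffinityGet() pair in core/tos_sys.c: the setter masks the request to the valid cores, stores it, and, if the task is currently running on a core the new mask excludes, either reschedules the calling core or pends a scheduling interrupt on the other core. A short usage sketch for a two-core part; the helper name is an assumption:

    #include "tos_k.h"

    /* Pin an already-created worker task to core 1 (one bit per core). */
    static void pin_worker_to_core1(k_task_t *worker)
    {
        tos_TaskCoreAffinitySet(worker, (BaseType_t)(1u << 1));

        /* Read the mask back, e.g. for diagnostics. */
        BaseType_t mask = vTaskCoreAffinityGet(worker);
        (void)mask;
    }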
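
The bulk of the diff follows a single pattern: each existing TOS_CPU_INT_DISABLE()/TOS_CPU_INT_ENABLE() critical section is widened for SMP by taking the inter-core task lock (an RP2040 hardware spinlock behind portGET_TASK_LOCK()) right after masking local interrupts, and releasing it on every exit path before unmasking. A condensed sketch of that ordering; the object type, field names and error code below are illustrative, not kernel definitions:

    #include "tos_k.h"

    typedef struct {                    /* illustrative object, not a kernel type */
        uint32_t count;
        uint32_t count_max;
    } demo_obj_t;

    __API__ k_err_t demo_obj_count_increase(demo_obj_t *obj)
    {
        TOS_CPU_CPSR_ALLOC();

        TOS_CPU_INT_DISABLE();          /* 1. mask interrupts on the local core   */
        portGET_TASK_LOCK();            /* 2. then take the cross-core spinlock   */

        if (obj->count == obj->count_max) {
            portRELEASE_TASK_LOCK();    /* release in reverse order on every exit */
            TOS_CPU_INT_ENABLE();
            return K_ERR_OBJ_INVALID;   /* placeholder error code                 */
        }
        ++obj->count;

        portRELEASE_TASK_LOCK();
        TOS_CPU_INT_ENABLE();
        return K_ERR_NONE;
    }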
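
Whenever an API call can make a task ready for another core (task creation, resume, delay abort, priority change), the diff walks the other cores and pends their scheduling interrupt with portSCHED_CORE() before rescheduling the calling core. A condensed sketch of that loop as a standalone helper; the function name is an assumption, and knl_sched() is the kernel-internal scheduler entry used throughout the patch:

    #include "tos_k.h"

    /* Ask every other running core to re-evaluate its ready queue,
     * then reschedule the calling core. */
    static void smp_request_resched(void)
    {
    #if (USE_SMP == 1u)
        BaseType_t x;
        BaseType_t self = (BaseType_t)port_GET_CORE_ID();

        for (x = 0; x < (BaseType_t)configNUM_CORES; ++x) {
            if (x != self && k_knl_states[x] == KNL_STATE_RUNNING) {
                portSCHED_CORE(x);      /* pend the scheduling interrupt on core x */
            }
        }
    #endif
        knl_sched();
    }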
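
Interrupt bookkeeping also becomes per core: tos_knl_irq_enter()/tos_knl_irq_leave() now maintain k_irq_nest_cnts[port_GET_CORE_ID()] instead of the single k_irq_nest_cnt, and tos_knl_is_running() checks the calling core's entry in k_knl_states, so the familiar ISR wrapper pattern is unchanged on either core. A sketch with a hypothetical handler name:

    #include "tos_k.h"

    /* Hypothetical peripheral ISR; the enter/leave pair updates the nest
     * counter of whichever core took the interrupt. */
    void demo_peripheral_irq_handler(void)
    {
        tos_knl_irq_enter();

        /* ... acknowledge the peripheral, post a semaphore or a message ... */

        tos_knl_irq_leave();            /* may trigger a context switch on this core */
    }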