From 1513f50a474d1227a83fb655ad18c19cd792f235 Mon Sep 17 00:00:00 2001
From: ou-yangkan <519689417@qq.com>
Date: Fri, 31 Dec 2021 13:58:12 +0800
Subject: [PATCH] feat: added assembly atomic interface
Added 32-bit assembly atomic interfaces:
BREAKING CHANGE:
Assembly implementation:
ArchAtomicRead
ArchAtomicSet
ArchAtomicAdd
ArchAtomicSub
ArchAtomicInc
ArchAtomicIncRet
ArchAtomicDec
ArchAtomicDecRet
https://gitee.com/openharmony/kernel_liteos_m/issues/I4O1UC
Signed-off-by: wang-shulin93 <15173259956@163.com>
---
arch/arm/arm9/gcc/los_arch_atomic.h | 240 ++++++++++----
arch/arm/cortex-m3/keil/los_arch_atomic.h | 221 ++++++++++---
arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h | 221 ++++++++++---
.../gcc/TZ/non_secure/los_arch_atomic.h | 221 ++++++++++---
arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h | 240 +++++++++++---
.../iar/TZ/non_secure/los_arch_atomic.h | 240 +++++++++++---
arch/arm/cortex-m4/gcc/los_arch_atomic.h | 221 ++++++++++---
arch/arm/cortex-m4/iar/los_arch_atomic.h | 222 ++++++++++---
arch/arm/cortex-m7/gcc/los_arch_atomic.h | 222 ++++++++++---
arch/arm/cortex-m7/iar/los_arch_atomic.h | 222 ++++++++++---
arch/csky/v2/gcc/los_arch_atomic.h | 300 +++++++++++++++++
arch/include/los_atomic.h | 185 +----------
arch/risc-v/nuclei/gcc/los_arch_atomic.h | 303 +++++++++++++++++
arch/risc-v/riscv32/gcc/los_arch_atomic.h | 303 +++++++++++++++++
arch/xtensa/lx6/gcc/los_arch_atomic.h | 308 ++++++++++++++++++
testsuites/BUILD.gn | 1 +
testsuites/include/osTest.h | 1 +
testsuites/sample/kernel/atomic/BUILD.gn | 45 +++
.../sample/kernel/atomic/it_los_atomic.c | 61 ++++
.../sample/kernel/atomic/it_los_atomic.h | 66 ++++
.../sample/kernel/atomic/it_los_atomic_001.c | 92 ++++++
.../sample/kernel/atomic/it_los_atomic_002.c | 100 ++++++
.../sample/kernel/atomic/it_los_atomic_003.c | 114 +++++++
.../sample/kernel/atomic/it_los_atomic_004.c | 112 +++++++
.../sample/kernel/atomic/it_los_atomic_005.c | 110 +++++++
.../sample/kernel/atomic/it_los_atomic_006.c | 134 ++++++++
.../sample/kernel/atomic/it_los_atomic_007.c | 118 +++++++
.../sample/kernel/atomic/it_los_atomic_008.c | 121 +++++++
.../sample/kernel/atomic/it_los_atomic_009.c | 140 ++++++++
testsuites/src/osTest.c | 3 +
utils/los_compiler.h | 3 +
31 files changed, 4288 insertions(+), 602 deletions(-)
create mode 100644 arch/csky/v2/gcc/los_arch_atomic.h
create mode 100644 arch/risc-v/nuclei/gcc/los_arch_atomic.h
create mode 100644 arch/risc-v/riscv32/gcc/los_arch_atomic.h
create mode 100644 arch/xtensa/lx6/gcc/los_arch_atomic.h
create mode 100644 testsuites/sample/kernel/atomic/BUILD.gn
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic.h
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_001.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_002.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_003.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_004.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_005.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_006.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_007.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_008.c
create mode 100644 testsuites/sample/kernel/atomic/it_los_atomic_009.c
diff --git a/arch/arm/arm9/gcc/los_arch_atomic.h b/arch/arm/arm9/gcc/los_arch_atomic.h
index 9a8a0adc..a8e73239 100644
--- a/arch/arm/arm9/gcc/los_arch_atomic.h
+++ b/arch/arm/arm9/gcc/los_arch_atomic.h
@@ -40,75 +40,191 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic exchange for 32-bit variable.
- *
- * @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable
- * and return the previous value of the atomic variable.
- * @attention
- *
- * The pointer v must not be NULL.
- *
- * @param v [IN] The variable pointer.
- * @param val [IN] The exchange value.
- *
- * @retval #INT32 The previous value of the atomic variable
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
- return -1;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ LOS_IntRestore(intSave);
+ return *v;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
- return -1;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic exchange for 32-bit variable with compare.
- *
- * @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
- * @attention
- * The pointer v must not be NULL.
- *
- * @param v [IN] The variable pointer.
- * @param val [IN] The new value.
- * @param oldVal [IN] The old value.
- *
- * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
- * @retval FALSE The previous value of the atomic variable is equal to oldVal.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
- return FALSE;
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicXchg32bits(Atomic *v, INT32 val)
+{
+ INT32 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg32bits(Atomic *v, INT32 val, INT32 oldVal)
+{
+ INT32 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
}
#ifdef __cplusplus
diff --git a/arch/arm/cortex-m3/keil/los_arch_atomic.h b/arch/arm/cortex-m3/keil/los_arch_atomic.h
index 12ed5bc8..96edd411 100644
--- a/arch/arm/cortex-m3/keil/los_arch_atomic.h
+++ b/arch/arm/cortex-m3/keil/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " beq 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
diff --git a/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h b/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
index 12ed5bc8..96edd411 100755
--- a/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " beq 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
diff --git a/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h b/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
index 12ed5bc8..96edd411 100755
--- a/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " beq 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
diff --git a/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h b/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
index 25a1ed52..96edd411 100644
--- a/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
@@ -29,8 +29,8 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LOS_ATOMIC_H
-#define LOS_ATOMIC_H
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
#include "los_compiler.h"
@@ -40,21 +40,109 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " beq 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
- * @ingroup los_atomic
+ * @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
* @param v [IN] The variable pointer.
- * @param val [IN] The exchange value.
+ * @param val [IN] The exchange value.
*
* @retval #INT32 The previous value of the atomic variable
* @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
+ * - los_arch_atomic.h: the header file that contains the API declaration.
* @see
*/
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
@@ -74,43 +162,7 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
}
/**
- * @ingroup los_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
-/**
- * @ingroup los_atomic
+ * @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
*
* @par Description:
@@ -120,12 +172,12 @@ STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
*
* @param v [IN] The variable pointer.
* @param val [IN] The new value.
- * @param oldVal [IN] The old value.
+ * @param oldVal [IN] The old value.
*
* @retval TRUE The previous value of the atomic variable is not equal to oldVal.
* @retval FALSE The previous value of the atomic variable is equal to oldVal.
* @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
+ * - los_arch_atomic.h: the header file that contains the API declaration.
* @see
*/
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
@@ -148,11 +200,105 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
-#endif /* LOS_ATOMIC_H */
-
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h b/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
index 25a1ed52..96edd411 100644
--- a/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
@@ -29,8 +29,8 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LOS_ATOMIC_H
-#define LOS_ATOMIC_H
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
#include "los_compiler.h"
@@ -40,21 +40,109 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
- * @ingroup los_atomic
+ * @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
* @param v [IN] The variable pointer.
- * @param val [IN] The exchange value.
+ * @param val [IN] The exchange value.
*
* @retval #INT32 The previous value of the atomic variable
* @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
+ * - los_arch_atomic.h: the header file that contains the API declaration.
* @see
*/
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
@@ -74,43 +162,7 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
}
/**
- * @ingroup los_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
-/**
- * @ingroup los_atomic
+ * @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
*
* @par Description:
@@ -120,12 +172,12 @@ STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
*
* @param v [IN] The variable pointer.
* @param val [IN] The new value.
- * @param oldVal [IN] The old value.
+ * @param oldVal [IN] The old value.
*
* @retval TRUE The previous value of the atomic variable is not equal to oldVal.
* @retval FALSE The previous value of the atomic variable is equal to oldVal.
* @par Dependency:
- * - los_atomic.h: the header file that contains the API declaration.
+ * - los_arch_atomic.h: the header file that contains the API declaration.
* @see
*/
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
@@ -148,11 +200,105 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
-#endif /* LOS_ATOMIC_H */
-
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/arch/arm/cortex-m4/gcc/los_arch_atomic.h b/arch/arm/cortex-m4/gcc/los_arch_atomic.h
index 12ed5bc8..96edd411 100644
--- a/arch/arm/cortex-m4/gcc/los_arch_atomic.h
+++ b/arch/arm/cortex-m4/gcc/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
diff --git a/arch/arm/cortex-m4/iar/los_arch_atomic.h b/arch/arm/cortex-m4/iar/los_arch_atomic.h
index 4302b0f4..96edd411 100644
--- a/arch/arm/cortex-m4/iar/los_arch_atomic.h
+++ b/arch/arm/cortex-m4/iar/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
@@ -155,4 +302,3 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
#endif /* __cplusplus */
#endif /* _LOS_ARCH_ATOMIC_H */
-
diff --git a/arch/arm/cortex-m7/gcc/los_arch_atomic.h b/arch/arm/cortex-m7/gcc/los_arch_atomic.h
index 4302b0f4..96edd411 100644
--- a/arch/arm/cortex-m7/gcc/los_arch_atomic.h
+++ b/arch/arm/cortex-m7/gcc/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
@@ -155,4 +302,3 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
#endif /* __cplusplus */
#endif /* _LOS_ARCH_ATOMIC_H */
-
diff --git a/arch/arm/cortex-m7/iar/los_arch_atomic.h b/arch/arm/cortex-m7/iar/los_arch_atomic.h
index 4302b0f4..96edd411 100644
--- a/arch/arm/cortex-m7/iar/los_arch_atomic.h
+++ b/arch/arm/cortex-m7/iar/los_arch_atomic.h
@@ -40,12 +40,100 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 status;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1:ldrex %0, [%2]\n"
+ " strex %0, %3, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status), "+m"(*v)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 status;
+
+ do {
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable and return the previous value of the atomic variable.
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
* @attention
* The pointer v must not be NULL.
*
@@ -73,42 +161,6 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
return prevVal;
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic auto-decrement.
- *
- * @par Description:
- * This API is used to implement the atomic auto-decrement and return the result of auto-decrement.
- * @attention
- *
- * - The pointer v must not be NULL.
- * - The value which v point to must not be INT_MIN to avoid overflow after reducing 1.
- *
- *
- * @param v [IN] The addSelf variable pointer.
- *
- * @retval #INT32 The return value of variable auto-decrement.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicDecRet(volatile INT32 *v)
-{
- INT32 val = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "sub %0, %0, #1\n"
- "strex %1, %0, [%3]"
- : "=&r"(val), "=&r"(status), "+m"(*v)
- : "r"(v)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
/**
* @ingroup los_arch_atomic
* @brief Atomic exchange for 32-bit variable with compare.
@@ -148,6 +200,101 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
return prevVal != oldVal;
}
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
#ifdef __cplusplus
#if __cplusplus
}
@@ -155,4 +302,3 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
#endif /* __cplusplus */
#endif /* _LOS_ARCH_ATOMIC_H */
-
diff --git a/arch/csky/v2/gcc/los_arch_atomic.h b/arch/csky/v2/gcc/los_arch_atomic.h
new file mode 100644
index 00000000..bf8fe6c1
--- /dev/null
+++ b/arch/csky/v2/gcc/los_arch_atomic.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
+
+#include "los_compiler.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ __asm__ __volatile__("ldw %0, (%1)\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ __asm__ __volatile__("stw %0, (%1, 0)"
+ :
+ : "r"(setVal), "r"(v)
+ : "memory");
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldw %0, (%1)\n"
+ "add %0, %0, %2\n"
+ "stw %0, (%1, 0)"
+ : "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc", "memory");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+ INT32 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("ldw %0, (%1)\n"
+ "sub %0, %2\n"
+ "stw %0, (%1, 0)"
+ : "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc", "memory");
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+ (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+ (VOID)ArchAtomicSub(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+ return ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+ return ArchAtomicSub(v, 1);
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The exchange value.
+ *
+ * @retval #INT32 The previous value of the atomic variable
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
+{
+ INT32 prevVal = 0;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ __asm__ __volatile__("ldw %0, (%1)\n"
+ "stw %2, (%1)"
+ : "=&r"(prevVal)
+ : "r"(v), "r"(val)
+ : "cc", "memory");
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable with compare.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The new value.
+ * @param oldVal [IN] The old value.
+ *
+ * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
+ * @retval FALSE The previous value of the atomic variable is equal to oldVal.
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+{
+ INT32 prevVal = 0;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+
+ __asm__ __volatile__("1: ldw %0, (%1)\n"
+ " cmpne %0, %2\n"
+ " bt 2f\n"
+ " stw %3, (%1)\n"
+ "2:"
+ : "=&r"(prevVal)
+ : "r"(v), "r"(oldVal), "r"(val)
+ : "cc", "memory");
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v = setVal;
+ LOS_IntRestore(intSave);
+}
+
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v += addVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+ INT64 val;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ *v -= subVal;
+ val = *v;
+ LOS_IntRestore(intSave);
+
+ return val;
+}
+
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+ return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+ (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+ return ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ *v = val;
+ LOS_IntRestore(intSave);
+
+ return prevVal;
+}
+
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+ INT64 prevVal;
+ UINT32 intSave;
+
+ intSave = LOS_IntLock();
+ prevVal = *v;
+ if (prevVal == oldVal) {
+ *v = val;
+ }
+ LOS_IntRestore(intSave);
+
+ return prevVal != oldVal;
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/arch/include/los_atomic.h b/arch/include/los_atomic.h
index 6373a053..6a0f3124 100755
--- a/arch/include/los_atomic.h
+++ b/arch/include/los_atomic.h
@@ -33,6 +33,7 @@
#define _LOS_ATOMIC_H
#include "los_compiler.h"
+#include "los_arch_atomic.h"
#ifdef __cplusplus
#if __cplusplus
@@ -40,190 +41,6 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-typedef volatile INT32 Atomic;
-typedef volatile INT64 Atomic64;
-
-STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
-{
- return *v;
-}
-
-STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
-{
- *v = setVal;
-}
-
-STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
-{
- INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- *v += addVal;
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
-{
- INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- *v -= subVal;
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE VOID ArchAtomicInc(Atomic *v)
-{
- (VOID)ArchAtomicAdd(v, 1);
-}
-
-STATIC INLINE VOID ArchAtomicDec(Atomic *v)
-{
- (VOID)ArchAtomicSub(v, 1);
-}
-
-STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
-{
- return ArchAtomicAdd(v, 1);
-}
-
-#ifndef ARCH_ARM
-STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
-{
- return ArchAtomicSub(v, 1);
-}
-
-STATIC INLINE INT32 ArchAtomicXchg32bits(Atomic *v, INT32 val)
-{
- INT32 prevVal;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- prevVal = *v;
- *v = val;
- LOS_IntRestore(intSave);
-
- return prevVal;
-}
-
-STATIC INLINE BOOL ArchAtomicCmpXchg32bits(Atomic *v, INT32 val, INT32 oldVal)
-{
- INT32 prevVal;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- prevVal = *v;
- if (prevVal == oldVal) {
- *v = val;
- }
- LOS_IntRestore(intSave);
-
- return prevVal != oldVal;
-}
-#endif
-
-STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
-{
- INT64 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
-{
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- *v = setVal;
- LOS_IntRestore(intSave);
-}
-
-STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
-{
- INT64 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- *v += addVal;
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
-{
- INT64 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- *v -= subVal;
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
-{
- (VOID)ArchAtomic64Add(v, 1);
-}
-
-STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
-{
- return ArchAtomic64Add(v, 1);
-}
-
-STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
-{
- (VOID)ArchAtomic64Sub(v, 1);
-}
-
-STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
-{
- return ArchAtomic64Sub(v, 1);
-}
-
-STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
-{
- INT64 prevVal;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- prevVal = *v;
- *v = val;
- LOS_IntRestore(intSave);
-
- return prevVal;
-}
-
-STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
-{
- INT64 prevVal;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- prevVal = *v;
- if (prevVal == oldVal) {
- *v = val;
- }
- LOS_IntRestore(intSave);
-
- return prevVal != oldVal;
-}
-
#define LOS_AtomicRead ArchAtomicRead
#define LOS_AtomicSet ArchAtomicSet
#define LOS_AtomicAdd ArchAtomicAdd
diff --git a/arch/risc-v/nuclei/gcc/los_arch_atomic.h b/arch/risc-v/nuclei/gcc/los_arch_atomic.h
new file mode 100644
index 00000000..56babd44
--- /dev/null
+++ b/arch/risc-v/nuclei/gcc/los_arch_atomic.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
+
+#include "los_compiler.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+/*
+ * Atomically read *v (RISC-V, Nuclei).
+ * lr.w acquires a load reservation that is never paired with an sc.w here;
+ * the fence orders the load with surrounding memory accesses.
+ */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("lr.w %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically store setVal to *v.
+ * amoswap.w returns the previous value into prevVal, which is discarded.
+ */
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+    UINT32 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoswap.w %0, %2, (%1)\n"
+                         : "=r"(prevVal)
+                         : "r"(v), "r"(setVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+}
+
+/*
+ * Atomically add addVal to *v; returns the NEW value.
+ * amoadd.w deposits the OLD value into %0, so the subsequent lw re-reads
+ * the updated value. The re-read only matches this add because interrupts
+ * are locked around the sequence (single-core assumption).
+ */
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
+                         "lw %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v), "r"(addVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically subtract subVal from *v; returns the NEW value.
+ * Implemented as amoadd.w of the negated operand.
+ * NOTE(review): -subVal is signed-overflow UB when subVal == INT32_MIN --
+ * confirm callers never pass that value.
+ */
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
+                         "lw %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v), "r"(-subVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers over ArchAtomicAdd/ArchAtomicSub with delta 1. */
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+    (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+    (VOID)ArchAtomicSub(v, 1);
+}
+
+/* Increment and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+    return ArchAtomicAdd(v, 1);
+}
+
+/* Decrement and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+    return ArchAtomicSub(v, 1);
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The exchange value.
+ *
+ * @retval #INT32 The previous value of the atomic variable
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    /* lw captures the old value; amoswap.w also returns it into %0,
+     * so the preceding lw is redundant but harmless under the lock. */
+    __asm__ __volatile__("lw %0, 0(%1)\n"
+                         "amoswap.w %0, %2, (%1)\n"
+                         : "=&r"(prevVal)
+                         : "r"(v), "r"(val)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable with compare.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The new value.
+ * @param oldVal [IN] The old value.
+ *
+ * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
+ * @retval FALSE The previous value of the atomic variable is equal to oldVal.
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    /* Load current value; branch past the swap when it differs from oldVal. */
+    __asm__ __volatile__("1: lw %0, 0(%1)\n"
+                         "   bne %0, %2, 2f\n"
+                         "   amoswap.w %0, %3, (%1)\n"
+                         "2:"
+                         : "=&r"(prevVal)
+                         : "r"(v), "r"(oldVal), "r"(val)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    /* FALSE means the swap happened (previous value matched oldVal). */
+    return prevVal != oldVal;
+}
+
+/*
+ * 64-bit atomics: RV32 has no 64-bit AMO instructions, so these fall back
+ * to plain C guarded by an interrupt lock (single-core assumption).
+ */
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically assign setVal to *v. */
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v = setVal;
+    LOS_IntRestore(intSave);
+}
+
+/* Atomically add addVal to *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v += addVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically subtract subVal from *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v -= subVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers with delta 1. */
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+    return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+    return ArchAtomic64Sub(v, 1);
+}
+
+/* Atomically exchange *v with val; returns the PREVIOUS value. */
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    *v = val;
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/* 64-bit compare-and-swap. NOTE: returns FALSE when the swap SUCCEEDED
+ * (previous value matched oldVal), TRUE otherwise. */
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    if (prevVal == oldVal) {
+        *v = val;
+    }
+    LOS_IntRestore(intSave);
+
+    return prevVal != oldVal;
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/arch/risc-v/riscv32/gcc/los_arch_atomic.h b/arch/risc-v/riscv32/gcc/los_arch_atomic.h
new file mode 100644
index 00000000..56babd44
--- /dev/null
+++ b/arch/risc-v/riscv32/gcc/los_arch_atomic.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
+
+#include "los_compiler.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+/*
+ * Atomically read *v (RISC-V, riscv32).
+ * lr.w acquires a load reservation that is never paired with an sc.w here;
+ * the fence orders the load with surrounding memory accesses.
+ */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("lr.w %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically store setVal to *v.
+ * amoswap.w returns the previous value into prevVal, which is discarded.
+ */
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+    UINT32 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoswap.w %0, %2, (%1)\n"
+                         : "=r"(prevVal)
+                         : "r"(v), "r"(setVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+}
+
+/*
+ * Atomically add addVal to *v; returns the NEW value.
+ * amoadd.w deposits the OLD value into %0, so the subsequent lw re-reads
+ * the updated value. The re-read only matches this add because interrupts
+ * are locked around the sequence (single-core assumption).
+ */
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
+                         "lw %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v), "r"(addVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically subtract subVal from *v; returns the NEW value.
+ * Implemented as amoadd.w of the negated operand.
+ * NOTE(review): -subVal is signed-overflow UB when subVal == INT32_MIN --
+ * confirm callers never pass that value.
+ */
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
+                         "lw %0, (%1)\n"
+                         "fence rw, rw\n"
+                         : "=&r"(val)
+                         : "r"(v), "r"(-subVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers over ArchAtomicAdd/ArchAtomicSub with delta 1. */
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+    (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+    (VOID)ArchAtomicSub(v, 1);
+}
+
+/* Increment and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+    return ArchAtomicAdd(v, 1);
+}
+
+/* Decrement and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+    return ArchAtomicSub(v, 1);
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The exchange value.
+ *
+ * @retval #INT32 The previous value of the atomic variable
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    /* lw captures the old value; amoswap.w also returns it into %0,
+     * so the preceding lw is redundant but harmless under the lock. */
+    __asm__ __volatile__("lw %0, 0(%1)\n"
+                         "amoswap.w %0, %2, (%1)\n"
+                         : "=&r"(prevVal)
+                         : "r"(v), "r"(val)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable with compare.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The new value.
+ * @param oldVal [IN] The old value.
+ *
+ * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
+ * @retval FALSE The previous value of the atomic variable is equal to oldVal.
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    /* Load current value; branch past the swap when it differs from oldVal. */
+    __asm__ __volatile__("1: lw %0, 0(%1)\n"
+                         "   bne %0, %2, 2f\n"
+                         "   amoswap.w %0, %3, (%1)\n"
+                         "2:"
+                         : "=&r"(prevVal)
+                         : "r"(v), "r"(oldVal), "r"(val)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    /* FALSE means the swap happened (previous value matched oldVal). */
+    return prevVal != oldVal;
+}
+
+/*
+ * 64-bit atomics: RV32 has no 64-bit AMO instructions, so these fall back
+ * to plain C guarded by an interrupt lock (single-core assumption).
+ */
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically assign setVal to *v. */
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v = setVal;
+    LOS_IntRestore(intSave);
+}
+
+/* Atomically add addVal to *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v += addVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically subtract subVal from *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v -= subVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers with delta 1. */
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+    return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+    return ArchAtomic64Sub(v, 1);
+}
+
+/* Atomically exchange *v with val; returns the PREVIOUS value. */
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    *v = val;
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/* 64-bit compare-and-swap. NOTE: returns FALSE when the swap SUCCEEDED
+ * (previous value matched oldVal), TRUE otherwise. */
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    if (prevVal == oldVal) {
+        *v = val;
+    }
+    LOS_IntRestore(intSave);
+
+    return prevVal != oldVal;
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/arch/xtensa/lx6/gcc/los_arch_atomic.h b/arch/xtensa/lx6/gcc/los_arch_atomic.h
new file mode 100644
index 00000000..af10f55c
--- /dev/null
+++ b/arch/xtensa/lx6/gcc/los_arch_atomic.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LOS_ARCH_ATOMIC_H
+#define _LOS_ARCH_ATOMIC_H
+
+#include "los_compiler.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+/* Atomically read *v (Xtensa). l32ai is an acquire load. */
+STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("l32ai %0, %1, 0\n"
+                         : "=&a"(val)
+                         : "a"(v)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically store setVal to *v.
+ * l32ai + wsr SCOMPARE1 + s32c1i forms a compare-and-store; the conditional
+ * store can only succeed because interrupts are locked so *v cannot change
+ * between the load and the store (single-core assumption --
+ * NOTE(review): on SMP another core could still intervene; confirm target).
+ */
+STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("l32ai %0, %2, 0\n"
+                         "wsr %0, SCOMPARE1\n"
+                         "s32c1i %3, %1"
+                         : "=&a"(val), "+m"(*v)
+                         : "a"(v), "a"(setVal)
+                         : "memory");
+    LOS_IntRestore(intSave);
+}
+
+/*
+ * Atomically add addVal to *v; returns the NEW value.
+ * l32ai loads the old value, wsr arms SCOMPARE1 with it, and s32c1i stores
+ * (old + addVal) only if memory still equals SCOMPARE1 -- guaranteed here
+ * because interrupts are locked. s32c1i writes the previous memory value
+ * back into %0, so the updated value must be re-read from memory.
+ * Fix: read *v while interrupts are STILL locked; the previous code did
+ * `return *v;` after LOS_IntRestore(), so an ISR could modify the variable
+ * between unlock and the read, returning a value unrelated to this add.
+ */
+STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("l32ai %0, %2, 0\n"
+                         "wsr %0, SCOMPARE1\n"
+                         "add %0, %0, %3\n"
+                         "s32c1i %0, %1\n"
+                         : "=&a"(val), "+m"(*v)
+                         : "a"(v), "a"(addVal)
+                         : "memory");
+    val = *v; /* capture the updated value inside the critical section */
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/*
+ * Atomically subtract subVal from *v; returns the NEW value.
+ * Same l32ai/SCOMPARE1/s32c1i sequence as ArchAtomicAdd.
+ * Fix: read *v while interrupts are STILL locked; the previous code did
+ * `return *v;` after LOS_IntRestore(), so an ISR could modify the variable
+ * between unlock and the read, returning a value unrelated to this sub.
+ */
+STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
+{
+    INT32 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    __asm__ __volatile__("l32ai %0, %2, 0\n"
+                         "wsr %0, SCOMPARE1\n"
+                         "sub %0, %0, %3\n"
+                         "s32c1i %0, %1\n"
+                         : "=&a"(val), "+m"(*v)
+                         : "a"(v), "a"(subVal)
+                         : "memory");
+    val = *v; /* capture the updated value inside the critical section */
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers over ArchAtomicAdd/ArchAtomicSub with delta 1. */
+STATIC INLINE VOID ArchAtomicInc(Atomic *v)
+{
+    (VOID)ArchAtomicAdd(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomicDec(Atomic *v)
+{
+    (VOID)ArchAtomicSub(v, 1);
+}
+
+/* Increment and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
+{
+    return ArchAtomicAdd(v, 1);
+}
+
+/* Decrement and return the NEW value. */
+STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
+{
+    return ArchAtomicSub(v, 1);
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable
+ * and return the previous value of the atomic variable.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The exchange value.
+ *
+ * @retval #INT32 The previous value of the atomic variable
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    /* Arm SCOMPARE1 with the current value so the s32c1i store of val is
+     * unconditional in practice (interrupts are locked); %0 keeps the
+     * previous value. */
+    __asm__ __volatile__("l32ai %0, %2, 0\n"
+                         "wsr %0, SCOMPARE1\n"
+                         "s32c1i %3, %1\n"
+                         : "=&a"(prevVal), "+m"(*v)
+                         : "a"(v), "a"(val)
+                         : "memory");
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/**
+ * @ingroup los_arch_atomic
+ * @brief Atomic exchange for 32-bit variable with compare.
+ *
+ * @par Description:
+ * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
+ * @attention
+ * The pointer v must not be NULL.
+ *
+ * @param v [IN] The variable pointer.
+ * @param val [IN] The new value.
+ * @param oldVal [IN] The old value.
+ *
+ * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
+ * @retval FALSE The previous value of the atomic variable is equal to oldVal.
+ * @par Dependency:
+ * - los_arch_atomic.h: the header file that contains the API declaration.
+ * @see
+ */
+STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+{
+    INT32 prevVal = 0;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+
+    /* Skip the conditional store when the current value differs from oldVal.
+     * NOTE(review): this asm clobbers only "cc" while the sibling routines
+     * clobber "memory"; the "+m"(*v) operand does tell the compiler *v is
+     * written, but confirm a "memory" clobber is not needed for ordering. */
+    __asm__ __volatile__("l32ai %0, %2, 0\n"
+                         "wsr %0, SCOMPARE1\n"
+                         "bne %0, %3, 2f\n"
+                         "s32c1i %4, %1\n"
+                         "2:\n"
+                         : "=&a"(prevVal), "+m"(*v)
+                         : "a"(v), "a"(oldVal), "a"(val)
+                         : "cc");
+    LOS_IntRestore(intSave);
+
+    /* FALSE means the swap happened (previous value matched oldVal). */
+    return prevVal != oldVal;
+}
+
+/*
+ * 64-bit atomics: Xtensa s32c1i only operates on 32-bit words, so these
+ * fall back to plain C guarded by an interrupt lock (single-core assumption).
+ */
+STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically assign setVal to *v. */
+STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
+{
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v = setVal;
+    LOS_IntRestore(intSave);
+}
+
+/* Atomically add addVal to *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v += addVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Atomically subtract subVal from *v; returns the NEW value. */
+STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
+{
+    INT64 val;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    *v -= subVal;
+    val = *v;
+    LOS_IntRestore(intSave);
+
+    return val;
+}
+
+/* Convenience wrappers with delta 1. */
+STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
+{
+    return ArchAtomic64Add(v, 1);
+}
+
+STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
+{
+    (VOID)ArchAtomic64Sub(v, 1);
+}
+
+STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
+{
+    return ArchAtomic64Sub(v, 1);
+}
+
+/* Atomically exchange *v with val; returns the PREVIOUS value. */
+STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    *v = val;
+    LOS_IntRestore(intSave);
+
+    return prevVal;
+}
+
+/* 64-bit compare-and-swap. NOTE: returns FALSE when the swap SUCCEEDED
+ * (previous value matched oldVal), TRUE otherwise. */
+STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
+{
+    INT64 prevVal;
+    UINT32 intSave;
+
+    intSave = LOS_IntLock();
+    prevVal = *v;
+    if (prevVal == oldVal) {
+        *v = val;
+    }
+    LOS_IntRestore(intSave);
+
+    return prevVal != oldVal;
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+#endif /* _LOS_ARCH_ATOMIC_H */
diff --git a/testsuites/BUILD.gn b/testsuites/BUILD.gn
index e08d8a2c..525d4f6f 100644
--- a/testsuites/BUILD.gn
+++ b/testsuites/BUILD.gn
@@ -56,6 +56,7 @@ kernel_module("test_init") {
group("testsuites") {
deps = [
":test_init",
+ "sample/kernel/atomic:test_atomic",
"sample/kernel/event:test_event",
"sample/kernel/hwi:test_hwi",
"sample/kernel/mem:test_mem",
diff --git a/testsuites/include/osTest.h b/testsuites/include/osTest.h
index 5783002e..c1e76e74 100644
--- a/testsuites/include/osTest.h
+++ b/testsuites/include/osTest.h
@@ -71,6 +71,7 @@ extern "C" {
#ifndef LOS_KERNEL_TEST_FULL
#define LOS_KERNEL_TEST_FULL 0
#endif
+#define LOS_KERNEL_ATOMIC_TEST 1
#define LOS_KERNEL_CORE_TASK_TEST 1
#define LOS_KERNEL_IPC_MUX_TEST 1
#define LOS_KERNEL_IPC_SEM_TEST 1
diff --git a/testsuites/sample/kernel/atomic/BUILD.gn b/testsuites/sample/kernel/atomic/BUILD.gn
new file mode 100644
index 00000000..cbb06f9e
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+# Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this list of
+# conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice, this list
+# of conditions and the following disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Atomic testsuite module, pulled in by //kernel/liteos_m/testsuites:testsuites.
+static_library("test_atomic") {
+  sources = [
+    "it_los_atomic.c",
+    "it_los_atomic_001.c",
+    "it_los_atomic_002.c",
+    "it_los_atomic_003.c",
+    "it_los_atomic_004.c",
+    "it_los_atomic_005.c",
+    "it_los_atomic_006.c",
+    "it_los_atomic_007.c",
+    # NOTE(review): _008/_009 are listed here and called from ItSuiteLosAtomic,
+    # but do not appear in this patch's file summary -- confirm both files
+    # exist in the tree or the build will fail.
+    "it_los_atomic_008.c",
+    "it_los_atomic_009.c",
+  ]
+  include_dirs = [ "." ]
+  configs += [ "//kernel/liteos_m/testsuites:include" ]
+}
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic.c b/testsuites/sample/kernel/atomic/it_los_atomic.c
new file mode 100644
index 00000000..d72269ef
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+/* Shared counters exercised concurrently by the atomic test cases
+ * (declared extern in it_los_atomic.h). */
+Atomic g_testAtomicID03 = 0;
+Atomic64 g_testAtomicID05 = 0;
+
+/* Testsuite entry: run every atomic test case in sequence. */
+VOID ItSuiteLosAtomic(VOID)
+{
+    ItLosAtomic001();
+    ItLosAtomic002();
+    ItLosAtomic003();
+    ItLosAtomic004();
+    ItLosAtomic005();
+    ItLosAtomic006();
+    ItLosAtomic007();
+    ItLosAtomic008();
+    ItLosAtomic009();
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic.h b/testsuites/sample/kernel/atomic/it_los_atomic.h
new file mode 100644
index 00000000..6bb953ca
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
#ifndef _IT_LOS_ATOMIC_H
#define _IT_LOS_ATOMIC_H

#include "osTest.h"
#include "los_atomic.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* Number of concurrent worker tasks spawned by the multi-task cases (007/008). */
#define ATOMIC_MUTI_TASK_NUM 3

/* Shared atomic variables operated on by the multi-task test cases. */
extern Atomic g_testAtomicID03;
extern Atomic64 g_testAtomicID05;

/* Registration entry points: each adds one test case to the test runner. */
extern VOID ItLosAtomic001(VOID);
extern VOID ItLosAtomic002(VOID);
extern VOID ItLosAtomic003(VOID);
extern VOID ItLosAtomic004(VOID);
extern VOID ItLosAtomic005(VOID);
extern VOID ItLosAtomic006(VOID);
extern VOID ItLosAtomic007(VOID);
extern VOID ItLosAtomic008(VOID);
extern VOID ItLosAtomic009(VOID);

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _IT_LOS_ATOMIC_H */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_001.c b/testsuites/sample/kernel/atomic/it_los_atomic_001.c
new file mode 100644
index 00000000..9bdeab50
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_001.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static UINT32 TestCase(VOID)
+{
+ volatile INT32 value = 0;
+ UINTPTR ret;
+
+ ret = LOS_AtomicDecRet(&value);
+ ICUNIT_ASSERT_EQUAL(value, ret, value);
+ ICUNIT_ASSERT_EQUAL(value, -1, value);
+
+ ret = LOS_AtomicDecRet(&value);
+ ICUNIT_ASSERT_EQUAL(value, ret, value);
+ ICUNIT_ASSERT_EQUAL(value, -0x02, value);
+
+ value = 0x7fffffff;
+ ret = LOS_AtomicDecRet(&value);
+ ICUNIT_ASSERT_EQUAL(value, ret, value);
+ ICUNIT_ASSERT_EQUAL(value, 0x7ffffffe, value);
+
+ return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic001
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicDecRet
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicDecRet interface.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicDecRet return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicDecRet function test with the test framework. */
VOID ItLosAtomic001(VOID)
{
    TEST_ADD_CASE("ItLosAtomic001", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_002.c b/testsuites/sample/kernel/atomic/it_los_atomic_002.c
new file mode 100644
index 00000000..21d162d8
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_002.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static UINT32 TestCase(VOID)
+{
+ volatile INT32 value = 0;
+ UINT32 ret;
+ UINT32 newVal;
+
+ newVal = 0xff;
+ ret = LOS_AtomicXchg32bits(&value, newVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ newVal = 0xffff;
+ ret = LOS_AtomicXchg32bits(&value, newVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ newVal = 0xffffff;
+ ret = LOS_AtomicXchg32bits(&value, newVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffff, value);
+
+ newVal = 0xffffffff;
+ ret = LOS_AtomicXchg32bits(&value, newVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffff, value);
+
+ return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic002
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicXchg32bits
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicXchg32bits interface.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicXchg32bits return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicXchg32bits function test with the test framework. */
VOID ItLosAtomic002(VOID)
{
    TEST_ADD_CASE("ItLosAtomic002", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_003.c b/testsuites/sample/kernel/atomic/it_los_atomic_003.c
new file mode 100644
index 00000000..f79deece
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_003.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static UINT32 TestCase(VOID)
+{
+ volatile INT32 value = 0;
+ BOOL ret;
+ UINT32 newVal;
+ UINT32 oldVal;
+
+ newVal = 0xff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0, value);
+
+ oldVal = 0;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ newVal = 0xffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ oldVal = 0xff;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ newVal = 0xffffffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ oldVal = 0xffff;
+ ret = LOS_AtomicCmpXchg32bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffff, value);
+
+ return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic003
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicCmpXchg32bits
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicCmpXchg32bits interface.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicCmpXchg32bits return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicCmpXchg32bits function test with the test framework. */
VOID ItLosAtomic003(VOID)
{
    TEST_ADD_CASE("ItLosAtomic003", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_004.c b/testsuites/sample/kernel/atomic/it_los_atomic_004.c
new file mode 100644
index 00000000..73e915b2
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_004.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static VOID TaskF01(VOID)
+{
+ INT32 i;
+ UINTPTR ret;
+ UINTPTR count;
+ for (i = 0; i < 100; ++i) { // run 100 times.
+ count = g_testAtomicID03;
+ ret = LOS_AtomicIncRet(&g_testAtomicID03);
+ ICUNIT_ASSERT_EQUAL_VOID(ret, g_testAtomicID03, ret);
+ ICUNIT_ASSERT_EQUAL_VOID((count + 1), g_testAtomicID03, (count + 1));
+ }
+ ICUNIT_ASSERT_EQUAL_VOID(g_testAtomicID03, i, g_testAtomicID03);
+ ICUNIT_ASSERT_EQUAL_VOID(ret, g_testAtomicID03, ret);
+
+ LOS_AtomicAdd(&g_testCount, 1);
+}
+
/*
 * Creates one higher-priority task running TaskF01 (100 atomic increments),
 * then verifies the worker signalled completion through g_testCount.
 */
static UINT32 TestCase(VOID)
{
    UINT32 ret;

    g_testAtomicID03 = 0; /* counter incremented by the worker task */

    g_testCount = 0; /* completion flag; the worker adds 1 when done */

    TSK_INIT_PARAM_S stTask1 = {0};
    stTask1.pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF01;
    stTask1.pcName = "Atomic_004";
    stTask1.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
    stTask1.usTaskPrio = TASK_PRIO_TEST - 2; // TASK_PRIO_TEST - 2 has higher priority than TASK_PRIO_TEST
    stTask1.uwResved = LOS_TASK_STATUS_DETACHED;

    ret = LOS_TaskCreate(&g_testTaskID01, &stTask1);
    ICUNIT_ASSERT_EQUAL(ret, LOS_OK, ret);
    /* NOTE(review): assumes 5 ticks is enough for the worker to finish — the
     * worker has higher priority, so it should preempt immediately. */
    LOS_TaskDelay(5); // delay 5 ticks.

    ICUNIT_ASSERT_EQUAL(g_testCount, 1, g_testCount);

    return LOS_OK;
}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic004
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicIncRet 100 times.
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicIncRet interface 100 times in a task.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicIncRet return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicIncRet (100-iteration) test with the test framework. */
VOID ItLosAtomic004(VOID)
{
    TEST_ADD_CASE("ItLosAtomic004", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_005.c b/testsuites/sample/kernel/atomic/it_los_atomic_005.c
new file mode 100644
index 00000000..0402a1eb
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_005.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static UINT32 TestCase(VOID)
+{
+ volatile INT64 value = 0;
+ UINT64 ret;
+ UINT64 uwNewVal;
+
+ uwNewVal = 0xff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ uwNewVal = 0xffff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ uwNewVal = 0xffffff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffff, value);
+
+ uwNewVal = 0xffffffff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffff, value);
+
+ uwNewVal = 0xffffffffffff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffffffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffffffff, value);
+
+ uwNewVal = 0xffffffffffffffff;
+ ret = LOS_AtomicXchg64bits(&value, uwNewVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0xffffffffffff, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffffffffffff, value);
+
+ return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic005
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicXchg64bits
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicXchg64bits interface.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicXchg64bits return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicXchg64bits function test with the test framework. */
VOID ItLosAtomic005(VOID)
{
    TEST_ADD_CASE("ItLosAtomic005", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_006.c b/testsuites/sample/kernel/atomic/it_los_atomic_006.c
new file mode 100644
index 00000000..8870c3f3
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_006.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static UINT32 TestCase(VOID)
+{
+ volatile INT64 value = 0;
+ BOOL ret;
+ UINT64 newVal = 0xff;
+ UINT64 oldVal = 1;
+
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0, value);
+
+ oldVal = 0;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ newVal = 0xffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xff, value);
+
+ oldVal = 0xff;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ newVal = 0xffffffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffff, value);
+
+ oldVal = 0xffff;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffff, value);
+
+ newVal = 0xffffffffffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffff, value);
+
+ oldVal = 0xffffffff;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffffffff, value);
+
+ newVal = 0xffffffffffffffff;
+ oldVal = 1;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 1, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffffffff, value);
+
+ oldVal = 0xffffffffffff;
+ ret = LOS_AtomicCmpXchg64bits(&value, newVal, oldVal);
+ ICUNIT_ASSERT_EQUAL(ret, 0, ret);
+ ICUNIT_ASSERT_EQUAL(value, 0xffffffffffffffff, value);
+
+ return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic006
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicCmpXchg64bits
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicCmpXchg64bits interface.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicCmpXchg64bits return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the LOS_AtomicCmpXchg64bits function test with the test framework. */
VOID ItLosAtomic006(VOID)
{
    TEST_ADD_CASE("ItLosAtomic006", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
\ No newline at end of file
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_007.c b/testsuites/sample/kernel/atomic/it_los_atomic_007.c
new file mode 100644
index 00000000..05f9f62a
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_007.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+static VOID TaskF01(VOID)
+{
+ INT64 i;
+
+ for (i = 0x7fffffffffff0000; i < 0x7fffffffffff000f; ++i) {
+ LOS_AtomicXchg64bits(&g_testAtomicID05, i);
+ }
+
+ ++g_testCount;
+}
+
+static UINT32 TestCase(VOID)
+{
+ UINT32 ret, i;
+ UINT32 taskId[ATOMIC_MUTI_TASK_NUM];
+ TSK_INIT_PARAM_S task[ATOMIC_MUTI_TASK_NUM] = {0, };
+ CHAR taskName[ATOMIC_MUTI_TASK_NUM][20] = {"", }; // max taskName size is 20.
+ CHAR buf[10] = ""; // max buf size is 10.
+
+ g_testCount = 0;
+ g_testAtomicID05 = 0;
+
+ for (i = 0; i < ATOMIC_MUTI_TASK_NUM; i++) {
+ memset(buf, 0, 10); // max buf size is 10.
+ memset(taskName[i], 0, 20); // max taskName size is 20.
+
+ task[i].pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF01;
+ task[i].pcName = taskName[i];
+ task[i].uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
+ task[i].usTaskPrio = TASK_PRIO_TEST - 2; // TASK_PRIO_TEST - 2 has higher priority than TASK_PRIO_TEST
+ task[i].uwResved = LOS_TASK_STATUS_DETACHED;
+ }
+ for (i = 0; i < ATOMIC_MUTI_TASK_NUM; i++) {
+ ret = LOS_TaskCreate(&taskId[i], &task[i]);
+ ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
+ }
+ LOS_TaskDelay(20); // delay 20 ticks.
+
+ ICUNIT_GOTO_EQUAL(g_testCount, ATOMIC_MUTI_TASK_NUM, g_testCount, EXIT);
+ ICUNIT_GOTO_EQUAL(g_testAtomicID05, 0x7fffffffffff000e, g_testAtomicID05, EXIT);
+EXIT:
+ for (i = 0; i < ATOMIC_MUTI_TASK_NUM; i++) {
+ (VOID)LOS_TaskDelete(taskId[i]);
+ }
+ return LOS_OK;
+}
+
/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic007
+ * @par TestCase_TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicXchg64bits
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicXchg64bits interface in different task.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicXchg64bits return expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
/* Register the multi-task LOS_AtomicXchg64bits test with the test framework. */
VOID ItLosAtomic007(VOID)
{
    TEST_ADD_CASE("ItLosAtomic007", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
\ No newline at end of file
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_008.c b/testsuites/sample/kernel/atomic/it_los_atomic_008.c
new file mode 100644
index 00000000..02c4b291
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_008.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+/* Worker task: repeatedly compare-and-swap g_testAtomicID05 upward through a 64-bit range. */
+static VOID TaskF01(VOID)
+{
+    INT64 i;
+    INT64 count;
+    /* For each i in [0x7fffffffffff0000, 0x7fffffffffffffff): snapshot the current
+     * value, then attempt to swap it for i when it still equals the snapshot.
+     * No retry on CAS failure; TestCase expects the final value to be the last
+     * written i (0x7fffffffffffffff - 1) once every worker has finished. */
+    for (i = 0x7fffffffffff0000; i < 0x7fffffffffffffff; ++i) {
+        count = g_testAtomicID05;
+        LOS_AtomicCmpXchg64bits(&g_testAtomicID05, i, count);
+    }
+
+    ++g_testCount;  /* signal completion of this worker to TestCase */
+}
+
+/* Spawn ATOMIC_MUTI_TASK_NUM workers that race LOS_AtomicCmpXchg64bits on
+ * g_testAtomicID05, then verify all workers ran and the final value matches. */
+static UINT32 TestCase(VOID)
+{
+    UINT32 ret, i;
+    UINT32 created = 0; /* tasks successfully created; only these are deleted at EXIT */
+    UINT32 taskId[ATOMIC_MUTI_TASK_NUM];
+    TSK_INIT_PARAM_S task[ATOMIC_MUTI_TASK_NUM] = {0, };
+    CHAR taskName[ATOMIC_MUTI_TASK_NUM][20] = {"", }; // max taskName size is 20.
+
+    g_testCount = 0;
+    g_testAtomicID05 = 0;
+
+    for (i = 0; i < ATOMIC_MUTI_TASK_NUM; i++) {
+        (VOID)memset(taskName[i], 0, sizeof(taskName[i]));
+
+        task[i].pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF01;
+        task[i].pcName = taskName[i];
+        task[i].uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
+        task[i].usTaskPrio = TASK_PRIO_TEST - 2; // TASK_PRIO_TEST - 2 has higher priority than TASK_PRIO_TEST
+        task[i].uwResved = LOS_TASK_STATUS_DETACHED;
+    }
+
+    for (i = 0; i < ATOMIC_MUTI_TASK_NUM; i++) {
+        ret = LOS_TaskCreate(&taskId[i], &task[i]);
+        ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
+        created++; /* taskId[i] is valid only after a successful create */
+    }
+
+    LOS_TaskDelay(80); // delay 80 ticks so every worker task can run to completion.
+
+    ICUNIT_GOTO_EQUAL(g_testCount, ATOMIC_MUTI_TASK_NUM, g_testCount, EXIT);
+    ICUNIT_GOTO_EQUAL(g_testAtomicID05, (0x7fffffffffffffff - 1), g_testAtomicID05, EXIT);
+EXIT:
+    /* Delete only the tasks that were actually created; deleting an
+     * uninitialized taskId entry would pass a garbage ID to the kernel. */
+    for (i = 0; i < created; i++) {
+        (VOID)LOS_TaskDelete(taskId[i]);
+    }
+    return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic008
+ * @par TestCase_Type
+ * Function test
+ * @brief Test interface LOS_AtomicCmpXchg64bits.
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_AtomicCmpXchg64bits interface in different tasks.
+ * @par TestCase_Expected_Result
+ * 1.LOS_AtomicCmpXchg64bits returns the expected result.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
+/* Register the LOS_AtomicCmpXchg64bits functional test case with the test framework. */
+VOID ItLosAtomic008(VOID)
+{
+    TEST_ADD_CASE("ItLosAtomic008", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
\ No newline at end of file
diff --git a/testsuites/sample/kernel/atomic/it_los_atomic_009.c b/testsuites/sample/kernel/atomic/it_los_atomic_009.c
new file mode 100644
index 00000000..d578fb9b
--- /dev/null
+++ b/testsuites/sample/kernel/atomic/it_los_atomic_009.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "it_los_atomic.h"
+
+#ifdef __cplusplus
+#if __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#endif /* __cplusplus */
+
+#define DB_ATOMIC_MUTI_TASK_NUM (ATOMIC_MUTI_TASK_NUM * 2) // 2 is coefficients
+
+/* Worker task: apply LOS_Atomic64Dec to the shared 64-bit counter ten times,
+ * then report completion by bumping g_testCount. */
+static VOID TaskF01(VOID)
+{
+    UINT32 remaining = 10; // run 10 times.
+
+    while (remaining-- > 0) {
+        LOS_Atomic64Dec(&g_testAtomicID05);
+    }
+
+    ++g_testCount;
+}
+
+/* Worker task: issue ten no-op compare-and-swaps on the shared 64-bit counter
+ * (oldVal == newVal), exercising LOS_AtomicCmpXchg64bits concurrently with the
+ * decrementing workers without changing the counter's final value. */
+static VOID TaskF02(VOID)
+{
+    UINT64 i;
+    INT64 val;
+    for (i = 0; i < 10; ++i) { // run 10 times.
+        /* Read the volatile once: passing g_testAtomicID05 twice as separate
+         * arguments could yield two different values if another task updates
+         * it between the reads, making the CAS write an unintended value. */
+        val = g_testAtomicID05;
+        LOS_AtomicCmpXchg64bits(&g_testAtomicID05, val, val);
+    }
+
+    ++g_testCount;
+}
+
+/* Spawn up to DB_ATOMIC_MUTI_TASK_NUM workers alternating between
+ * LOS_Atomic64Dec (TaskF01) and LOS_AtomicCmpXchg64bits (TaskF02), then verify
+ * the counter dropped by exactly 10 per decrementing worker. */
+static UINT32 TestCase(VOID)
+{
+    UINT32 ret, i;
+    UINT32 created = 0; /* tasks successfully created; only these are deleted at EXIT */
+    UINT32 taskId[DB_ATOMIC_MUTI_TASK_NUM];
+    TSK_INIT_PARAM_S task[DB_ATOMIC_MUTI_TASK_NUM] = {0, };
+    CHAR taskName[DB_ATOMIC_MUTI_TASK_NUM][20] = {"", }; // max taskName size is 20.
+    Atomic uCount = 0; /* number of decrementing (TaskF01) workers started */
+    g_testCount = 0;
+    g_testAtomicID05 = 0xffffffffff;
+    UINT32 uLoop = g_taskMaxNum - TASK_EXISTED_NUM; /* cap by free task slots */
+
+    if (uLoop > DB_ATOMIC_MUTI_TASK_NUM) {
+        uLoop = DB_ATOMIC_MUTI_TASK_NUM;
+    }
+
+    for (i = 0; i < uLoop; i++) {
+        (VOID)memset(taskName[i], 0, sizeof(taskName[i]));
+
+        if ((i % 2) == 0) { // alternate workers: even index -> TaskF01, odd index -> TaskF02.
+            uCount++;
+            task[i].pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF01;
+        } else {
+            task[i].pfnTaskEntry = (TSK_ENTRY_FUNC)TaskF02;
+        }
+        task[i].pcName = taskName[i];
+        task[i].uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
+        task[i].usTaskPrio = TASK_PRIO_TEST - 2; // TASK_PRIO_TEST - 2 has higher priority than TASK_PRIO_TEST
+        task[i].uwResved = LOS_TASK_STATUS_DETACHED;
+    }
+    for (i = 0; i < uLoop; i++) {
+        ret = LOS_TaskCreate(&taskId[i], &task[i]);
+        ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT);
+        created++; /* taskId[i] is valid only after a successful create */
+    }
+
+    LOS_TaskDelay(20); // delay 20 ticks so every worker task can run to completion.
+    ICUNIT_GOTO_EQUAL(g_testCount, uLoop, g_testCount, EXIT);
+    ICUNIT_GOTO_EQUAL(g_testAtomicID05, (0xffffffffff - 10 * uCount), g_testAtomicID05, EXIT); // each TaskF01 decrements 10 times.
+EXIT:
+    /* Delete only the tasks that were actually created; deleting an
+     * uninitialized taskId entry would pass a garbage ID to the kernel. */
+    for (i = 0; i < created; i++) {
+        (VOID)LOS_TaskDelete(taskId[i]);
+    }
+    return LOS_OK;
+}
+
+/**
+ * @ingroup TEST_ATO
+ * @par TestCase_Number
+ * ItLosAtomic009
+ * @par TestCase_Type
+ * Function test
+ * @brief Test interfaces LOS_Atomic64Dec and LOS_AtomicCmpXchg64bits in different tasks.
+ * @par TestCase_Pretreatment_Condition
+ * NA.
+ * @par TestCase_Test_Steps
+ * step1: Invoke the LOS_Atomic64Dec interface in TaskF01.
+ * step2: Invoke the LOS_AtomicCmpXchg64bits interface in TaskF02.
+ * @par TestCase_Expected_Result
+ * 1.LOS_Atomic64Dec and LOS_AtomicCmpXchg64bits return the expected results.
+ * @par TestCase_Level
+ * Level 0
+ * @par TestCase_Automated
+ * true
+ * @par TestCase_Remark
+ * null
+ */
+
+/* Register the mixed LOS_Atomic64Dec / LOS_AtomicCmpXchg64bits concurrency test case. */
+VOID ItLosAtomic009(VOID)
+{
+    TEST_ADD_CASE("ItLosAtomic009", TestCase, TEST_LOS, TEST_ATO, TEST_LEVEL0, TEST_FUNCTION);
+}
+
+#ifdef __cplusplus
+#if __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __cplusplus */
\ No newline at end of file
diff --git a/testsuites/src/osTest.c b/testsuites/src/osTest.c
index 10afaf54..85b9040e 100644
--- a/testsuites/src/osTest.c
+++ b/testsuites/src/osTest.c
@@ -136,6 +136,9 @@ UINT32 TaskUsedCountGet(VOID)
void TestKernel(void)
{
+#if (LOS_KERNEL_ATOMIC_TEST == 1)
+ ItSuiteLosAtomic();
+#endif
#if (LOS_KERNEL_CORE_TASK_TEST == 1)
ItSuiteLosTask();
#endif
diff --git a/utils/los_compiler.h b/utils/los_compiler.h
index 33add0f8..1f9e7759 100644
--- a/utils/los_compiler.h
+++ b/utils/los_compiler.h
@@ -321,6 +321,9 @@ typedef signed long long INT64;
typedef unsigned int UINTPTR;
typedef signed int INTPTR;
+/* Operand types for the LOS atomic interfaces: volatile 32-bit and 64-bit integers. */
+typedef volatile INT32 Atomic;
+typedef volatile INT64 Atomic64;
+
#define VOID void
#ifndef FALSE