
[atomic] add stdc atomic detection. (#7536)

Bernard Xiong 2 years ago
parent
commit
4b4c3c85f2

+ 2 - 2
.github/workflows/action.yml

@@ -348,7 +348,7 @@ jobs:
         if: ${{ matrix.legs.RTT_TOOL_CHAIN == 'sourcery-riscv64-unknown-elf' && success() }}
         run: |
           wget -q https://github.com/RT-Thread/toolchains-ci/releases/download/v1.4/riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14.tar.gz
-          sudo tar zxvf riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14.tar.gz -C /opt
+          sudo tar zxf riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14.tar.gz -C /opt
           /opt/riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14/bin/riscv64-unknown-elf-gcc --version
           echo "RTT_EXEC_PATH=/opt/riscv64-unknown-elf-toolchain-10.2.0-2020.12.8-x86_64-linux-ubuntu14/bin" >> $GITHUB_ENV
 
@@ -356,7 +356,7 @@ jobs:
         if: ${{ matrix.legs.RTT_TOOL_CHAIN == 'sourcery-riscv-none-embed' && success() }}
         run: |
           wget -q https://github.com/RT-Thread/toolchains-ci/releases/download/v1.5/xpack-riscv-none-embed-gcc-8.3.0-2.3-linux-x64.tar.gz
-          sudo tar zxvf xpack-riscv-none-embed-gcc-8.3.0-2.3-linux-x64.tar.gz -C /opt
+          sudo tar zxf xpack-riscv-none-embed-gcc-8.3.0-2.3-linux-x64.tar.gz -C /opt
           /opt/xpack-riscv-none-embed-gcc-8.3.0-2.3/bin/riscv-none-embed-gcc --version
           echo "RTT_EXEC_PATH=/opt/xpack-riscv-none-embed-gcc-8.3.0-2.3/bin" >> $GITHUB_ENV
 

+ 30 - 14
include/rtatomic.h

@@ -6,10 +6,13 @@
  * Change Logs:
  * Date           Author       Notes
  * 2023-03-14     WangShun     first version
+ * 2023-05-20     Bernard      add stdc atomic detection.
  */
 #ifndef __RT_ATOMIC_H__
 #define __RT_ATOMIC_H__
 
+#if !defined(__cplusplus)
+
 rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr);
 void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val);
 rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val);
@@ -20,9 +23,30 @@ rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val);
 rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val);
 void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr);
 rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr);
-rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t new);
+rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *expected, rt_atomic_t desired);
+
+/* To detect stdatomic */
+#if !defined(RT_USING_HW_ATOMIC) && !defined(RT_USING_STDC_ATOMIC)
+#if defined(__GNUC__) && defined(RT_USING_LIBC) && !defined(__STDC_NO_ATOMICS__)
+#define RT_USING_STDC_ATOMIC
+#endif /* __GNUC__ && .. */
+#endif /* !RT_USING_HW_ATOMIC && !RT_USING_STDC_ATOMIC */
+
+#if defined(RT_USING_HW_ATOMIC)
+#define rt_atomic_load(ptr) rt_hw_atomic_load(ptr)
+#define rt_atomic_store(ptr, v) rt_hw_atomic_store(ptr, v)
+#define rt_atomic_add(ptr, v) rt_hw_atomic_add(ptr, v)
+#define rt_atomic_sub(ptr, v) rt_hw_atomic_sub(ptr, v)
+#define rt_atomic_and(ptr, v) rt_hw_atomic_and(ptr, v)
+#define rt_atomic_or(ptr, v)  rt_hw_atomic_or(ptr, v)
+#define rt_atomic_xor(ptr, v) rt_hw_atomic_xor(ptr, v)
+#define rt_atomic_exchange(ptr, v) rt_hw_atomic_exchange(ptr, v)
+#define rt_atomic_flag_clear(ptr) rt_hw_atomic_flag_clear(ptr)
+#define rt_atomic_flag_test_and_set(ptr) rt_hw_atomic_flag_test_and_set(ptr)
+#define rt_atomic_compare_exchange_strong(ptr, v,des) rt_hw_atomic_compare_exchange_strong(ptr, v ,des)
+
+#elif defined(RT_USING_STDC_ATOMIC)
 
-#if defined(RT_USING_STDC_ATOMIC)
 #ifndef __STDC_NO_ATOMICS__
 #define rt_atomic_load(ptr) atomic_load(ptr)
 #define rt_atomic_store(ptr, v) atomic_store(ptr, v)
@@ -38,18 +62,7 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
 #else
 #error "The standard library C doesn't support the atomic operation"
 #endif /* __STDC_NO_ATOMICS__ */
-#elif defined(RT_USING_HW_ATOMIC)
-#define rt_atomic_load(ptr) rt_hw_atomic_load(ptr)
-#define rt_atomic_store(ptr, v) rt_hw_atomic_store(ptr, v)
-#define rt_atomic_add(ptr, v) rt_hw_atomic_add(ptr, v)
-#define rt_atomic_sub(ptr, v) rt_hw_atomic_sub(ptr, v)
-#define rt_atomic_and(ptr, v) rt_hw_atomic_and(ptr, v)
-#define rt_atomic_or(ptr, v)  rt_hw_atomic_or(ptr, v)
-#define rt_atomic_xor(ptr, v) rt_hw_atomic_xor(ptr, v)
-#define rt_atomic_exchange(ptr, v) rt_hw_atomic_exchange(ptr, v)
-#define rt_atomic_flag_clear(ptr) rt_hw_atomic_flag_clear(ptr)
-#define rt_atomic_flag_test_and_set(ptr) rt_hw_atomic_flag_test_and_set(ptr)
-#define rt_atomic_compare_exchange_strong(ptr, v,des) rt_hw_atomic_compare_exchange_strong(ptr, v ,des)
+
 #else
 #include <rthw.h>
 #define rt_atomic_load(ptr) rt_soft_atomic_load(ptr)
@@ -192,4 +205,7 @@ rt_inline rt_atomic_t rt_soft_atomic_compare_exchange_strong(volatile rt_atomic_
     return temp;
 }
 #endif /* RT_USING_STDC_ATOMIC */
+
+#endif /* __cplusplus */
+
 #endif /* __RT_ATOMIC_H__ */

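The selection order in rtatomic.h after this change: an explicitly configured RT_USING_HW_ATOMIC wins; otherwise RT_USING_STDC_ATOMIC is either taken from the configuration or auto-defined for GCC builds with RT_USING_LIBC and C11 atomics available; everything else falls back to the rt_soft_atomic_* implementations from rthw.h. A minimal sketch, not part of the commit (the report function and print strings are illustrative), showing which backend a given build ends up with:

    #include <rtthread.h>

    /* Report which rt_atomic_* backend rtatomic.h selected for this build. */
    static void atomic_backend_report(void)
    {
    #if defined(RT_USING_HW_ATOMIC)
        rt_kprintf("rt_atomic_*: rt_hw_atomic_* (per-architecture assembly)\n");
    #elif defined(RT_USING_STDC_ATOMIC)
        rt_kprintf("rt_atomic_*: C11 <stdatomic.h>\n");
    #else
        rt_kprintf("rt_atomic_*: rt_soft_atomic_* (software fallback)\n");
    #endif
    }
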
+ 17 - 4
include/rtdef.h

@@ -48,6 +48,7 @@
  * 2022-09-12     Meco Man     define rt_ssize_t
  * 2022-12-20     Meco Man     add const name for rt_object
  * 2023-04-01     Chushicheng  change version number to v5.0.1
+ * 2023-05-20     Bernard      add stdc atomic detection.
  */
 
 #ifndef __RT_DEF_H__
@@ -126,12 +127,24 @@ typedef rt_base_t                       rt_flag_t;      /**< Type for flags */
 typedef rt_ubase_t                      rt_dev_t;       /**< Type for device */
 typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 
+#if !defined(__cplusplus)
 #if defined(RT_USING_STDC_ATOMIC)
-#include <stdatomic.h>
-typedef atomic_size_t rt_atomic_t;
+    #include <stdatomic.h>
+    typedef atomic_size_t rt_atomic_t;
+#elif defined(RT_USING_HW_ATOMIC)
+    typedef volatile rt_base_t rt_atomic_t;
 #else
-typedef volatile rt_base_t rt_atomic_t;
-#endif
+
+    /* To detect std atomic */
+    #if defined(RT_USING_LIBC) && defined(__GNUC__) && !defined(__STDC_NO_ATOMICS__)
+        #include <stdatomic.h>
+        typedef atomic_size_t rt_atomic_t;
+    #else
+        typedef volatile rt_base_t rt_atomic_t;
+    #endif /* __GNUC__ && !__STDC_NO_ATOMICS__ */
+
+#endif /* RT_USING_STDC_ATOMIC */
+#endif /* __cplusplus */
 
 /* boolean type definitions */
 #define RT_TRUE                         1               /**< boolean true  */

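In rtdef.h the same detection now chooses the type behind rt_atomic_t: atomic_size_t when <stdatomic.h> is used, volatile rt_base_t otherwise. A hedged sketch of the invariant this implicitly relies on, namely that both choices are one machine word on typical 32/64-bit targets; this is the editor's assumption, not something the commit asserts, and _Static_assert requires a C11 compiler:

    #include <rtthread.h>

    /* Assumption (not from the commit): atomic_size_t and volatile rt_base_t
     * are both one native word, so either branch yields the same object size. */
    _Static_assert(sizeof(rt_atomic_t) == sizeof(rt_base_t),
                   "rt_atomic_t is expected to be one machine word");
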
+ 2 - 0
include/rtthread.h

@@ -17,6 +17,7 @@
  * 2021-02-28     Meco Man     add RT_KSERVICE_USING_STDLIB
  * 2021-11-14     Meco Man     add rtlegacy.h for compatibility
  * 2022-06-04     Meco Man     remove strnlen
+ * 2023-05-20     Bernard      add rtatomic.h header file to included files.
  */
 
 #ifndef __RT_THREAD_H__
@@ -27,6 +28,7 @@
 #include <rtdef.h>
 #include <rtservice.h>
 #include <rtm.h>
+#include <rtatomic.h>
 #ifdef RT_USING_LEGACY
 #include <rtlegacy.h>
 #endif

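With rtatomic.h now included by rtthread.h, application code reaches the rt_atomic_* wrappers through a single include, regardless of which backend the detection picked. A small usage sketch, not part of the commit (the counter and function names are illustrative):

    #include <rtthread.h>

    static rt_atomic_t event_count;          /* shared counter */

    void event_record(void)
    {
        rt_atomic_add(&event_count, 1);      /* atomic increment on any backend */
    }

    rt_base_t event_total(void)
    {
        return (rt_base_t)rt_atomic_load(&event_count);
    }
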
+ 0 - 1
libcpu/Kconfig

@@ -166,7 +166,6 @@ config ARCH_ARMV8
     bool
     select ARCH_ARM
     select ARCH_ARM_MMU
-    select RT_USING_HW_ATOMIC
 
 config ARCH_MIPS
     bool

+ 1 - 1
libcpu/arm/common/atomic_arm.c

@@ -166,7 +166,7 @@ rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
     return oldval;
 }
 
-rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, volatile rt_atomic_t *old, rt_atomic_t new)
+rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t new)
 {
     rt_atomic_t result;
     rt_atomic_t temp = *old;

+ 16 - 16
libcpu/risc-v/common/atomic_riscv.c

@@ -12,7 +12,7 @@
 
 rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -23,7 +23,7 @@ rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -34,7 +34,7 @@ rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
     val = -val;
 #if __riscv_xlen == 32
     asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
@@ -46,7 +46,7 @@ rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -57,7 +57,7 @@ rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -68,7 +68,7 @@ rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -79,7 +79,7 @@ rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -90,7 +90,7 @@ rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
 
 void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -100,7 +100,7 @@ void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
 
 rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
     rt_atomic_t temp = 1;
 #if __riscv_xlen == 32
     asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
@@ -112,7 +112,7 @@ rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
 
 void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
 {
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
 #elif __riscv_xlen == 64
@@ -120,16 +120,16 @@ void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
 #endif
 }
 
-rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, volatile rt_atomic_t *old, rt_atomic_t new)
+rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
 {
     rt_atomic_t tmp = *old;
-    rt_atomic_t result;
+    rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile(
             " fence iorw, ow\n"
             "1: lr.w.aq  %[result], (%[ptr])\n"
             "   bne      %[result], %[tmp], 2f\n"
-            "   sc.w.rl  %[tmp], %[new], (%[ptr])\n"
+            "   sc.w.rl  %[tmp], %[desired], (%[ptr])\n"
             "   bnez     %[tmp], 1b\n"
             "   li  %[result], 1\n"
             "   j 3f\n"
@@ -137,14 +137,14 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, vola
             "   li  %[result], 0\n"
             " 3:\n"
             : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-            : [new]"r" (new), [old]"r"(old)
+            : [desired]"r" (desired), [old]"r"(old)
             : "memory");
 #elif __riscv_xlen == 64
     asm volatile(
             " fence iorw, ow\n"
             "1: lr.d.aq  %[result], (%[ptr])\n"
             "   bne      %[result], %[tmp], 2f\n"
-            "   sc.d.rl  %[tmp], %[new], (%[ptr])\n"
+            "   sc.d.rl  %[tmp], %[desired], (%[ptr])\n"
             "   bnez     %[tmp], 1b\n"
             "   li  %[result], 1\n"
             "   j 3f\n"
@@ -152,7 +152,7 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, vola
             "   li  %[result], 0\n"
             " 3:\n"
             : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-            : [new]"r" (new), [old]"r"(old)
+            : [desired]"r" (desired), [old]"r"(old)
             : "memory");
 #endif
     return result;
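
The RISC-V changes are mostly mechanical: `result` is now initialized to 0, presumably to avoid an uninitialized read (and the matching compiler warning) when neither __riscv_xlen branch applies; the `new` parameter is renamed `desired`; and the `volatile` qualifier on `old` is dropped to match the prototype in rtatomic.h. A C-level model of the compare-exchange result convention, not a replacement for the lr/sc assembly above (the real loop also retries when the store-conditional loses its reservation, which this model does not):

    /* Behavioral model of rt_hw_atomic_compare_exchange_strong's return value:
     * 1 when *ptr matched *old and was replaced with desired, 0 otherwise.
     * The function name is illustrative; the real implementation is the asm. */
    rt_atomic_t cas_model(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
    {
        rt_atomic_t expected = *old;
        rt_atomic_t observed = *ptr;     /* lr.w / lr.d: load-reserved       */

        if (observed != expected)
            return 0;                    /* mismatch: compare-exchange fails */

        *ptr = desired;                  /* sc.w / sc.d: store-conditional   */
        return 1;                        /* success                          */
    }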