atomic_riscv.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-14     WangShun     first version
 */

#include <rtthread.h>

/* Atomically swap *ptr with val and return the previous value. */
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically add val to *ptr and return the previous value. */
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically subtract val from *ptr (an atomic add of -val) and return the previous value. */
rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
    val = -val;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically XOR *ptr with val and return the previous value. */
rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically AND *ptr with val and return the previous value. */
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically OR *ptr with val and return the previous value. */
rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically read *ptr: XORing with x0 (zero) leaves memory unchanged and returns the value. */
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically write val to *ptr; the previous value returned by amoswap is discarded. */
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
}

/* Atomically set *ptr to 1 and return the previous value (0 means the flag was clear). */
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
    rt_atomic_t temp = 1;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#endif
    return result;
}

/* Atomically clear *ptr by ANDing it with x0 (zero). */
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
}
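
/*
 * Illustrative sketch (not part of the original file): the flag helpers
 * above are enough to build a minimal busy-wait lock. The variable
 * lock_flag and the names spin_lock()/spin_unlock() are assumptions for
 * demonstration only, not an RT-Thread API.
 *
 *     static volatile rt_atomic_t lock_flag = 0;
 *
 *     static void spin_lock(void)
 *     {
 *         // Spin until the previous flag value was 0, i.e. we were the
 *         // thread that set it.
 *         while (rt_hw_atomic_flag_test_and_set(&lock_flag) != 0)
 *         {
 *         }
 *     }
 *
 *     static void spin_unlock(void)
 *     {
 *         rt_hw_atomic_flag_clear(&lock_flag);
 *     }
 */
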
/*
 * Compare *ptr with *old: if they match, store desired into *ptr and
 * return 1; otherwise write the observed value back to *old and return 0.
 * Implemented as an LR/SC retry loop with acquire/release ordering.
 */
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
{
    rt_atomic_t tmp = *old;
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.w.aq  %[result], (%[ptr])\n"
        "   bne      %[result], %[tmp], 2f\n"
        "   sc.w.rl  %[tmp], %[desired], (%[ptr])\n"
        "   bnez     %[tmp], 1b\n"
        "   li       %[result], 1\n"
        "   j        3f\n"
        "2: sw       %[result], (%[old])\n"
        "   li       %[result], 0\n"
        "3:\n"
        : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
        : [desired]"r" (desired), [old]"r" (old)
        : "memory");
#elif __riscv_xlen == 64
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.d.aq  %[result], (%[ptr])\n"
        "   bne      %[result], %[tmp], 2f\n"
        "   sc.d.rl  %[tmp], %[desired], (%[ptr])\n"
        "   bnez     %[tmp], 1b\n"
        "   li       %[result], 1\n"
        "   j        3f\n"
        "2: sd       %[result], (%[old])\n"
        "   li       %[result], 0\n"
        "3:\n"
        : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
        : [desired]"r" (desired), [old]"r" (old)
        : "memory");
#endif
    return result;
}
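
/*
 * Illustrative sketch (not part of the original file): a typical CAS loop
 * built on rt_hw_atomic_compare_exchange_strong(). The helper name
 * atomic_add_if_below() and the limit parameter are assumptions for
 * demonstration only.
 *
 *     // Add 1 to *counter only while it stays below limit; returns 1 on
 *     // success, 0 if the limit was already reached.
 *     static int atomic_add_if_below(volatile rt_atomic_t *counter, rt_atomic_t limit)
 *     {
 *         rt_atomic_t expected = rt_hw_atomic_load(counter);
 *         while (expected < limit)
 *         {
 *             // On failure, expected is refreshed with the current value
 *             // of *counter and the bound is re-checked.
 *             if (rt_hw_atomic_compare_exchange_strong(counter, &expected, expected + 1))
 *             {
 *                 return 1;
 *             }
 *         }
 *         return 0;
 *     }
 */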