cpu.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-09-15     Bernard      first version
 * 2019-07-28     zdzn         add smp support
 * 2023-02-21     GuEe-GUI     mov cpu ofw init to setup
 * 2024-04-29     Shell        Add generic ticket spinlock using C11 atomic
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <cpu.h>

#define DBG_TAG "libcpu.aarch64.cpu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#ifdef RT_USING_SMP
#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}

#define cpuid_to_hwid(cpuid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
#define set_hwid(cpuid, hwid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
#define get_cpu_node(cpuid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
#define set_cpu_node(cpuid, node) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)

typedef rt_hw_spinlock_t arch_spinlock_t;

struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
#ifdef RT_USING_SMART
/* rt_cpu_mpidr_early maps a logical CPU id to its MPIDR, a 64-bit value */
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/*
 * The most common mpidr_el1 layout (bit 31 is the RES1 bit of MPIDR_EL1, the
 * low byte is Aff0); redefine this weak table in the BSP if the mapping differs.
 */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
    [0] = 0x80000000,
    [1] = 0x80000001,
    [2] = 0x80000002,
    [3] = 0x80000003,
    [4] = 0x80000004,
    [5] = 0x80000005,
    [6] = 0x80000006,
    [7] = 0x80000007,
    [RT_CPUS_NR] = 0
};
#endif /* RT_USING_SMART */
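
/*
 * Example (hypothetical values, for illustration only): since the table above
 * is declared weak, a BSP whose cores report different MPIDR values can simply
 * provide its own strong definition, e.g. for two cores in cluster 1:
 *
 *     rt_uint64_t rt_cpu_mpidr_early[] = { 0x80000100, 0x80000101, 0 };
 */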

/* in support of C11 atomic */
#if __STDC_VERSION__ >= 201112L
#include <stdatomic.h>

union _spinlock
{
    _Atomic(rt_uint32_t) _value;
    struct
    {
        /* ticket lock: 'owner' is the ticket being served, 'next' the next ticket to hand out */
        _Atomic(rt_uint16_t) owner;
        _Atomic(rt_uint16_t) next;
    } ticket;
};

void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /**
     * Note: this store is an atomic operation; on arm64 a naturally aligned
     * 32-bit store is atomic even without the C11 atomic API.
     */
    atomic_store_explicit(&lock->_value, 0, memory_order_relaxed);
}

rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *_lock)
{
    rt_bool_t rc;
    rt_uint32_t readonce;
    union _spinlock temp;
    union _spinlock *lock = (void *)_lock;

    /* snapshot the whole lock word; the lock is free only when owner == next */
    readonce = atomic_load_explicit(&lock->_value, memory_order_acquire);
    temp._value = readonce;

    if (temp.ticket.owner != temp.ticket.next)
    {
        rc = RT_FALSE;
    }
    else
    {
        /* try to take the next ticket in one shot; fail if the lock word changed meanwhile */
        temp.ticket.next += 1;
        rc = atomic_compare_exchange_strong_explicit(
            &lock->_value, &readonce, temp._value,
            memory_order_acquire, memory_order_relaxed);
    }
    return rc;
}

rt_inline rt_base_t _load_acq_exclusive(_Atomic(rt_uint16_t) *halfword)
{
    rt_uint32_t old;

    /*
     * load-acquire exclusive: besides the acquire load, this marks the lock
     * word in the exclusive monitor, so a following WFE is woken up when
     * another CPU writes to (and thereby clears the monitor for) this address.
     */
    __asm__ volatile("ldaxrh %w0, [%1]"
                     : "=&r"(old)
                     : "r"(halfword)
                     : "memory");
    return old;
}

rt_inline void _send_event_local(void)
{
    /* SEVL sets the local event register so the first WFE below falls through */
    __asm__ volatile("sevl");
}

rt_inline void _wait_for_event(void)
{
    __asm__ volatile("wfe" ::: "memory");
}

void rt_hw_spin_lock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /* take a ticket, then wait until 'owner' reaches it (FIFO order) */
    rt_uint16_t ticket =
        atomic_fetch_add_explicit(&lock->ticket.next, 1, memory_order_relaxed);

    if (atomic_load_explicit(&lock->ticket.owner, memory_order_acquire) !=
        ticket)
    {
        _send_event_local();
        do
        {
            _wait_for_event();
        }
        while (_load_acq_exclusive(&lock->ticket.owner) != ticket);
    }
}

void rt_hw_spin_unlock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /* serve the next ticket; the release pairs with the acquire in the lock path */
    atomic_fetch_add_explicit(&lock->ticket.owner, 1, memory_order_release);
}
#endif /* __STDC_VERSION__ >= 201112L */
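
/*
 * Usage sketch (illustrative only; 'demo_lock' is a hypothetical lock object):
 * the ticket lock is driven through the low-level rt_hw_spinlock API above.
 *
 *     static rt_hw_spinlock_t demo_lock;
 *
 *     rt_hw_spin_lock_init(&demo_lock);
 *     rt_hw_spin_lock(&demo_lock);
 *     ... critical section, runs with the lock held ...
 *     rt_hw_spin_unlock(&demo_lock);
 */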

static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    /* copy cpu_hw_ids into the cpuid-to-hwid table and cpu_ops into cpu_ops_tbl */
    if (num_cpus > RT_CPUS_NR)
    {
        LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
        num_cpus = RT_CPUS_NR;
    }

    for (int i = 0; i < num_cpus; i++)
    {
        set_hwid(i, cpu_hw_ids[i]);
        cpu_ops_tbl[i] = cpu_ops[i];
    }
    return 0;
}

/** init cpu with hardcoded information or by parsing the FDT */
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval;

    /* first set up cpu_ops_tbl and the cpuid-to-hwid table */
    if (num_cpus > 0)
        retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
    else
    {
        retval = -1;
    }

    if (retval)
        return retval;

    /*
     * use the cpuid-to-hwid table and cpu_ops_tbl to call each CPU's cpu_init,
     * assuming that CPU 0 has already been initialized
     */
    for (int i = 1; i < RT_CPUS_NR; i++)
    {
        if (rt_cpu_mpidr_early[i] == ID_ERROR)
        {
            LOG_E("Failed to find hardware id of CPU %d", i);
            continue;
        }

        if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
        {
            retval = cpu_ops_tbl[i]->cpu_init(i, RT_NULL);
            CHECK_RETVAL(retval);
        }
        else
        {
            LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]",
                  i, cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
        }
    }
    return 0;
}

/**
 * @brief boot cpu with hardcoded data
 *
 * @param num_cpus number of cpus
 * @param cpu_hw_ids each element represents a hwid of cpu[i]
 * @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
 * @return int 0 on success, a negative error code otherwise
 */
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval = 0;

    if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
        return -1;

    retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
    CHECK_RETVAL(retval);

    return retval;
}
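
/*
 * Example call (illustrative only; the hwid values and 'bsp_cpu_ops' are
 * assumptions, a real BSP would pass the cpu_ops_t provided by its platform,
 * e.g. a PSCI- or spin-table-based implementation):
 *
 *     static rt_uint64_t hw_ids[] = { 0x80000000, 0x80000001 };
 *     static struct cpu_ops_t *ops[] = { &bsp_cpu_ops, &bsp_cpu_ops };
 *
 *     rt_hw_cpu_boot_secondary(2, hw_ids, ops);
 */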
#endif /* RT_USING_SMP */

/**
 * @addtogroup ARM CPU
 */
/*@{*/

const char *rt_hw_cpu_arch(void)
{
    return "aarch64";
}

/*@}*/