cpu.c
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-09-15 Bernard first version
  9. * 2019-07-28 zdzn add smp support
  10. * 2023-02-21 GuEe-GUI mov cpu ofw init to setup
  11. */
  12. #include <rthw.h>
  13. #include <rtthread.h>
  14. #include <rtdevice.h>
  15. #include <cpu.h>
  16. #define DBG_TAG "libcpu.aarch64.cpu"
  17. #define DBG_LVL DBG_INFO
  18. #include <rtdbg.h>
#ifdef RT_USING_SMP

/* Log an error code together with the exact call site (function, file, line). */
#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
/* Report (but do not propagate or abort on) a non-zero return value. */
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}

/* Bounds-checked accessors for the cpuid -> hardware id (mpidr) table and the
 * per-cpu device-tree node table. An out-of-range cpuid yields ID_ERROR / NULL
 * instead of indexing past the arrays. */
#define cpuid_to_hwid(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
#define set_hwid(cpuid, hwid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
#define get_cpu_node(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
#define set_cpu_node(cpuid, node) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)

typedef rt_hw_spinlock_t arch_spinlock_t;

/* Per-cpu operations (e.g. cpu_init hooks); filled in from the cpu_ops[]
 * argument by _cpus_init_data_hardcoded(). */
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
#ifdef RT_USING_SMART
/* Table translating a logical cpu id to its mpidr (a 64-bit hardware id);
 * all slots start as ID_ERROR until filled in (e.g. by set_hwid()). */
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/* The more common mpidr_el1 table; redefine it in the BSP if the platform
 * numbers its cores differently. 0x80000000 is presumably the MPIDR_EL1
 * "U"/"res1" base with affinity-0 holding the core number -- TODO confirm
 * against the target's MPIDR layout. */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
    [0] = 0x80000000,
    [1] = 0x80000001,
    [2] = 0x80000002,
    [3] = 0x80000003,
    [4] = 0x80000004,
    [5] = 0x80000005,
    [6] = 0x80000006,
    [7] = 0x80000007,
    /* one-past-the-end sentinel; also forces the array size to RT_CPUS_NR + 1 */
    [RT_CPUS_NR] = 0
};
#endif /* RT_USING_SMART */
/*
 * Spin until the lock is acquired.
 *
 * Uses the ARMv8-A exclusive-access pair ldaxr/stxr:
 *  - sevl pre-sets the local event register so the first wfe falls through,
 *  - wfe parks the core until an event wakes it (cheaper than busy-polling),
 *  - ldaxr loads the lock word with acquire semantics,
 *  - cbnz -> 1b retries (via wfe) while the word is non-zero (lock held),
 *  - stxr tries to store 1; on exclusive-monitor failure (%w0 != 0) it
 *    retries the load at 2: without waiting.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned int tmp; /* scratch: loaded lock value, then stxr status */
    asm volatile(
        " sevl\n"
        "1: wfe\n"
        "2: ldaxr %w0, %1\n"
        " cbnz %w0, 1b\n"
        " stxr %w0, %w2, %1\n"
        " cbnz %w0, 2b\n"
        : "=&r" (tmp), "+Q" (lock->lock) /* %0 scratch, %1 the lock word in memory */
        : "r" (1)                        /* %2 value stored to mark the lock held */
        : "cc", "memory");
}
/*
 * Try to acquire the lock once, without spinning.
 *
 * Returns non-zero on success, 0 if the lock was held or the exclusive
 * store failed (another core raced us).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    unsigned int tmp; /* loaded lock value, then stxr status (0 = stored) */
    asm volatile(
        " ldaxr %w0, %1\n"     /* load lock word with acquire semantics */
        " cbnz %w0, 1f\n"      /* already held: skip the store, tmp != 0 */
        " stxr %w0, %w2, %1\n" /* attempt to store 1; tmp = 0 on success */
        "1:\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "cc", "memory");
    return !tmp; /* tmp == 0 means the exclusive store succeeded */
}
/*
 * Release the lock: stlr stores 0 with release semantics, which also
 * clears other cores' exclusive monitors on this word and thereby wakes
 * waiters parked in wfe inside arch_spin_lock().
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    asm volatile(
        " stlr %w1, %0\n"
        : "=Q" (lock->lock) : "r" (0) : "memory");
}
  83. void rt_hw_spin_lock_init(arch_spinlock_t *lock)
  84. {
  85. lock->lock = 0;
  86. }
  87. void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
  88. {
  89. arch_spin_lock(lock);
  90. }
  91. void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
  92. {
  93. arch_spin_unlock(lock);
  94. }
  95. rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *lock)
  96. {
  97. return arch_spin_trylock(lock);
  98. }
  99. static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  100. {
  101. // load in cpu_hw_ids in cpuid_to_hwid,
  102. // cpu_ops to cpu_ops_tbl
  103. if (num_cpus > RT_CPUS_NR)
  104. {
  105. LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
  106. num_cpus = RT_CPUS_NR;
  107. }
  108. for (int i = 0; i < num_cpus; i++)
  109. {
  110. set_hwid(i, cpu_hw_ids[i]);
  111. cpu_ops_tbl[i] = cpu_ops[i];
  112. }
  113. return 0;
  114. }
  115. /** init cpu with hardcoded infomation or parsing from FDT */
  116. static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  117. {
  118. int retval;
  119. // first setup cpu_ops_tbl and cpuid_to_hwid
  120. if (num_cpus > 0)
  121. retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
  122. else
  123. {
  124. retval = -1;
  125. }
  126. if (retval)
  127. return retval;
  128. // using cpuid_to_hwid and cpu_ops_tbl to call method_init and cpu_init
  129. // assuming that cpuid 0 has already init
  130. for (int i = 1; i < RT_CPUS_NR; i++)
  131. {
  132. if (rt_cpu_mpidr_early[i] == ID_ERROR)
  133. {
  134. LOG_E("Failed to find hardware id of CPU %d", i);
  135. continue;
  136. }
  137. if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
  138. {
  139. retval = cpu_ops_tbl[i]->cpu_init(i, RT_NULL);
  140. CHECK_RETVAL(retval);
  141. }
  142. else
  143. {
  144. LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]"
  145. , rt_cpu_mpidr_early[i], cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
  146. }
  147. }
  148. return 0;
  149. }
  150. /**
  151. * @brief boot cpu with hardcoded data
  152. *
  153. * @param num_cpus number of cpus
  154. * @param cpu_hw_ids each element represents a hwid of cpu[i]
  155. * @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
  156. * @return int 0 on success,
  157. */
  158. int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  159. {
  160. int retval = 0;
  161. if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
  162. return -1;
  163. retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
  164. CHECK_RETVAL(retval);
  165. return retval;
  166. }
  167. #endif /*RT_USING_SMP*/
  168. /**
  169. * @addtogroup ARM CPU
  170. */
  171. /*@{*/
  172. const char *rt_hw_cpu_arch(void)
  173. {
  174. return "aarch64";
  175. }
  176. /*@}*/