cpu.c

/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-09-15     Bernard      first version
 * 2019-07-28     zdzn         add smp support
 */

#include <rthw.h>
#include <rtthread.h>
#include <board.h>
#include "cp15.h"

#define DBG_TAG "libcpu.aarch64.cpu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <string.h>
#include "cpu.h"
#include "psci_api.h"

void (*system_off)(void);

#ifdef RT_USING_SMP

#ifdef RT_USING_FDT
#include "dtb_node.h"
struct dtb_node *_cpu_node[RT_CPUS_NR];
#endif /* RT_USING_FDT */

#define MPIDR_AFF_MASK       0x000000FF00FFFFFFul
#define REPORT_ERR(retval)   LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}

/**
 * cpu_ops_tbl holds the cpu_ops_t of each CPU the kernel has observed:
 * given the logical cpu id 'i', its cpu_ops_t is 'cpu_ops_tbl[i]'.
 */
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];

#ifdef RT_USING_SMART
/* rt_cpu_mpidr_early translates a logical cpu id to its MPIDR, a 64-bit value */
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/* Common default mpidr_el1 table; redefine it in the BSP if the hardware maps cores differently */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
    [0] = 0x80000000,
    [1] = 0x80000001,
    [2] = 0x80000002,
    [3] = 0x80000003,
    [4] = 0x80000004,
    [5] = 0x80000005,
    [6] = 0x80000006,
    [7] = 0x80000007,
    [RT_CPUS_NR] = 0
};
#endif /* RT_USING_SMART */

typedef rt_hw_spinlock_t arch_spinlock_t;
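
/*
 * Acquire the lock: wait with WFE while the lock word is non-zero, then try to
 * claim it with an exclusive store; LDAXR provides acquire semantics, so the
 * critical section cannot be observed before the lock is held.
 */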
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned int tmp;

    asm volatile(
        "   sevl\n"
        "1: wfe\n"
        "2: ldaxr   %w0, %1\n"
        "   cbnz    %w0, 1b\n"
        "   stxr    %w0, %w2, %1\n"
        "   cbnz    %w0, 2b\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "cc", "memory");
}
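
/*
 * Try to take the lock once without blocking: returns non-zero on success,
 * 0 if the lock was already held or the exclusive store failed.
 */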
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    unsigned int tmp;

    asm volatile(
        "   ldaxr   %w0, %1\n"
        "   cbnz    %w0, 1f\n"
        "   stxr    %w0, %w2, %1\n"
        "1:\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "cc", "memory");
    return !tmp;
}
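
/*
 * Release the lock: STLR is a store-release, so every access made inside the
 * critical section becomes visible before the lock word is cleared.
 */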
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    asm volatile(
        "   stlr    %w1, %0\n"
        : "=Q" (lock->lock) : "r" (0) : "memory");
}

void rt_hw_spin_lock_init(arch_spinlock_t *lock)
{
    lock->lock = 0;
}

void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    arch_spin_lock(lock);
}

void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    arch_spin_unlock(lock);
}

rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *lock)
{
    return arch_spin_trylock(lock);
}

static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    /* record cpu_hw_ids into the cpuid-to-hwid table and cpu_ops into cpu_ops_tbl */
    if (num_cpus > RT_CPUS_NR)
    {
        LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
        num_cpus = RT_CPUS_NR;
    }

    for (int i = 0; i < num_cpus; i++)
    {
        set_hwid(i, cpu_hw_ids[i]);
        cpu_ops_tbl[i] = cpu_ops[i];
    }
    return 0;
}

#ifdef RT_USING_FDT
/** read a big-endian number of ('size' * 4) bytes starting at 'start' */
static rt_uint64_t _read_be_number(void *start, int size)
{
    rt_uint64_t buf = 0;

    for (; size > 0; size--)
    {
        buf = (buf << 32) | fdt32_to_cpu(*(uint32_t *)start);
        start = (uint32_t *)start + 1;
    }
    return buf;
}

/** check whether the node's device_type property is "cpu" */
static bool _node_is_cpu(struct dtb_node *node)
{
    char *device_type = dtb_node_get_dtb_node_property_value(node, "device_type", NULL);
    if (device_type)
    {
        return !strcmp(device_type, "cpu");
    }
    return false;
}
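
/**
 * Read the MPIDR from the cpu node's "reg" property, allocate the next logical
 * cpu id from 'id_pool' and record the cpuid-to-hwid mapping with set_hwid().
 */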
static int _read_and_set_hwid(struct dtb_node *cpu, int *id_pool, int *pcpuid)
{
    /* address_cells/size_cells give the number of 32-bit cells in each reg entry */
    int size;
    static int address_cells, size_cells;
    if (!address_cells && !size_cells)
        dtb_node_get_dtb_node_cells(cpu, &address_cells, &size_cells);

    void *id_start = dtb_node_get_dtb_node_property_value(cpu, "reg", &size);
    rt_uint64_t mpid = _read_be_number(id_start, address_cells);

    *pcpuid = *id_pool;
    *id_pool = *id_pool + 1;
    set_hwid(*pcpuid, mpid);

    LOG_I("Using MPID 0x%lx as cpu %d", mpid, *pcpuid);

    /* record the node so that cpu_init can use it later */
    _cpu_node[*pcpuid] = cpu;
    return 0;
}
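
/**
 * Select the boot method named by the node's "enable-method" property (PSCI or
 * spin-table) and record the matching cpu_ops in cpu_ops_tbl[cpuid].
 */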
static int _read_and_set_cpuops(struct dtb_node *cpu, int cpuid)
{
    char *method = dtb_node_get_dtb_node_property_value(cpu, "enable-method", NULL);
    if (!method)
    {
        LOG_E("Cannot read enable-method from cpu node");
        return -1;
    }

    struct cpu_ops_t *cpu_ops;
    if (!strcmp(method, cpu_ops_psci.method))
    {
        cpu_ops = &cpu_ops_psci;
    }
    else if (!strcmp(method, cpu_ops_spin_tbl.method))
    {
        cpu_ops = &cpu_ops_spin_tbl;
    }
    else
    {
        cpu_ops_tbl[cpuid] = RT_NULL;
        LOG_E("Unsupported cpu_ops: %s", method);
        return -1;
    }

    cpu_ops_tbl[cpuid] = cpu_ops;
    LOG_D("Using boot method [%s] for cpu %d", cpu_ops->method, cpuid);
    return 0;
}

static int _cpus_init_data_fdt(void)
{
    /* populate cpuid_to_hwid and cpu_ops_tbl from the FDT */
    void *root = get_dtb_node_head();
    int id_pool = 0;
    int cpuid;
    struct dtb_node *cpus = dtb_node_get_dtb_node_by_path(root, "/cpus");

    /* for each cpu node (device_type is "cpu"), read its MPID and fill cpuid_to_hwid */
    for_each_node_child(cpus)
    {
        if (!_node_is_cpu(cpus))
        {
            continue;
        }

        if (id_pool >= RT_CPUS_NR)
        {
            LOG_W("Reading more cpus from FDT than RT_CPUS_NR"
                  "\n Parsing will not continue and only %d cpus will be used.", RT_CPUS_NR);
            break;
        }

        _read_and_set_hwid(cpus, &id_pool, &cpuid);
        _read_and_set_cpuops(cpus, cpuid);
    }
    return 0;
}
#endif /* RT_USING_FDT */

/** initialize cpus with hardcoded information or by parsing the FDT */
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval;

    /* first set up cpu_ops_tbl and cpuid_to_hwid */
    if (num_cpus > 0)
        retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
    else
    {
        retval = -1;
#ifdef RT_USING_FDT
        retval = _cpus_init_data_fdt();
#endif
    }
    if (retval)
        return retval;

    /* use cpuid_to_hwid and cpu_ops_tbl to run each secondary CPU's cpu_init, */
    /* assuming that cpu 0 has already been initialized */
    for (int i = 1; i < RT_CPUS_NR; i++)
    {
        if (cpuid_to_hwid(i) == ID_ERROR)
        {
            LOG_E("Failed to find hardware id of CPU %d", i);
            continue;
        }

        if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
        {
            retval = cpu_ops_tbl[i]->cpu_init(i);
            CHECK_RETVAL(retval);
        }
        else
        {
            LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]",
                  i, cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
        }
    }
    return 0;
}
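
/* Bring up every secondary CPU through its probed cpu_ops->cpu_boot and log the outcome */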
static void _boot_secondary(void)
{
    for (int i = 1; i < RT_CPUS_NR; i++)
    {
        int retval = -0xbad0; /* marker: no boot operation available */
        if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_boot)
            retval = cpu_ops_tbl[i]->cpu_boot(i);

        if (retval)
        {
            if (retval == -0xbad0)
                LOG_E("No cpu_ops was probed for CPU %d. Try to configure it or use fdt", i);
            else
                LOG_E("Failed to boot secondary CPU %d, error code %d", i, retval);
        }
        else
        {
            LOG_I("Secondary CPU %d booted", i);
        }
    }
}

rt_weak void rt_hw_secondary_cpu_up(void)
{
    _boot_secondary();
}

/**
 * @brief boot secondary cpus with hardcoded data
 *
 * @param num_cpus number of cpus
 * @param cpu_hw_ids each element is the hwid of cpu[i]
 * @param cpu_ops each element is a pointer to the cpu_ops of cpu[i]
 * @return int 0 on success, or a negative error code
 */
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval = 0;
    if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
        return -1;

    retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
    CHECK_RETVAL(retval);

    return retval;
}
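
/*
 * Usage sketch (illustrative, not part of this file): a BSP without FDT data
 * could bring up one secondary core over PSCI roughly like this; the MPIDR
 * value is a board-specific assumption.
 *
 *     static rt_uint64_t hw_ids[] = { 0x80000001 };        // MPIDR of core 1
 *     static struct cpu_ops_t *ops[] = { &cpu_ops_psci };  // boot via PSCI
 *     rt_hw_cpu_boot_secondary(1, hw_ids, ops);            // 0 on success
 */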

#define CPU_INIT_USING_FDT 0,0,0
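/* expands to (0, 0, 0), so _cpus_init() skips the hardcoded path and parses the FDT */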

/**
 * @brief Initialize cpu information from the fdt
 *
 * @return int 0 on success, or a negative error code
 */
int rt_hw_cpu_init(void)
{
#ifdef RT_USING_FDT
    return _cpus_init(CPU_INIT_USING_FDT);
#else
    LOG_E("CPU init failed since RT_USING_FDT was not defined");
    return -0xa; /* no fdt support */
#endif /* RT_USING_FDT */
}
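
/* Executed by the idle loop on secondary CPUs: wait in WFE; rt_weak so a BSP can override it */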
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    asm volatile("wfe" ::
                 : "memory", "cc");
}
#endif /* RT_USING_SMP */

/**
 * @addtogroup ARM CPU
 */
/*@{*/

const char *rt_hw_cpu_arch(void)
{
    return "aarch64";
}

/** shutdown CPU */
void rt_hw_cpu_shutdown(void)
{
    rt_uint32_t level;
    rt_kprintf("shutdown...\n");

    if (system_off)
        system_off();
    LOG_E("system shutdown failed");

    level = rt_hw_interrupt_disable();
    while (level)
    {
        RT_ASSERT(0);
    }
}
MSH_CMD_EXPORT_ALIAS(rt_hw_cpu_shutdown, shutdown, shutdown machine);

#ifdef RT_USING_CPU_FFS
/**
 * This function finds the first bit set (beginning with the least significant bit)
 * in value and returns the index of that bit.
 *
 * Bits are numbered starting at 1 (the least significant bit). A return value of
 * zero means that the argument was zero.
 *
 * @return the index of the first bit set. If value is 0, this function returns 0.
 */
int __rt_ffs(int value)
{
    return __builtin_ffs(value);
}
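/* Worked examples: __rt_ffs(0) == 0, __rt_ffs(0x8) == 4, __rt_ffs(-1) == 1 */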
#endif

/*@}*/