  1. /*
  2. * Copyright (c) 2006-2019, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-09-15 Bernard first version
  9. * 2019-07-28 zdzn add smp support
  10. */
  11. #include <rthw.h>
  12. #include <rtthread.h>
  13. #include <board.h>
  14. #include "cp15.h"
  15. #define DBG_TAG "libcpu.aarch64.cpu"
  16. #define DBG_LVL DBG_INFO
  17. #include <rtdbg.h>
  18. #include <string.h>
  19. #include "cpu.h"
  20. #ifdef RT_USING_SMP
  21. void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
  22. {
  23. lock->slock = 0;
  24. }
/* The "next ticket" counter lives in the upper 16 bits of the lock word,
 * the "owner" counter in the lower 16 bits. */
#define TICKET_SHIFT 16

/**
 * @brief Acquire a ticket spinlock (FIFO-fair between cores).
 *
 * Atomically takes the next ticket from the high half of the lock word,
 * then, if that ticket does not already equal the owner field, sleeps in
 * WFE until the owner field reaches our ticket.
 *
 * @param lock the spinlock to acquire
 */
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    unsigned int tmp;
    struct __arch_tickets lockval, newval;

    asm volatile(
        /* Atomically increment the next ticket. */
        " prfm pstl1strm, %3\n"              /* prefetch lock word for store */
        "1: ldaxr %w0, %3\n"                 /* lockval = *lock (load-acquire exclusive) */
        " add %w1, %w0, %w5\n"               /* newval = lockval + (1 << TICKET_SHIFT) */
        " stxr %w2, %w1, %3\n"               /* store exclusive; tmp = 0 on success */
        " cbnz %w2, 1b\n"                    /* retry if exclusive store failed */
        /* Did we get the lock? */
        " eor %w1, %w0, %w0, ror #16\n"      /* zero iff owner == our ticket */
        " cbz %w1, 3f\n"
        /*
         * No: spin on the owner. Send a local event to avoid missing an
         * unlock before the exclusive load.
         */
        " sevl\n"
        "2: wfe\n"                           /* wait for unlock (stlrh) event */
        " ldaxrh %w2, %4\n"                  /* re-read 16-bit owner field */
        " eor %w1, %w2, %w0, lsr #16\n"      /* owner == our ticket yet? */
        " cbnz %w1, 2b\n"
        /* We got the lock. Critical section starts here. */
        "3:"
        : "=&r"(lockval), "=&r"(newval), "=&r"(tmp), "+Q"(*lock)
        : "Q"(lock->tickets.owner), "I"(1 << TICKET_SHIFT)
        : "memory");
    /* full barrier before entering the critical section */
    rt_hw_dmb();
}
/**
 * @brief Release a ticket spinlock.
 *
 * Publishes owner+1 with a store-release (stlrh) on the 16-bit owner
 * field; the release store also generates the event that wakes cores
 * waiting in WFE inside rt_hw_spin_lock().
 *
 * @param lock the spinlock to release (must be held by the caller)
 */
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    /* barrier so critical-section accesses complete before release */
    rt_hw_dmb();
    asm volatile(
        " stlrh %w1, %0\n"
        : "=Q"(lock->tickets.owner)
        : "r"(lock->tickets.owner + 1)
        : "memory");
}
#ifdef RT_CPUS_NR
/* Sentinel hardware id meaning "not discovered / invalid". */
#define ID_ERROR __INT64_MAX__
/**
 * cpu_ops_tbl contains cpu_ops_t for each cpu kernel observed,
 * given cpu logical id 'i', its cpu_ops_t is 'cpu_ops_tbl[i]'
 */
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
/* rt_cpu_mpidr_early translates a logical cpu id to its mpid (a 64-bit
 * hardware id); declared weak so a BSP can supply a hardcoded table.
 * Every entry starts out as ID_ERROR until discovery fills it in. */
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] RT_WEAK = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#ifdef RT_USING_FDT
#include "dtb_node.h"
/* Device-tree node of each cpu, indexed by logical cpu id. */
struct dtb_node *_cpu_node[RT_CPUS_NR];
#endif /* RT_USING_FDT */
#else // RT_CPUS_NR not define
#error "RT_CPUS_NR not define"
#endif /* RT_CPUS_NR */
  81. #define MPIDR_AFF_MASK 0x000000FF00FFFFFFul
  82. #define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
  83. #define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}
  84. static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  85. {
  86. // load in cpu_hw_ids in cpuid_to_hwid,
  87. // cpu_ops to cpu_ops_tbl
  88. if (num_cpus > RT_CPUS_NR)
  89. {
  90. LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
  91. num_cpus = RT_CPUS_NR;
  92. }
  93. for (int i = 0; i < num_cpus; i++)
  94. {
  95. cpuid_to_hwid(i) = cpu_hw_ids[i];
  96. cpu_ops_tbl[i] = cpu_ops[i];
  97. }
  98. return 0;
  99. }
  100. #ifdef RT_USING_FDT
  101. /** read ('size' * 4) bytes number from start, big-endian format */
  102. static rt_uint64_t _read_be_number(void *start, int size)
  103. {
  104. rt_uint64_t buf = 0;
  105. for (; size > 0; size--)
  106. buf = (buf << 32) | fdt32_to_cpu(*(uint32_t *)start++);
  107. return buf;
  108. }
  109. /** check device-type of the node, */
  110. static bool _node_is_cpu(struct dtb_node *node)
  111. {
  112. char *device_type = dtb_node_get_dtb_node_property_value(node, "device_type", NULL);
  113. if (device_type)
  114. {
  115. return !strcmp(device_type, "cpu");
  116. }
  117. return false;
  118. }
  119. static int _read_and_set_hwid(struct dtb_node *cpu, int *id_pool, int *pcpuid)
  120. {
  121. // size/address_cells is number of elements in reg array
  122. int size;
  123. static int address_cells, size_cells;
  124. if (!address_cells && !size_cells)
  125. dtb_node_get_dtb_node_cells(cpu, &address_cells, &size_cells);
  126. void *id_start = dtb_node_get_dtb_node_property_value(cpu, "reg", &size);
  127. rt_uint64_t mpid = _read_be_number(id_start, address_cells);
  128. *pcpuid = *id_pool;
  129. *id_pool = *pcpuid + 1;
  130. rt_cpu_mpidr_early[*pcpuid] = mpid;
  131. return 0;
  132. }
  133. static int _read_and_set_cpuops(struct dtb_node *cpu, int cpuid)
  134. {
  135. char *method = dtb_node_get_dtb_node_property_value(cpu, "enable-method", NULL);
  136. if (!method)
  137. {
  138. LOG_E("Cannot read method from cpu node");
  139. return -1;
  140. }
  141. struct cpu_ops_t *cpu_ops;
  142. if (!strcmp(method, cpu_ops_psci.method))
  143. {
  144. cpu_ops = &cpu_ops_psci;
  145. }
  146. else if (!strcmp(method, cpu_ops_spin_tbl.method))
  147. {
  148. cpu_ops = &cpu_ops_spin_tbl;
  149. }
  150. else
  151. {
  152. cpu_ops = RT_NULL;
  153. LOG_E("Not supported cpu_ops: %s", method);
  154. }
  155. cpu_ops_tbl[cpuid] = cpu_ops;
  156. return 0;
  157. }
  158. static int _cpus_init_data_fdt()
  159. {
  160. // cpuid_to_hwid and cpu_ops_tbl with fdt
  161. void *root = get_dtb_node_head();
  162. int id_pool = 0;
  163. int cpuid;
  164. struct dtb_node *cpus = dtb_node_get_dtb_node_by_path(root, "/cpus");
  165. // for each cpu node (device-type is cpu), read its mpid and set its cpuid_to_hwid
  166. for_each_node_child(cpus)
  167. {
  168. if (!_node_is_cpu(cpus))
  169. {
  170. continue;
  171. }
  172. _read_and_set_hwid(cpus, &id_pool, &cpuid);
  173. _read_and_set_cpuops(cpus, cpuid);
  174. }
  175. return 0;
  176. }
  177. #endif /* RT_USING_FDT */
  178. /** init cpu with hardcoded infomation or parsing from FDT */
  179. static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  180. {
  181. int retval;
  182. // first setup cpu_ops_tbl and cpuid_to_hwid
  183. if (num_cpus > 0)
  184. retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
  185. else
  186. {
  187. retval = -1;
  188. #ifdef RT_USING_FDT
  189. retval = _cpus_init_data_fdt();
  190. #endif
  191. }
  192. if (retval)
  193. return retval;
  194. // using cpuid_to_hwid and cpu_ops_tbl to call method_init and cpu_init
  195. for (int i = 0; i < RT_CPUS_NR; i++)
  196. {
  197. if (cpuid_to_hwid(i) == ID_ERROR)
  198. {
  199. LOG_E("Failed to find hardware id of CPU %d", i);
  200. continue;
  201. }
  202. if (cpu_ops_tbl[i]->cpu_init)
  203. {
  204. retval = cpu_ops_tbl[i]->cpu_init(i);
  205. CHECK_RETVAL(retval);
  206. }
  207. else
  208. {
  209. LOG_E("No cpu_init() supported in cpu %d", cpuid_to_hwid(i));
  210. }
  211. }
  212. return 0;
  213. }
  214. static void _boot_secondary(void)
  215. {
  216. for (int i = 1; i < RT_CPUS_NR; i++)
  217. {
  218. int retval = -0xbad0; // mark no support operation
  219. if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_boot)
  220. retval = cpu_ops_tbl[i]->cpu_boot(i);
  221. if (retval)
  222. {
  223. LOG_E("Failed to boot secondary CPU %d , error code %d", i, retval);
  224. } else {
  225. LOG_I("Secondary CPU %d booted", i);
  226. }
  227. }
  228. }
  229. RT_WEAK void rt_hw_secondary_cpu_up(void)
  230. {
  231. _boot_secondary();
  232. }
  233. /**
  234. * @brief boot cpu with hardcoded data
  235. *
  236. * @param num_cpus number of cpus
  237. * @param cpu_hw_ids each element represents a hwid of cpu[i]
  238. * @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
  239. * @return int 0 on success,
  240. */
  241. int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
  242. {
  243. int retval = 0;
  244. if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
  245. return -1;
  246. retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
  247. CHECK_RETVAL(retval);
  248. if (!retval)
  249. _boot_secondary();
  250. return retval;
  251. }
  252. #endif /*RT_USING_SMP*/
  253. #define CPU_INIT_USING_FDT 0,0,0
  254. /**
  255. * @brief Initialize cpu infomation from fdt
  256. *
  257. * @return int
  258. */
  259. int rt_hw_cpu_init()
  260. {
  261. #ifdef RT_USING_FDT
  262. return _cpus_init(CPU_INIT_USING_FDT);
  263. #else
  264. LOG_E("CPU init failed since RT_USING_FDT was not defined");
  265. return -0xa; /* no fdt support */
  266. #endif /* RT_USING_FDT */
  267. }
  268. /**
  269. * @addtogroup ARM CPU
  270. */
  271. /*@{*/
  272. /** shutdown CPU */
  273. void rt_hw_cpu_shutdown()
  274. {
  275. rt_uint32_t level;
  276. rt_kprintf("shutdown...\n");
  277. level = rt_hw_interrupt_disable();
  278. while (level)
  279. {
  280. RT_ASSERT(0);
  281. }
  282. }
/** Default (weak) idle body for secondary cores: wait for an event (WFE)
 *  to keep the core in a low-power state between wakeups. */
RT_WEAK void rt_hw_secondary_cpu_idle_exec(void)
{
    asm volatile("wfe" ::
                 : "memory", "cc");
}
  288. /*@}*/