/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-21     GuEe-GUI     first version
 */
#include <rtthread.h>

#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cpu.h>
#include <mmu.h>
#include <cpuport.h>
#include <interrupt.h>
#include <gtimer.h>
#include <setup.h>
#include <stdlib.h>
#include <ioremap.h>
#include <drivers/ofw.h>
#include <drivers/ofw_fdt.h>
#include <drivers/ofw_raw.h>
#include <drivers/core/dm.h>

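/* Read or write an AArch64 system register by name,
 * e.g. rt_sysreg_read(mpidr_el1, mpidr) */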
#define rt_sysreg_write(sysreg, val) \
    __asm__ volatile ("msr "RT_STRINGIFY(sysreg)", %0"::"r"((rt_uint64_t)(val)))

#define rt_sysreg_read(sysreg, val) \
    __asm__ volatile ("mrs %0, "RT_STRINGIFY(sysreg)"":"=r"((val)))

#define SIZE_KB 1024
#define SIZE_MB (1024 * SIZE_KB)
#define SIZE_GB (1024 * SIZE_MB)

extern rt_ubase_t _start, _end;
extern void _secondary_cpu_entry(void);
extern size_t MMUTable[];
extern void *system_vectors;

static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;
static rt_uint64_t initrd_ranges[3] = { };

#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif

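/* mpidr of each logical CPU, filled in by cpu_info_init(); the designated
 * initializer reserves one extra zero entry at index RT_CPUS_NR */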
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };

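/* Record the device tree blob handed over by the boot stage, after a header
 * sanity check */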
void rt_hw_fdt_install_early(void *fdt)
{
    if (fdt != RT_NULL && !fdt_check_header(fdt))
    {
        fdt_ptr = fdt;
        fdt_size = fdt_totalsize(fdt);
    }
}

rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}

static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}

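/* Scan the devicetree cpu nodes: record each core's ofw node and mpidr, and
 * give every registered cpu_ops a chance to initialize the core */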
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* get boot cpu info */
    rt_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Not the boot CPU: save only the affinity fields and set the
             * reserved bit so the SMP boot code can still check it */
            hwid |= 1ULL << 31;
        }
        else
        {
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;
        rt_ofw_data(np) = (void *)hwid;

        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));
}

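/* Regions named "kernel" or "memheap" belong to the kernel address space */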
rt_inline rt_bool_t is_kernel_aspace(const char *name)
{
    static char * const names[] =
    {
        "kernel",
        "memheap",
    };

    if (!name)
    {
        return RT_FALSE;
    }

    for (int i = 0; i < RT_ARRAY_SIZE(names); ++i)
    {
        if (!rt_strcmp(names[i], name))
        {
            return RT_TRUE;
        }
    }

    return RT_FALSE;
}

void rt_hw_common_setup(void)
{
    rt_size_t mem_region_nr;
    rt_region_t *mem_region;
    rt_size_t page_best_start;
    rt_region_t platform_mem_region;
    static struct mem_desc platform_mem_desc;
    void *kernel_start, *kernel_end, *memheap_start = RT_NULL, *memheap_end = RT_NULL;

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void *)0xfffffffff0000000, 0x10000000, MMUTable, PV_OFFSET);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void *)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif

    kernel_start = rt_kmem_v2p((void *)&_start) - 64;
    kernel_end = rt_kmem_v2p((void *)&_end);

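    /* Look up a previously committed "memheap" region, if any */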
    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        const char *name = "memheap";

        while (mem_region_nr-- > 0)
        {
            if (mem_region->name == name || !rt_strcmp(mem_region->name, name))
            {
                memheap_start = (void *)mem_region->start;
                memheap_end = (void *)mem_region->end;
                break;
            }

            ++mem_region;
        }
    }

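    /* Prefer to place the page allocator right after the memheap (or after
     * the kernel image); if the fdt sits above the kernel, move it behind
     * the memheap first */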
    page_best_start = (rt_size_t)(memheap_end ? : kernel_end);

    if (memheap_end && fdt_ptr > kernel_start)
    {
        rt_memmove(memheap_end - PV_OFFSET, fdt_ptr - PV_OFFSET, fdt_size);

        fdt_ptr = memheap_end;
        page_best_start = (rt_size_t)fdt_ptr + fdt_size;
    }

    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "fdt",
        .start = (rt_size_t)fdt_ptr,
        .end = (rt_size_t)(fdt_ptr + fdt_size),
    }, RT_TRUE);

    fdt_ptr -= PV_OFFSET;

    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "kernel",
        .start = (rt_size_t)kernel_start,
        .end = (rt_size_t)kernel_end,
    }, RT_TRUE);

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();
    rt_fdt_scan_initrd(initrd_ranges);
    rt_fdt_scan_memory();

    if (memheap_start && memheap_end)
    {
        rt_system_heap_init(memheap_start - PV_OFFSET, memheap_end - PV_OFFSET);
    }

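    /* Accumulate the physical bounds of the kernel-aspace and usable regions;
     * the whole range is mapped in one block by rt_hw_mmu_setup() below */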
    platform_mem_region.start = ~0UL;
    platform_mem_region.end = 0;

    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        LOG_I("Reserved memory:");

        while (mem_region_nr-- > 0)
        {
            if (is_kernel_aspace(mem_region->name))
            {
                if (platform_mem_region.start > mem_region->start)
                {
                    platform_mem_region.start = mem_region->start;
                }

                if (platform_mem_region.end < mem_region->end)
                {
                    platform_mem_region.end = mem_region->end;
                }
            }

            LOG_I(" %-*s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);

            ++mem_region;
        }
    }

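    /* Walk the usable regions: pick the one closest past page_best_start
     * (and at least 1MB large) to host the initial page allocator */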
    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_FALSE))
    {
        rt_ubase_t best_offset = ~0UL;
        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL, init_page_region = { 0 };

        LOG_I("Usable memory:");

        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (!mem_region->name)
            {
                continue;
            }

            if (platform_mem_region.start > mem_region->start)
            {
                platform_mem_region.start = mem_region->start;
            }

            if (platform_mem_region.end < mem_region->end)
            {
                platform_mem_region.end = mem_region->end;
            }

            if (mem_region->start >= page_best_start &&
                mem_region->start - page_best_start < best_offset &&
                /* MUST be at least 1MB */
                mem_region->end - mem_region->start >= SIZE_MB)
            {
                page_region = mem_region;
                best_offset = page_region->start - page_best_start;
            }

            LOG_I(" %-*s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);
        }

        RT_ASSERT(page_region != RT_NULL);

        init_page_region.start = page_region->start - PV_OFFSET;
        init_page_region.end = page_region->end - PV_OFFSET;

        rt_page_init(init_page_region);

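        /* Map the accumulated platform range as normal memory, then let the
         * earlycon update its mapping */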
        platform_mem_region.start = RT_ALIGN(platform_mem_region.start, ARCH_PAGE_SIZE);
        platform_mem_region.end = RT_ALIGN_DOWN(platform_mem_region.end, ARCH_PAGE_SIZE);
        RT_ASSERT(platform_mem_region.end - platform_mem_region.start != 0);

        platform_mem_desc.paddr_start = platform_mem_region.start;
        platform_mem_desc.vaddr_start = platform_mem_region.start - PV_OFFSET;
        platform_mem_desc.vaddr_end = platform_mem_region.end - PV_OFFSET - 1;
        platform_mem_desc.attr = NORMAL_MEM;

        rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

        rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

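        /* Install every other usable region into the page allocator */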
        mem_region = usable_mem_region;

        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (mem_region != page_region)
            {
                rt_page_install(*mem_region);
            }
        }
    }

    rt_fdt_unflatten();

    cpu_info_init();

    /* initialize hardware interrupt */
    rt_hw_interrupt_init();
    /* initialize uart */
    rt_hw_uart_init();
    /* initialize timer for os tick */
    rt_hw_gtimer_init();

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    /* Install the IPI handlers */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
#endif
}

#ifdef RT_USING_SMP
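/* Boot every secondary core whose devicetree "enable-method" matches one of
 * the registered cpu_ops (PSCI or spin-table) */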
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }

    /* Maybe we are not running on the first cpu */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;

        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }

        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);

        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);
                break;
            }
        }

        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}

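/* Startup path for a secondary core: install the vector table, switch to the
 * kernel page table, initialize interrupts, then join the scheduler */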
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    rt_hw_spin_lock(&_cpus_lock);

    /* Save this core's mpidr into the table */
    rt_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

    rt_hw_interrupt_init();

    rt_dm_secondary_cpu_init();

    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}

rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif /* RT_USING_SMP */