  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-02-21 GuEe-GUI first version
  9. */
  10. #include <rtthread.h>
  11. #define DBG_TAG "cpu.aa64"
  12. #define DBG_LVL DBG_INFO
  13. #include <rtdbg.h>
  14. #include <cpu.h>
  15. #include <mmu.h>
  16. #include <cpuport.h>
  17. #include <interrupt.h>
  18. #include <gtimer.h>
  19. #include <setup.h>
  20. #include <stdlib.h>
  21. #include <ioremap.h>
  22. #include <rtdevice.h>
  23. #include <gic.h>
  24. #include <gicv3.h>
/* Binary size helpers */
#define SIZE_KB 1024
#define SIZE_MB (1024 * SIZE_KB)
#define SIZE_GB (1024 * SIZE_MB)

/* Kernel image boundaries provided by the linker script */
extern rt_ubase_t _start, _end;
/* Assembly entry point that secondary cores jump to */
extern void _secondary_cpu_entry(void);
/* Boot-time kernel translation table */
extern size_t MMUTable[];
/* AArch64 exception vector table */
extern void *system_vectors;

/* Device tree blob address/size recorded by rt_hw_fdt_install_early() */
static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;
/* Filled by rt_fdt_scan_initrd(); layout defined by the FDT helper */
static rt_uint64_t initrd_ranges[3] = { };

#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif

/* MPIDR affinity value per logical CPU, indexed by cpu id (extra slot at RT_CPUS_NR) */
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

/* CPU boot backends matched against each node's "enable-method" string */
static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

/* OFW node of each CPU, saved so secondary boot can read "enable-method" */
static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
  53. void rt_hw_fdt_install_early(void *fdt)
  54. {
  55. if (fdt != RT_NULL && !fdt_check_header(fdt))
  56. {
  57. fdt_ptr = fdt;
  58. fdt_size = fdt_totalsize(fdt);
  59. }
  60. }
#ifdef RT_USING_HWTIMER
/* Calibrated busy-loop iterations per OS tick, one slot per CPU */
static rt_ubase_t loops_per_tick[RT_CPUS_NR];

/* Read the generic timer physical counter (CNTPCT_EL0) */
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cycles;

    rt_hw_sysreg_read(cntpct_el0, cycles);

    return cycles;
}
  69. static void cpu_loops_per_tick_init(void)
  70. {
  71. rt_ubase_t offset;
  72. volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
  73. volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;
  74. rt_hw_sysreg_read(cntfrq_el0, freq);
  75. step = freq / RT_TICK_PER_SECOND;
  76. cycles_end1 = cpu_get_cycles() + step;
  77. while (cpu_get_cycles() < cycles_end1)
  78. {
  79. __asm__ volatile ("nop");
  80. __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count1));
  81. }
  82. cycles_end2 = cpu_get_cycles() + step;
  83. while (cpu_get_cycles() < cycles_end2)
  84. {
  85. __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count2));
  86. }
  87. if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
  88. {
  89. offset = cycles_count2 - cycles_count1;
  90. }
  91. else
  92. {
  93. /* Impossible, but prepared for any eventualities */
  94. offset = cycles_count2 / 4;
  95. }
  96. loops_per_tick[rt_hw_cpu_id()] = offset;
  97. }
/*
 * Busy-wait for approximately @us microseconds.
 *
 * 0x10c7 == 4295 ~= 2^32 / 1e6, so ((us * 0x10c7) * rate) >> 32 is a
 * fixed-point computation of us * rate / 1e6, where rate is
 * loops_per_tick * RT_TICK_PER_SECOND (calibrated units per second).
 *
 * NOTE(review): the wait condition counts CNTPCT_EL0 timer ticks via
 * cpu_get_cycles(), while 'cycles' is derived from the software loop
 * calibration — confirm the two units are intended to be comparable.
 */
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;

    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;

    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
#endif /* RT_USING_HWTIMER */
/* Default idle hook: wait-for-interrupt so the core can sleep (weak, BSP may override) */
rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}
/* Point the calling core's vector base register at the kernel exception vectors */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
/*
 * Enumerate CPU nodes from the device tree (up to RT_CPUS_NR): record each
 * core's MPIDR/affinity in rt_cpu_mpidr_table[], remember its OFW node, let
 * every cpu_ops backend run its per-CPU init, then flush the table and
 * calibrate the boot CPU's delay loop.
 */
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* get boot cpu info */
    rt_hw_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Only save affinity and res make smp boot can check */
            hwid |= 1ULL << 31;
        }
        else
        {
            /* This node is the boot CPU: keep the full MPIDR value */
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;
        /* Stash the hwid on the node so later boot code can read it back */
        rt_ofw_data(np) = (void *)hwid;

        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    /* Flush the table to memory — NOTE(review): presumably so cores that read
     * it before enabling their caches see the data; confirm against the
     * secondary boot path */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));

#ifdef RT_USING_HWTIMER
    cpu_loops_per_tick_init();

    /* Provide the hwtimer layer a default microsecond delay if none is set */
    if (!rt_device_hwtimer_us_delay)
    {
        rt_device_hwtimer_us_delay = &cpu_us_delay;
    }
#endif /* RT_USING_HWTIMER */
}
  160. rt_inline rt_bool_t is_kernel_aspace(const char *name)
  161. {
  162. static char * const names[] =
  163. {
  164. "kernel",
  165. "memheap",
  166. };
  167. if (!name)
  168. {
  169. return RT_FALSE;
  170. }
  171. for (int i = 0; i < RT_ARRAY_SIZE(names); ++i)
  172. {
  173. if (!rt_strcmp(names[i], name))
  174. {
  175. return RT_TRUE;
  176. }
  177. }
  178. return RT_FALSE;
  179. }
/*
 * Boot-CPU platform bring-up, called once early in boot:
 *   1. install exception vectors and the early kernel address space;
 *   2. locate/relocate the FDT and reserve the FDT, kernel and NULL pages;
 *   3. parse the FDT (stdout, initrd, memory nodes) and init the heap;
 *   4. seed the page allocator from the best usable region, set up the MMU,
 *      then install any deferred/remaining regions;
 *   5. unflatten the FDT and initialize CPUs, interrupts, timer, console.
 */
void rt_hw_common_setup(void)
{
    rt_size_t mem_region_nr;
    rt_region_t *mem_region;
    rt_size_t page_best_start;
    rt_region_t platform_mem_region;
    static struct mem_desc platform_mem_desc;
    void *kernel_start, *kernel_end, *memheap_start = RT_NULL, *memheap_end = RT_NULL;

    system_vectors_init();

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xfffffffff0000000, 0x10000000, MMUTable, PV_OFFSET);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif

    /* Physical span of the kernel image.  NOTE(review): the -64 margin below
     * _start presumably covers data placed just before the image — confirm */
    kernel_start = rt_kmem_v2p((void *)&_start) - 64;
    kernel_end = rt_kmem_v2p((void *)&_end);

    /* Look for a user-provided "memheap" entry among the reserved regions */
    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        const char *name = "memheap";

        while (mem_region_nr --> 0)
        {
            /* Pointer compare is a fast path; fall back to string compare */
            if (mem_region->name == name || !rt_strcmp(mem_region->name, name))
            {
                memheap_start = (void *)mem_region->start;
                memheap_end = (void *)mem_region->end;
                break;
            }
            mem_region++;
        }
    }

    /* Prefer to start the page allocator right after memheap (or the image) */
    page_best_start = (rt_size_t)(memheap_end ? : kernel_end);

    /* If the FDT sits above the kernel image it may be overwritten later:
     * move it to just after memheap and push page_best_start past it */
    if (memheap_end && fdt_ptr > kernel_start)
    {
        rt_memmove(memheap_end - PV_OFFSET, fdt_ptr - PV_OFFSET, fdt_size);

        fdt_ptr = memheap_end;

        page_best_start = (rt_size_t)fdt_ptr + fdt_size;
    }

    /* Reserve the FDT blob itself */
    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "fdt",
        .start = (rt_size_t)fdt_ptr,
        .end = (rt_size_t)(fdt_ptr + fdt_size),
    }, RT_TRUE);

    /* Switch fdt_ptr from physical to virtual for the parsing below */
    fdt_ptr -= PV_OFFSET;

    /* Reserve the kernel image */
    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "kernel",
        .start = (rt_size_t)kernel_start,
        .end = (rt_size_t)kernel_end,
    }, RT_TRUE);

#ifndef RT_USING_SMART
    /* Keep page 0 out of the allocator so NULL stays unusable */
    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "null",
        .start = (rt_size_t)RT_NULL,
        .end = (rt_size_t)RT_NULL + ARCH_PAGE_SIZE,
    }, RT_TRUE);
#endif /* !RT_USING_SMART */

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();

    rt_fdt_scan_initrd(initrd_ranges);

    rt_fdt_scan_memory();

    if (memheap_start && memheap_end)
    {
        rt_system_heap_init(memheap_start - PV_OFFSET, memheap_end - PV_OFFSET);
    }

    /* Track the overall [min, max) physical span across all regions below */
    platform_mem_region.start = ~0UL;
    platform_mem_region.end = 0;

    /* Pass 1: reserved regions — widen the span over kernel address spaces */
    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        LOG_I("Reserved memory:");

        while (mem_region_nr --> 0)
        {
            if (is_kernel_aspace(mem_region->name))
            {
                if (platform_mem_region.start > mem_region->start)
                {
                    platform_mem_region.start = mem_region->start;
                }

                if (platform_mem_region.end < mem_region->end)
                {
                    platform_mem_region.end = mem_region->end;
                }
            }

            /* NOTE(review): "%-*.s" has an empty precision (prints zero chars
             * of the name); "%-*s" or "%-.*s" was probably intended — verify */
            LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);

            ++mem_region;
        }
    }

    /* Pass 2: usable regions — pick the page allocator's seed region */
    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_FALSE))
    {
        rt_ubase_t best_offset = ~0UL;
        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL;
        rt_region_t init_page_region = { 0 };
        rt_region_t defer_hi = { 0 };
        rt_err_t error;

        LOG_I("Usable memory:");

        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (!mem_region->name)
            {
                continue;
            }

            if (platform_mem_region.start > mem_region->start)
            {
                platform_mem_region.start = mem_region->start;
            }

            if (platform_mem_region.end < mem_region->end)
            {
                platform_mem_region.end = mem_region->end;
            }

            /* Candidate: the closest sufficiently-large region at or above
             * page_best_start */
            if (mem_region->start >= page_best_start &&
                mem_region->start - page_best_start < best_offset &&
                /* MUST >= 1MB */
                mem_region->end - mem_region->start >= SIZE_MB)
            {
                page_region = mem_region;

                best_offset = page_region->start - page_best_start;
            }

            LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);
        }

        RT_ASSERT(page_region != RT_NULL);

        /* don't map more than ARCH_EARLY_MAP_SIZE */
        if (page_region->end - page_region->start > ARCH_PAGE_INIT_THRESHOLD)
        {
            /* Split: install the high part after the MMU is fully set up */
            defer_hi.name = page_region->name;
            defer_hi.end = page_region->end;
            defer_hi.start = RT_ALIGN_DOWN(page_region->start + ARCH_PAGE_INIT_THRESHOLD,
                    ARCH_SECTION_SIZE);

            page_region->end = defer_hi.start;
        }

        init_page_region.start = page_region->start - PV_OFFSET;
        init_page_region.end = page_region->end - PV_OFFSET;

        rt_page_init(init_page_region);

        /* Page-align the overall span before handing it to the MMU */
        platform_mem_region.start = RT_ALIGN(platform_mem_region.start, ARCH_PAGE_SIZE);
        platform_mem_region.end = RT_ALIGN_DOWN(platform_mem_region.end, ARCH_PAGE_SIZE);
        RT_ASSERT(platform_mem_region.end - platform_mem_region.start != 0);

        platform_mem_desc.paddr_start = platform_mem_region.start;
        platform_mem_desc.vaddr_start = platform_mem_region.start - PV_OFFSET;
        platform_mem_desc.vaddr_end = platform_mem_region.end - PV_OFFSET - 1;
        platform_mem_desc.attr = NORMAL_MEM;

        rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

        /* Re-map the early console now that the final tables are live */
        rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

        mem_region = usable_mem_region;

        if (defer_hi.start)
        {
            /* to virt address */
            init_page_region.start = defer_hi.start - PV_OFFSET;
            init_page_region.end = defer_hi.end - PV_OFFSET;

            error = rt_page_install(init_page_region);

            if (error)
            {
                LOG_W("Deferred page installation FAILED:");
                LOG_W(" %-*.s [%p, %p]", RT_NAME_MAX,
                        defer_hi.name, defer_hi.start, defer_hi.end);
            }
            else
            {
                LOG_I("Deferred page installation SUCCEED:");
                LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX,
                        defer_hi.name, defer_hi.start, defer_hi.end);
            }
        }

        /* Hand every remaining usable region to the page allocator */
        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (mem_region != page_region && mem_region->name)
            {
                init_page_region.start = mem_region->start - PV_OFFSET;
                init_page_region.end = mem_region->end - PV_OFFSET;

                rt_page_install(init_page_region);
            }
        }
    }

    rt_fdt_unflatten();

    cpu_info_init();

#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();

    /* initialize uart */
    rt_hw_uart_init();
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_HWTIMER_ARM_ARCH */

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    /* Install the IPI handle */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
#endif
}
#ifdef RT_USING_SMP
/*
 * Start every secondary CPU: translate the secondary entry point to a
 * physical address, then for each discovered CPU node dispatch to the
 * cpu_ops backend whose method string matches the node's "enable-method".
 * Weak so a BSP can override the whole procedure.
 */
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }

    /* Maybe we are not in the first cpu */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;

        /* Skip holes and the CPU already running this code */
        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }

        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);

        /* NOTE(review): if no backend matches, err stays 0 and the CPU is
         * silently skipped — confirm this is intentional */
        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);

                break;
            }
        }

        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}
/*
 * Entry for a secondary core (reached via _secondary_cpu_entry): install
 * exception vectors, publish this core's MPIDR, switch to the shared kernel
 * page table, bring up the local interrupt controller and tick timer, then
 * join the scheduler.  Weak so a BSP can override.
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    /* Take the global cpus lock before touching shared scheduler state */
    rt_hw_spin_lock(&_cpus_lock);

    /* Save all mpidr */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    /* Switch this core onto the shared kernel translation table */
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();

    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_HWTIMER_ARM_ARCH */

    rt_dm_secondary_cpu_init();

    /* Accept schedule/stop IPIs on this core */
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    /* Calibrate this core's delay loop if the default delay hook is in use */
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}
/* Idle-loop body for secondary cores: wait-for-event until woken (weak, BSP may override) */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif /* RT_USING_SMP */
/* Route kernel console output through the FDT early console */
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}