entry_point.S

/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
 * 2020-01-15     bigmagic     the first version
 * 2020-08-10     SummerGift   support clang compiler
 */

#include "rtconfig.h"
.section ".text.entrypoint", "ax"

.global __start
__start:
#ifdef ARCH_ARM_BOOTWITH_FLUSH_CACHE
    bl __asm_flush_dcache_all
#endif
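    /*
     * Note: with the MMU still off, the accesses below are non-cacheable;
     * flushing the data cache first (when the platform enables
     * ARCH_ARM_BOOTWITH_FLUSH_CACHE) discards any dirty lines a boot
     * loader may have left behind, so both views of memory agree.
     */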
    bl rt_hw_cpu_id_set

    /* read cpu id, stop slave cores */
    mrs x0, tpidr_el1
    cbz x0, .L__cpu_0           /* the .L prefix marks a local label in ELF */
#ifndef RT_AMP_SLAVE
    /* cpu id > 0: park the core */
    /* cpu 0 also ends up here if entry() ever returns */
.L__current_cpu_idle:
    wfe
    b .L__current_cpu_idle
#endif
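    /*
     * Parked cores sleep in WFE until an event wakes them. How the
     * secondary cores are actually released (PSCI, a spin table, or a
     * board-specific mailbox) is platform code; with RT_USING_SMP they
     * re-enter at _secondary_cpu_entry below.
     */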
.L__cpu_0:
    /* load the top of the boot stack (reserved at the end of this file); it becomes the stack pointer for the current exception level */
    adr x1, .el_stack_top
    /* set up EL1 */
    mrs x0, CurrentEL           /* CurrentEL register: the EL is in bits [3:2], others reserved */
    and x0, x0, #12             /* clear the reserved bits */

    /* running at EL3? */
    cmp x0, #12                 /* 0b1100 means EL3 */
    bne .L__not_in_el3

    /* should never be executed, just for completeness. (EL3) */
    mov x2, #0x5b1
    msr scr_el3, x2             /* SCR_EL3, Secure Configuration Register */
    mov x2, #0x3c9
    msr spsr_el3, x2            /* SPSR_EL3, Saved Program Status Register: 0b1111001001 */
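    /*
     * Decoding these values (a reading of the ARMv8-A register layouts,
     * noted here for reference): SCR_EL3 = 0x5b1 sets NS (lower ELs are
     * non-secure), the RES1 bits [5:4], SMD, HCE and RW (EL2 is AArch64);
     * SPSR_EL3 = 0x3c9 masks DAIF and selects M[4:0] = 0b01001, so the
     * eret below lands in EL2h.
     */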
    adr x2, .L__not_in_el3
    msr elr_el3, x2
    eret                        /* exception return: leave EL3, continue at .L__not_in_el3 */
.L__not_in_el3:                 /* running at EL2 or EL1 */
    cmp x0, #4                  /* 0b0100 means EL1 */
    beq .L__in_el1
    mrs x0, hcr_el2
    bic x0, x0, #0xff
    msr hcr_el2, x0
    msr sp_el1, x1              /* in EL2: point EL1's stack at the boot stack top */

    /* enable CNTP for EL1 */
    mrs x0, cnthctl_el2         /* Counter-timer Hypervisor Control register */
    orr x0, x0, #3
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr

    /* enable AArch64 in EL1 */
    mov x0, #(1 << 31)          /* HCR_EL2.RW: EL1 is AArch64 */
    orr x0, x0, #(1 << 1)       /* SWIO hardwired on Pi3 */
    msr hcr_el2, x0
    mrs x0, hcr_el2

    /* change execution level to EL1 */
    mov x2, #0x3c4
    msr spsr_el2, x2            /* 0b1111000100 */
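    /*
     * SPSR_EL2 = 0x3c4 masks DAIF and selects M[4:0] = 0b00100 (EL1t,
     * i.e. EL1 using SP_EL0), which is why the code both programs sp_el1
     * above and still writes sp explicitly after landing in EL1; SPSel is
     * switched to 1 later, once the MMU is on.
     */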
    adr x2, .L__in_el1
    msr elr_el2, x2
    eret                        /* exception return: leave EL2, continue at .L__in_el1 */
.macro GET_PHY reg, symbol
    adrp \reg, \symbol
    add \reg, \reg, #:lo12:\symbol
.endm
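/*
 * GET_PHY resolves a symbol PC-relatively (adrp/add), so with the MMU off
 * it yields the address the code is actually running at, while
 * "ldr Xn, =symbol" loads the link-time virtual address from a literal
 * pool; the RT_USING_SMART path below subtracts one from the other to
 * obtain the physical-to-virtual offset.
 */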
.L__in_el1:
    mov sp, x1                  /* in EL1: point sp at the boot stack top */

    /* set CPACR_EL1 (Architectural Feature Access Control Register) so SIMD/FP instructions do not trap */
    mov x1, #0x00300000         /* don't trap any SIMD/FP instructions in EL0 or EL1 */
    msr cpacr_el1, x1
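    /* 0x00300000 sets CPACR_EL1.FPEN[21:20] = 0b11: no FP/SIMD trapping at EL0 or EL1 */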
    /* apply the context change */
    dsb ish
    isb

    /* clear bss */
    GET_PHY x1, __bss_start
    GET_PHY x2, __bss_end
    sub x2, x2, x1              /* x2 = bss size */
    and x3, x2, #7              /* x3 = tail bytes (size % 8) */
    ldr x4, =~0x7
    and x2, x2, x4              /* x2 = size rounded down to a multiple of 8 */

.L__clean_bss_loop:
    cbz x2, .L__clean_bss_loop_1
    str xzr, [x1], #8           /* zero 8 bytes at a time */
    sub x2, x2, #8
    b .L__clean_bss_loop

.L__clean_bss_loop_1:
    cbz x3, .L__jump_to_entry
    strb wzr, [x1], #1          /* zero the remaining tail byte by byte */
    sub x3, x3, #1
    b .L__clean_bss_loop_1
.L__jump_to_entry:              /* jump to C code, should not return */
    bl mmu_tcr_init

    adr x0, .early_mmu_table    /* install the early page tables */
    add x1, x0, #0x1000
    msr ttbr0_el1, x0
    msr ttbr1_el1, x1
    dsb sy
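    /*
     * .early_mmu_table (end of this file) reserves two 4 KiB pages: the
     * first is programmed as TTBR0_EL1 (low/identity range), the second,
     * 0x1000 bytes in, as TTBR1_EL1 (kernel range). rt_hw_mem_setup_early
     * is expected to fill them in before the MMU is switched on.
     */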
#ifdef RT_USING_SMART
    ldr x2, =__start            /* link-time (virtual) address */
    GET_PHY x3, __start         /* run-time (physical) address */
    sub x3, x3, x2              /* x3 = phys - virt offset */
#else
    mov x3, #0
#endif
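    /*
     * Register arguments for the call below, as this file uses them:
     * x2 = size of the kernel mapping (256 MiB), x3 = physical-to-virtual
     * offset (zero when the kernel runs at its link address).
     */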
    ldr x2, =0x10000000         /* map 256M of memory for the kernel space */
    bl rt_hw_mem_setup_early
    ldr x30, =after_mmu_enable  /* set LR to after_mmu_enable; it's a virtual address */
    mrs x1, sctlr_el1
    bic x1, x1, #(3 << 3)       /* clear SA, SA0 */
    bic x1, x1, #(1 << 1)       /* clear A */
    orr x1, x1, #(1 << 12)      /* set I */
    orr x1, x1, #(1 << 2)       /* set C */
    orr x1, x1, #(1 << 0)       /* set M */
    msr sctlr_el1, x1           /* enable the MMU */
    dsb ish
    isb
    ic ialluis                  /* invalidate all instruction caches in the Inner Shareable domain to the Point of Unification */
    dsb ish
    isb
    tlbi vmalle1                /* invalidate all stage 1 translations used at EL1 with the current VMID */
    dsb ish
    isb
    ret
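    /*
     * The ret above is the virtual-address handover: x30 was loaded with
     * the link-time (virtual) address of after_mmu_enable, so once
     * SCTLR_EL1.M is set, execution jumps from the identity-mapped
     * physical alias into the kernel's virtual mapping.
     */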
after_mmu_enable:
#ifdef RT_USING_SMART
    mrs x0, tcr_el1             /* disable ttbr0; only the kernel space is used */
    orr x0, x0, #(1 << 7)
    msr tcr_el1, x0
    msr ttbr0_el1, xzr
    dsb sy
#endif
    mov x0, #1
    msr spsel, x0               /* switch to SP_EL1 */
    adr x1, .el_stack_top
    mov sp, x1                  /* point sp_el1 at the top of the boot stack */
    b rtthread_startup
#ifdef RT_USING_SMP
/**
 * secondary cpu
 */
.global _secondary_cpu_entry
_secondary_cpu_entry:
    bl rt_hw_cpu_id_set
    adr x1, .el_stack_top

    /* set up EL1 */
    mrs x0, CurrentEL           /* CurrentEL register: the EL is in bits [3:2], others reserved */
    and x0, x0, #12             /* clear the reserved bits */

    /* running at EL3? */
    cmp x0, #12                 /* 0b1100 means EL3 */
    bne .L__not_in_el3_cpux

    /* should never be executed, just for completeness. (EL3) */
    mov x2, #0x5b1
    msr scr_el3, x2             /* SCR_EL3, Secure Configuration Register */
    mov x2, #0x3c9
    msr spsr_el3, x2            /* SPSR_EL3, Saved Program Status Register: 0b1111001001 */
    adr x2, .L__not_in_el3_cpux
    msr elr_el3, x2
    eret                        /* exception return: leave EL3, continue at .L__not_in_el3_cpux */
.L__not_in_el3_cpux:            /* running at EL2 or EL1 */
    cmp x0, #4                  /* 0b0100 means EL1 */
    beq .L__in_el1_cpux
    mrs x0, hcr_el2
    bic x0, x0, #0xff
    msr hcr_el2, x0
    msr sp_el1, x1              /* in EL2: point EL1's stack at the boot stack top */

    /* enable CNTP for EL1 */
    mrs x0, cnthctl_el2         /* Counter-timer Hypervisor Control register */
    orr x0, x0, #3
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr

    /* enable AArch64 in EL1 */
    mov x0, #(1 << 31)          /* HCR_EL2.RW: EL1 is AArch64 */
    orr x0, x0, #(1 << 1)       /* SWIO hardwired on Pi3 */
    msr hcr_el2, x0
    mrs x0, hcr_el2

    /* change execution level to EL1 */
    mov x2, #0x3c4
    msr spsr_el2, x2            /* 0b1111000100 */
    adr x2, .L__in_el1_cpux
    msr elr_el2, x2
    eret                        /* exception return: leave EL2, continue at .L__in_el1_cpux */
.L__in_el1_cpux:
    mrs x0, tpidr_el1
    /* each cpu's init stack is 8KB */
    sub x1, x1, x0, lsl #13     /* x1 = .el_stack_top - cpu_id * 8KB */
    mov sp, x1                  /* in EL1: point sp at this cpu's boot stack */
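    /*
     * lsl #13 scales the cpu id by 8 KiB, so core n's early stack sits
     * n * 8 KiB below .el_stack_top; note that the .el_stack reservation
     * at the end of this file must be sized for every core that boots
     * through here.
     */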
    /* set CPACR_EL1 (Architectural Feature Access Control Register) so SIMD/FP instructions do not trap */
    mov x1, #0x00300000         /* don't trap any SIMD/FP instructions in EL0 or EL1 */
    msr cpacr_el1, x1
.L__jump_to_entry_cpux:         /* jump to C code, should not return */
    /* init mmu early */
    bl mmu_tcr_init

    adr x0, .early_mmu_table    /* install the early page tables set up by the boot cpu */
    add x1, x0, #0x1000
    msr ttbr0_el1, x0
    msr ttbr1_el1, x1
    dsb sy

    ldr x30, =after_mmu_enable_cpux /* set LR to after_mmu_enable_cpux; it's a virtual address */
    mrs x1, sctlr_el1
    bic x1, x1, #(3 << 3)       /* clear SA, SA0 */
    bic x1, x1, #(1 << 1)       /* clear A */
    orr x1, x1, #(1 << 12)      /* set I */
    orr x1, x1, #(1 << 2)       /* set C */
    orr x1, x1, #(1 << 0)       /* set M */
    msr sctlr_el1, x1           /* enable the MMU */
    dsb sy
    isb sy
    ic ialluis                  /* invalidate all instruction caches in the Inner Shareable domain to the Point of Unification */
    dsb sy
    isb sy
    tlbi vmalle1                /* invalidate all stage 1 translations used at EL1 with the current VMID */
    dsb sy
    isb sy
    ret
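    /*
     * Same virtual-address handover as the boot cpu path: the ret jumps
     * to the link-time address of after_mmu_enable_cpux now that the MMU
     * translates it.
     */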
after_mmu_enable_cpux:
#ifdef RT_USING_SMART
    mrs x0, tcr_el1             /* disable ttbr0; only the kernel space is used */
    orr x0, x0, #(1 << 7)
    msr tcr_el1, x0
    msr ttbr0_el1, xzr
    dsb sy
#endif
    mov x0, #1
    msr spsel, x0               /* switch to SP_EL1 */

    mrs x0, tpidr_el1
    /* each cpu's init stack is 8KB */
    adr x1, .el_stack_top
    sub x1, x1, x0, lsl #13
    mov sp, x1                  /* point sp_el1 at this cpu's boot stack */
    b rt_hw_secondary_cpu_bsp_start
#endif
.align 12
.early_mmu_table:
    .space (4096 * 2)
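/*
 * .align 12 forces 4 KiB (1 << 12) alignment: the translation-table base
 * addresses written to TTBR0_EL1/TTBR1_EL1 must be aligned to the table
 * size, and the two pages here back the tables installed above.
 */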
.align 12
.el_stack:
    .space (8192)
.el_stack_top: