entry_point.S

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
 * 2020-01-15     bigmagic     the first version
 * 2020-08-10     SummerGift   support clang compiler
 * 2021-11-04     GuEe-GUI     set sp with SP_ELx
 * 2021-12-28     GuEe-GUI     add smp support
 */
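
/*
 * Boot entry for AArch64: record the boot cpu's MPIDR, drop from
 * EL3/EL2 down to EL1, set up a stack, clear the BSS and jump into
 * rtthread_startup(). Secondary cores enter at secondary_cpu_start
 * and branch into secondary_cpu_c_start instead of clearing the BSS.
 */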
#include "rtconfig.h"

#define SECONDARY_STACK_SIZE 4096

.section ".text.entrypoint","ax"

.globl _start
.globl secondary_cpu_start

_start:
#ifdef RT_USING_SMP
    /* Record the boot cpu's MPIDR so every core can recognize it later */
    mrs x1, mpidr_el1
    adr x4, .boot_cpu_mpidr
    str x1, [x4]
    dsb sy
#endif
    bl  __asm_flush_dcache_all      /* The kernel image and data must be flushed out to DDR */
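
/*
 * Secondary cores enter here directly; the boot cpu falls through from
 * _start. Each core looks its MPIDR up in the BSP-provided table to get
 * a cpu id, and every core except the boot cpu also claims a private
 * stack before dropping to EL1.
 */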
secondary_cpu_start:
#ifdef RT_USING_SMP
    adr x4, .boot_cpu_mpidr
    ldr x4, [x4]
    dsb sy

    /* Read this cpu's mpidr_el1 */
    mrs x1, mpidr_el1

    /* Look up the cpu id */
    ldr x0, =rt_cpu_mpidr_early     /* The BSP must define the `rt_cpu_mpidr_early' table when SMP is enabled */
    mov x2, #0

cpu_id_confirm:
    add x2, x2, #1                  /* Advance to the next cpu id */
    ldr x3, [x0], #8
    dsb sy
    cmp x3, #0
    beq cpu_idle                    /* A zero entry marks the end of the `rt_cpu_mpidr_early' table */
    cmp x3, x1
    bne cpu_id_confirm

    /* Cpu id found */
    sub x0, x2, #1
    msr tpidr_el1, x0               /* Save the cpu id for later retrieval */

    cmp x3, x4                      /* Is this the boot cpu? */
    beq boot_cpu_setup
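
    /*
     * Secondary stacks are carved out of the .secondary_cpu_stack area at
     * the bottom of this file: cpu id N (N >= 1) gets the region of
     * SECONDARY_STACK_SIZE bytes ending at
     * .secondary_cpu_stack_top - (N - 1) * SECONDARY_STACK_SIZE.
     */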
    /* Set the current cpu's stack top */
    sub x0, x0, #1
    mov x1, #SECONDARY_STACK_SIZE
    adr x2, .secondary_cpu_stack_top
    msub x1, x0, x1, x2             /* x1 = stack_top - (cpu_id - 1) * stack_size */
    b   cpu_check_el
#else
    msr tpidr_el1, xzr
#endif /* RT_USING_SMP */

boot_cpu_setup:
    ldr x1, =_start

cpu_check_el:
    mrs x0, CurrentEL               /* The current EL is held in bits [3:2]; all other bits are reserved */
    and x0, x0, #12                 /* Clear the reserved bits */

    /* Running at EL3? */
    cmp x0, #12                     /* EL3 encodes as 0b1100 */
    bne cpu_not_in_el3
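
    /*
     * From here the code steps down one exception level at a time:
     * program SPSR_ELx/ELR_ELx, then eret into the label for the next
     * lower level, until execution reaches EL1.
     */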
    /* Should never be executed, just for completeness. (EL3) */
    mov x2, #(1 << 0)               /* EL0 and EL1 are in Non-Secure state */
    orr x2, x2, #(1 << 4)           /* RES1 */
    orr x2, x2, #(1 << 5)           /* RES1 */
    bic x2, x2, #(1 << 7)           /* SMC instructions are enabled at EL1 and above */
    orr x2, x2, #(1 << 8)           /* HVC instructions are enabled at EL1 and above */
    orr x2, x2, #(1 << 10)          /* The next lower level is AArch64 */
    msr scr_el3, x2

    mov x2, #9                      /* Next level is 0b1001->EL2h */
    orr x2, x2, #(1 << 6)           /* Mask FIQ */
    orr x2, x2, #(1 << 7)           /* Mask IRQ */
    orr x2, x2, #(1 << 8)           /* Mask SError */
    orr x2, x2, #(1 << 9)           /* Mask Debug Exception */
    msr spsr_el3, x2

    adr x2, cpu_in_el2
    msr elr_el3, x2
    eret

cpu_not_in_el3:                     /* Running at EL2 or EL1 */
    cmp x0, #4                      /* EL1 encodes as 0b0100 */
    beq cpu_in_el1

cpu_in_el2:
    /* Allow EL1 (and EL0) access to the physical counter and timer */
    mrs x0, cnthctl_el2             /* Counter-timer Hypervisor Control register */
    orr x0, x0, #3
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr

    mov x0, #(1 << 31)              /* EL1 executes in AArch64 state */
    orr x0, x0, #(1 << 1)           /* SWIO, hardwired on Pi3 */
    msr hcr_el2, x0

    mov x2, #5                      /* Next level is 0b0101->EL1h */
    orr x2, x2, #(1 << 6)           /* Mask FIQ */
    orr x2, x2, #(1 << 7)           /* Mask IRQ */
    orr x2, x2, #(1 << 8)           /* Mask SError */
    orr x2, x2, #(1 << 9)           /* Mask Debug Exception */
    msr spsr_el2, x2

    adr x2, cpu_in_el1
    msr elr_el2, x2
    eret
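
/*
 * All cores arrive here in EL1. x1 still holds the stack top chosen
 * earlier: the address of _start for the boot cpu (its early stack grows
 * downwards below _start), or a slot in .secondary_cpu_stack for the
 * other cores.
 */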
cpu_in_el1:
    msr spsel, #1
    mov sp, x1                      /* Set the EL1 stack pointer */

    /* Avoid traps from SIMD or floating-point instructions */
    mov x1, #0x00300000             /* Don't trap SIMD/FP instructions at EL0 or EL1 */
    msr cpacr_el1, x1

    mrs x1, sctlr_el1
    orr x1, x1, #(1 << 12)          /* Enable the instruction cache */
    bic x1, x1, #(3 << 3)           /* Disable SP alignment checks */
    bic x1, x1, #(1 << 1)           /* Disable alignment checking */
    msr sctlr_el1, x1
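
    /*
     * Only the boot cpu set sp to _start above, so comparing sp against
     * _start separates the boot cpu (which goes on to clear the BSS) from
     * the secondary cores (which jump straight into C).
     */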
#ifdef RT_USING_SMP
    ldr x1, =_start
    cmp sp, x1
    bne secondary_cpu_c_start
#endif /* RT_USING_SMP */
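
    /* Clear the BSS: zero eight bytes at a time while at least eight
     * bytes remain, then finish any unaligned tail byte by byte. */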
    ldr x0, =__bss_start
    ldr x1, =__bss_end
    sub x2, x1, x0
    mov x3, x1
    cmp x2, #7
    bls clean_bss_check

clean_bss_loop_quad:
    str xzr, [x0], #8
    sub x2, x3, x0
    cmp x2, #7
    bhi clean_bss_loop_quad
    cmp x1, x0
    bls jump_to_entry

clean_bss_loop_byte:
    strb wzr, [x0], #1              /* Byte store, so the tail never writes past __bss_end */

clean_bss_check:
    cmp x1, x0
    bhi clean_bss_loop_byte

jump_to_entry:
    b   rtthread_startup

cpu_idle:
    wfe
    b   cpu_idle
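
/*
 * Per-cpu data used above: the boot cpu's saved MPIDR, and the stack
 * area shared out among the RT_CPUS_NR - 1 secondary cores. The
 * .align 12 keeps the stack area 4 KiB aligned.
 */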
#ifdef RT_USING_SMP
.align 3
.boot_cpu_mpidr:
    .quad 0x0

.align 12
.secondary_cpu_stack:
    .space (SECONDARY_STACK_SIZE * (RT_CPUS_NR - 1))
.secondary_cpu_stack_top:
#endif