/* start_gcc.S */
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch
 *                             checks and switches to a new thread
 */
#include "rtconfig.h"

/* ARM processor mode encodings, written to CPSR[4:0] */
.equ Mode_USR,          0x10
.equ Mode_FIQ,          0x11
.equ Mode_IRQ,          0x12
.equ Mode_SVC,          0x13
.equ Mode_ABT,          0x17
.equ Mode_UND,          0x1B
.equ Mode_SYS,          0x1F

.equ I_Bit,             0x80    @ when I bit is set, IRQ is disabled
.equ F_Bit,             0x40    @ when F bit is set, FIQ is disabled

/* Per-mode stack sizes in bytes; a size of 0 reserves no space for that
 * mode in the shared ISR stack region below. */
.equ UND_Stack_Size,    0x00000000
.equ SVC_Stack_Size,    0x00000400
.equ ABT_Stack_Size,    0x00000000
.equ RT_FIQ_STACK_PGSZ, 0x00000000
.equ RT_IRQ_STACK_PGSZ, 0x00000800

/* Total size of the boot/ISR stack area reserved at stack_start below. */
#define ISR_Stack_Size  (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
  30. .section .data.share.isr
  31. /* stack */
  32. .globl stack_start
  33. .globl stack_top
  34. stack_start:
  35. .rept ISR_Stack_Size
  36. .byte 0
  37. .endr
  38. stack_top:
  39. .text
  40. /* reset entry */
  41. .globl _reset
  42. _reset:
  43. /* set the cpu to SVC32 mode and disable interrupt */
  44. cps #Mode_SVC
  45. /* disable the data alignment check */
  46. mrc p15, 0, r1, c1, c0, 0
  47. bic r1, #(1<<1)
  48. mcr p15, 0, r1, c1, c0, 0
  49. /* setup stack */
  50. bl stack_setup
  51. /* clear .bss */
  52. mov r0,#0 /* get a zero */
  53. ldr r1,=__bss_start /* bss start */
  54. ldr r2,=__bss_end /* bss end */
  55. bss_loop:
  56. cmp r1,r2 /* check if data to clear */
  57. strlo r0,[r1],#4 /* clear 4 bytes */
  58. blo bss_loop /* loop until done */
  59. #ifdef RT_USING_SMP
  60. mrc p15, 0, r1, c1, c0, 1
  61. mov r0, #(1<<6)
  62. orr r1, r0
  63. mcr p15, 0, r1, c1, c0, 1 //enable smp
  64. #endif
  65. /* initialize the mmu table and enable mmu */
  66. ldr r0, =platform_mem_desc
  67. ldr r1, =platform_mem_desc_size
  68. ldr r1, [r1]
  69. bl rt_hw_init_mmu_table
  70. bl rt_hw_mmu_init
  71. /* call C++ constructors of global objects */
  72. ldr r0, =__ctors_start__
  73. ldr r1, =__ctors_end__
  74. ctor_loop:
  75. cmp r0, r1
  76. beq ctor_end
  77. ldr r2, [r0], #4
  78. stmfd sp!, {r0-r1}
  79. mov lr, pc
  80. bx r2
  81. ldmfd sp!, {r0-r1}
  82. b ctor_loop
  83. ctor_end:
  84. /* start RT-Thread Kernel */
  85. ldr pc, _rtthread_startup
  86. _rtthread_startup:
  87. .word rtthread_startup
/*
 * stack_setup - install a stack pointer for each ARM processor mode.
 * In : nothing (uses stack_top from the .data.share.isr region)
 * Out: banked sp of SVC/UND/ABT/FIQ/IRQ set; returns in SVC mode,
 *      IRQ/FIQ masked.  Clobbers r0.
 * Regions are carved downward from stack_top in the order
 * UND, ABT, FIQ, IRQ (sizes from the .equ constants above).
 * NOTE(review): the SVC sp is also left at stack_top, overlapping the
 * regions carved below it — presumably acceptable because the boot path
 * switches to a thread stack before SVC mode pushes deeply; confirm.
 */
stack_setup:
    ldr     r0, =stack_top

    @ Set the startup stack for svc
    mov     sp, r0

    @ Enter Undefined Instruction Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #UND_Stack_Size

    @ Enter Abort Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #ABT_Stack_Size

    @ Enter FIQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_FIQ_STACK_PGSZ

    @ Enter IRQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_IRQ_STACK_PGSZ

    /* come back to SVC mode */
    msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx      lr
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.section .text.isr, "ax"

.align 5
.globl vector_fiq
vector_fiq:
    stmfd   sp!, {r0-r7,lr}         @ r8-r12 are banked in FIQ mode; save the rest
    bl      rt_hw_trap_fiq
    ldmfd   sp!, {r0-r7,lr}
    subs    pc, lr, #4              @ return to interrupted code, SPSR_fiq -> CPSR
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check

/*
 * vector_irq - IRQ entry.
 * Saves the interrupted context on the IRQ-mode stack, dispatches the
 * interrupt, then either returns to the interrupted thread or performs
 * a context switch requested during the ISR.
 */
.align 5
.globl vector_irq
vector_irq:
#ifdef RT_USING_SMP
    clrex                           @ drop any open ldrex/strex reservation
#endif
    stmfd   sp!, {r0-r12,lr}        @ save interrupted context on IRQ stack

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

#ifdef RT_USING_SMP
    /* hand the saved frame to the scheduler; it switches threads here if
     * needed and returns when this context is resumed */
    mov     r0, sp
    bl      rt_scheduler_do_irq_switch
    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4              @ return, SPSR_irq -> CPSR
#else
    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr     r0, =rt_thread_switch_interrupt_flag
    ldr     r1, [r0]
    cmp     r1, #1
    beq     rt_hw_context_switch_interrupt_do

    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4              @ no switch pending: plain return

/*
 * rt_hw_context_switch_interrupt_do - move the context saved on the IRQ
 * stack onto the preempted thread's own (SVC) stack in the layout
 * expected by the context-switch code, then load the target thread.
 * Entered with r0 = &rt_thread_switch_interrupt_flag.
 */
rt_hw_context_switch_interrupt_do:
    mov     r1, #0                  @ clear flag
    str     r1, [r0]

    mov     r1, sp                  @ r1 point to {r0-r3} in stack
    add     sp, sp, #4*4            @ step over saved r0-r3 for now
    ldmfd   sp!, {r4-r12,lr}        @ reload saved registers
    mrs     r0, spsr                @ get cpsr of interrupt thread
    sub     r2, lr, #4              @ save old task's pc to r2

    @ Switch to SVC mode with no interrupt. If the usr mode guest is
    @ interrupted, this will just switch to the stack of kernel space.
    @ save the registers in kernel space won't trigger data abort.
    msr     cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd   sp!, {r2}               @ push old task's pc
    stmfd   sp!, {r4-r12,lr}        @ push old task's lr,r12-r4
    ldmfd   r1, {r1-r4}             @ restore r0-r3 of the interrupt thread
    stmfd   sp!, {r1-r4}            @ push old task's r0-r3
    stmfd   sp!, {r0}               @ push old task's cpsr
#ifdef RT_USING_LWP
    stmfd   sp, {r13, r14}^         @ push usr_sp, usr_lr (user-mode banked regs)
    sub     sp, #8                  @ stm with ^ does not writeback; adjust by hand
#endif

    ldr     r4, =rt_interrupt_from_thread
    ldr     r5, [r4]
    str     sp, [r5]                @ store sp in preempted task's TCB
    ldr     r6, =rt_interrupt_to_thread
    ldr     r6, [r6]
    ldr     sp, [r6]                @ get new task's stack pointer
#ifdef RT_USING_LWP
    ldmfd   sp, {r13, r14}^         @ pop usr_sp, usr_lr
    add     sp, #8                  @ ldm with ^ does not writeback; adjust by hand
#endif
    ldmfd   sp!, {r4}               @ pop new task's cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12,lr,pc}^    @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
#endif
/*
 * push_svc_reg - build a struct rt_hw_exp_stack frame on the current
 * exception-mode stack for the trap handlers below.
 * Frame layout (17 words): r0-r12 at [0..12], sp at [13], lr at [14],
 * pc at [15], cpsr at [16].  Leaves the CPU in SVC mode with r0 pointing
 * at the frame.  Clobbers r0 and r6 (their pre-trap values are already
 * in the frame).
 */
.macro push_svc_reg
    sub     sp, sp, #17 * 4         @/* Sizeof(struct rt_hw_exp_stack) */
    stmia   sp, {r0 - r12}          @/* Calling r0-r12 */
    mov     r0, sp                  @/* r0 = frame base, survives the mode switch */
    mrs     r6, spsr                @/* Save CPSR */
    str     lr, [r0, #15*4]         @/* Push PC (exception return address) */
    str     r6, [r0, #16*4]         @/* Push CPSR */
    cps     #Mode_SVC               @/* switch to SVC to read its banked sp/lr */
    str     sp, [r0, #13*4]         @/* Save calling SP */
    str     lr, [r0, #14*4]         @/* Save calling PC (SVC-mode lr) */
.endm
/* Trap vectors: each captures the faulting context with push_svc_reg and
 * calls its C handler with r0 = &rt_hw_exp_stack; the handlers are not
 * expected to return, so each ends spinning at `b .`. */

.align 5
.weak vector_swi
vector_swi:
    push_svc_reg
    bl      rt_hw_trap_swi
    b       .

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl      rt_hw_trap_undef
    b       .

.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
    bl      rt_hw_trap_pabt
    b       .

.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
    bl      rt_hw_trap_dabt
    b       .

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .
#ifdef RT_USING_SMP
/*
 * set_secondary_cpu_boot_address - publish the secondary core's entry
 * point so the board holds it until the core is released from reset/WFE.
 * NOTE(review): 0x10000034 looks like a board SYS_FLAGS clear register,
 * with the set register at 0x10000030 — confirm against the platform TRM.
 */
.global set_secondary_cpu_boot_address
set_secondary_cpu_boot_address:
    ldr     r0, =secondary_cpu_start
    mvn     r1, #0                  @ r1 = 0xffffffff
    ldr     r2, =0x10000034
    str     r1, [r2]                @ clear all previously set flag bits
    str     r0, [r2, #-4]           @ write entry address to the register below
    mov     pc, lr

/*
 * secondary_cpu_start - entry point for secondary cores.
 * Joins SMP coherency, selects low vectors, sets per-mode stacks,
 * enables the MMU and jumps to the C-level SMP startup.  Never returns.
 */
.global secondary_cpu_start
secondary_cpu_start:
    /* set ACTLR.SMP (bit 6) to join the coherency domain */
    mrc     p15, 0, r1, c1, c0, 1
    mov     r0, #(1<<6)
    orr     r1, r0
    mcr     p15, 0, r1, c1, c0, 1   @ enable smp

    mrc     p15, 0, r0, c1, c0, 0
    bic     r0, #(1<<13)            @ clear SCTLR.V: use low vectors (0x00000000)
    mcr     p15, 0, r0, c1, c0, 0

    cps     #Mode_IRQ
    ldr     sp, =irq_stack_2_limit
    cps     #Mode_FIQ
    ldr     sp, =irq_stack_2_limit  @ NOTE(review): FIQ shares the IRQ stack — confirm intentional
    cps     #Mode_SVC
    ldr     sp, =svc_stack_2_limit

    /* initialize the mmu table and enable mmu */
    bl      rt_hw_mmu_init
    b       secondary_cpu_c_start
#endif
  256. .bss
  257. .align 2 //align to 2~2=4
  258. svc_stack_2:
  259. .space (1 << 10)
  260. svc_stack_2_limit:
  261. irq_stack_2:
  262. .space (1 << 10)
  263. irq_stack_2_limit: