/* start_gcc.S — RT-Thread Cortex-A startup and exception vector code (GNU as syntax) */
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch
 *                             checks and switches to a new thread
 */
  12. #include "rtconfig.h"
  13. .equ Mode_USR, 0x10
  14. .equ Mode_FIQ, 0x11
  15. .equ Mode_IRQ, 0x12
  16. .equ Mode_SVC, 0x13
  17. .equ Mode_ABT, 0x17
  18. .equ Mode_UND, 0x1B
  19. .equ Mode_SYS, 0x1F
  20. .equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
  21. .equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
  22. .equ UND_Stack_Size, 0x00000400
  23. .equ SVC_Stack_Size, 0x00000400
  24. .equ ABT_Stack_Size, 0x00000400
  25. .equ RT_FIQ_STACK_PGSZ, 0x00000000
  26. .equ RT_IRQ_STACK_PGSZ, 0x00000800
  27. .equ USR_Stack_Size, 0x00000400
  28. .equ SUB_UND_Stack_Size, 0x00000400
  29. .equ SUB_SVC_Stack_Size, 0x00000400
  30. .equ SUB_ABT_Stack_Size, 0x00000400
  31. .equ SUB_RT_FIQ_STACK_PGSZ, 0x00000000
  32. .equ SUB_RT_IRQ_STACK_PGSZ, 0x00000400
  33. .equ SUB_USR_Stack_Size, 0x00000400
  34. #define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
  35. RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
  36. #define SUB_ISR_Stack_Size (SUB_UND_Stack_Size + SUB_SVC_Stack_Size + SUB_ABT_Stack_Size + \
  37. SUB_RT_FIQ_STACK_PGSZ + SUB_RT_IRQ_STACK_PGSZ)
  38. .section .bss.share.isr
  39. /* stack */
  40. .globl stack_start
  41. .globl stack_top
  42. .align 3
  43. stack_start:
  44. .rept ISR_Stack_Size
  45. .byte 0
  46. .endr
  47. stack_top:
  48. .text
  49. /* reset entry */
  50. .globl _reset
  51. _reset:
  52. #ifdef ARCH_ARMV8
  53. /* Check for HYP mode */
  54. mrs r0, cpsr_all
  55. and r0, r0, #0x1F
  56. mov r8, #0x1A
  57. cmp r0, r8
  58. beq overHyped
  59. b continue
  60. overHyped: /* Get out of HYP mode */
  61. adr r1, continue
  62. msr ELR_hyp, r1
  63. mrs r1, cpsr_all
  64. and r1, r1, #0x1f ;@ CPSR_MODE_MASK
  65. orr r1, r1, #0x13 ;@ CPSR_MODE_SUPERVISOR
  66. msr SPSR_hyp, r1
  67. eret
  68. continue:
  69. #endif
  70. /* set the cpu to SVC32 mode and disable interrupt */
  71. cps #Mode_SVC
  72. #ifdef RT_USING_FPU
  73. mov r4, #0xfffffff
  74. mcr p15, 0, r4, c1, c0, 2
  75. #endif
  76. /* disable the data alignment check */
  77. mrc p15, 0, r1, c1, c0, 0
  78. bic r1, #(1<<0) /* Disable MMU */
  79. bic r1, #(1<<1) /* Disable Alignment fault checking */
  80. bic r1, #(1<<2) /* Disable data cache */
  81. bic r1, #(1<<11) /* Disable program flow prediction */
  82. bic r1, #(1<<12) /* Disable instruction cache */
  83. bic r1, #(3<<19) /* bit[20:19] must be zero */
  84. mcr p15, 0, r1, c1, c0, 0
  85. @ get cpu id, and subtract the offset from the stacks base address
  86. bl rt_hw_cpu_id
  87. mov r5, r0
  88. cmp r5, #0 @ cpu id == 0
  89. beq normal_setup
  90. @ cpu id > 0, stop or wait
  91. #ifdef RT_SMP_AUTO_BOOT
  92. ldr r0, =secondary_cpu_entry
  93. mov r1, #0
  94. str r1, [r0] /* clean secondary_cpu_entry */
  95. #endif /* RT_SMP_AUTO_BOOT */
  96. secondary_loop:
  97. @ cpu core 1 goes into sleep until core 0 wakeup it
  98. wfe
  99. #ifdef RT_SMP_AUTO_BOOT
  100. ldr r1, =secondary_cpu_entry
  101. ldr r0, [r1]
  102. cmp r0, #0
  103. blxne r0 /* if(secondary_cpu_entry) secondary_cpu_entry(); */
  104. #endif /* RT_SMP_AUTO_BOOT */
  105. b secondary_loop
  106. normal_setup:
  107. /* enable I cache + branch prediction */
  108. mrc p15, 0, r0, c1, c0, 0
  109. orr r0, r0, #(1<<12)
  110. orr r0, r0, #(1<<11)
  111. mcr p15, 0, r0, c1, c0, 0
  112. /* setup stack */
  113. bl stack_setup
  114. /* clear .bss */
  115. mov r0,#0 /* get a zero */
  116. ldr r1,=__bss_start /* bss start */
  117. ldr r2,=__bss_end /* bss end */
  118. bss_loop:
  119. cmp r1,r2 /* check if data to clear */
  120. strlo r0,[r1],#4 /* clear 4 bytes */
  121. blo bss_loop /* loop until done */
  122. #ifdef RT_USING_SMP
  123. mrc p15, 0, r1, c1, c0, 1
  124. mov r0, #(1<<6)
  125. orr r1, r0
  126. mcr p15, 0, r1, c1, c0, 1 //enable smp
  127. #endif
  128. /* initialize the mmu table and enable mmu */
  129. ldr r0, =platform_mem_desc
  130. ldr r1, =platform_mem_desc_size
  131. ldr r1, [r1]
  132. bl rt_hw_init_mmu_table
  133. bl rt_hw_mmu_init
  134. /* start RT-Thread Kernel */
  135. ldr pc, _rtthread_startup
  136. _rtthread_startup:
  137. .word rtthread_startup
  138. stack_setup:
  139. ldr r0, =stack_top
  140. @ Set the startup stack for svc
  141. mov sp, r0
  142. sub r0, r0, #SVC_Stack_Size
  143. @ Enter Undefined Instruction Mode and set its Stack Pointer
  144. msr cpsr_c, #Mode_UND|I_Bit|F_Bit
  145. mov sp, r0
  146. sub r0, r0, #UND_Stack_Size
  147. @ Enter Abort Mode and set its Stack Pointer
  148. msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
  149. mov sp, r0
  150. sub r0, r0, #ABT_Stack_Size
  151. @ Enter FIQ Mode and set its Stack Pointer
  152. msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
  153. mov sp, r0
  154. sub r0, r0, #RT_FIQ_STACK_PGSZ
  155. @ Enter IRQ Mode and set its Stack Pointer
  156. msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
  157. mov sp, r0
  158. sub r0, r0, #RT_IRQ_STACK_PGSZ
  159. /* come back to SVC mode */
  160. msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
  161. bx lr
  162. /* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
  163. .section .text.isr, "ax"
  164. .align 5
  165. .globl vector_fiq
  166. vector_fiq:
  167. stmfd sp!,{r0-r7,lr}
  168. bl rt_hw_trap_fiq
  169. ldmfd sp!,{r0-r7,lr}
  170. subs pc, lr, #4
  171. .globl rt_interrupt_enter
  172. .globl rt_interrupt_leave
  173. .globl rt_thread_switch_interrupt_flag
  174. .globl rt_interrupt_from_thread
  175. .globl rt_interrupt_to_thread
  176. .globl rt_current_thread
  177. .globl vmm_thread
  178. .globl vmm_virq_check
  179. .align 5
  180. .globl vector_irq
  181. vector_irq:
  182. #ifdef RT_USING_SMP
  183. clrex
  184. stmfd sp!, {r0, r1}
  185. cps #Mode_SVC
  186. mov r0, sp /* svc_sp */
  187. mov r1, lr /* svc_lr */
  188. cps #Mode_IRQ
  189. sub lr, #4
  190. stmfd r0!, {r1, lr} /* svc_lr, svc_pc */
  191. stmfd r0!, {r2 - r12}
  192. ldmfd sp!, {r1, r2} /* original r0, r1 */
  193. stmfd r0!, {r1 - r2}
  194. mrs r1, spsr /* original mode */
  195. stmfd r0!, {r1}
  196. #ifdef RT_USING_LWP
  197. stmfd r0, {r13, r14}^ /* usr_sp, usr_lr */
  198. sub r0, #8
  199. #endif
  200. #ifdef RT_USING_FPU
  201. /* fpu context */
  202. vmrs r6, fpexc
  203. tst r6, #(1<<30)
  204. beq 1f
  205. vstmdb r0!, {d0-d15}
  206. vstmdb r0!, {d16-d31}
  207. vmrs r5, fpscr
  208. stmfd r0!, {r5}
  209. 1:
  210. stmfd r0!, {r6}
  211. #endif
  212. /* now irq stack is clean */
  213. /* r0 is task svc_sp */
  214. /* backup r0 -> r8 */
  215. mov r8, r0
  216. bl rt_interrupt_enter
  217. bl rt_hw_trap_irq
  218. bl rt_interrupt_leave
  219. cps #Mode_SVC
  220. mov sp, r8
  221. mov r0, r8
  222. bl rt_scheduler_do_irq_switch
  223. b rt_hw_context_switch_exit
  224. #else
  225. stmfd sp!, {r0-r12,lr}
  226. bl rt_interrupt_enter
  227. bl rt_hw_trap_irq
  228. bl rt_interrupt_leave
  229. @ if rt_thread_switch_interrupt_flag set, jump to
  230. @ rt_hw_context_switch_interrupt_do and don't return
  231. ldr r0, =rt_thread_switch_interrupt_flag
  232. ldr r1, [r0]
  233. cmp r1, #1
  234. beq rt_hw_context_switch_interrupt_do
  235. ldmfd sp!, {r0-r12,lr}
  236. subs pc, lr, #4
  237. rt_hw_context_switch_interrupt_do:
  238. mov r1, #0 @ clear flag
  239. str r1, [r0]
  240. mov r1, sp @ r1 point to {r0-r3} in stack
  241. add sp, sp, #4*4
  242. ldmfd sp!, {r4-r12,lr}@ reload saved registers
  243. mrs r0, spsr @ get cpsr of interrupt thread
  244. sub r2, lr, #4 @ save old task's pc to r2
  245. @ Switch to SVC mode with no interrupt. If the usr mode guest is
  246. @ interrupted, this will just switch to the stack of kernel space.
  247. @ save the registers in kernel space won't trigger data abort.
  248. msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
  249. stmfd sp!, {r2} @ push old task's pc
  250. stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
  251. ldmfd r1, {r1-r4} @ restore r0-r3 of the interrupt thread
  252. stmfd sp!, {r1-r4} @ push old task's r0-r3
  253. stmfd sp!, {r0} @ push old task's cpsr
  254. #ifdef RT_USING_LWP
  255. stmfd sp, {r13, r14}^ @push usr_sp, usr_lr
  256. sub sp, #8
  257. #endif
  258. #ifdef RT_USING_FPU
  259. /* fpu context */
  260. vmrs r6, fpexc
  261. tst r6, #(1<<30)
  262. beq 1f
  263. vstmdb sp!, {d0-d15}
  264. vstmdb sp!, {d16-d31}
  265. vmrs r5, fpscr
  266. stmfd sp!, {r5}
  267. 1:
  268. stmfd sp!, {r6}
  269. #endif
  270. ldr r4, =rt_interrupt_from_thread
  271. ldr r5, [r4]
  272. str sp, [r5] @ store sp in preempted tasks's TCB
  273. ldr r6, =rt_interrupt_to_thread
  274. ldr r6, [r6]
  275. ldr sp, [r6] @ get new task's stack pointer
  276. bl rt_interrupt_hook
  277. #ifdef RT_USING_FPU
  278. /* fpu context */
  279. ldmfd sp!, {r6}
  280. vmsr fpexc, r6
  281. tst r6, #(1<<30)
  282. beq 1f
  283. ldmfd sp!, {r5}
  284. vmsr fpscr, r5
  285. vldmia sp!, {d16-d31}
  286. vldmia sp!, {d0-d15}
  287. 1:
  288. #endif
  289. #ifdef RT_USING_LWP
  290. ldmfd sp, {r13, r14}^ @pop usr_sp, usr_lr
  291. add sp, #8
  292. #endif
  293. ldmfd sp!, {r4} @ pop new task's cpsr to spsr
  294. msr spsr_cxsf, r4
  295. ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
  296. #endif
  297. .macro push_svc_reg
  298. sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */
  299. stmia sp, {r0 - r12} @/* Calling r0-r12 */
  300. mov r0, sp
  301. mrs r6, spsr @/* Save CPSR */
  302. str lr, [r0, #15*4] @/* Push PC */
  303. str r6, [r0, #16*4] @/* Push CPSR */
  304. mrs r5, cpsr @/* Save CPSR */
  305. and r4, r6, #0x1F
  306. cmp r4, #Mode_USR
  307. moveq r6, #Mode_SYS
  308. orr r6, r6, #0x80 @/* Switch to previous mode, then save SP & PC */
  309. msr cpsr_c, r6
  310. str sp, [r0, #13*4] @/* Save calling SP */
  311. str lr, [r0, #14*4] @/* Save calling PC */
  312. msr cpsr_c, r5 @/* Switch back to current mode */
  313. .endm
  314. .align 5
  315. .weak vector_swi
  316. vector_swi:
  317. push_svc_reg
  318. bl rt_hw_trap_swi
  319. b .
  320. .align 5
  321. .globl vector_undef
  322. vector_undef:
  323. push_svc_reg
  324. cps #Mode_UND
  325. bl rt_hw_trap_undef
  326. #ifdef RT_USING_FPU
  327. ldr lr, [sp, #15*4]
  328. ldmia sp, {r0 - r12}
  329. add sp, sp, #17 * 4
  330. movs pc, lr
  331. #endif
  332. b .
  333. .align 5
  334. .globl vector_pabt
  335. vector_pabt:
  336. push_svc_reg
  337. bl rt_hw_trap_pabt
  338. b .
  339. .align 5
  340. .globl vector_dabt
  341. vector_dabt:
  342. push_svc_reg
  343. bl rt_hw_trap_dabt
  344. b .
  345. .align 5
  346. .globl vector_resv
  347. vector_resv:
  348. push_svc_reg
  349. bl rt_hw_trap_resv
  350. b .
  351. #ifdef RT_USING_SMP
  352. .global secondary_cpu_start
  353. secondary_cpu_start:
  354. #ifdef RT_USING_FPU
  355. mov r4, #0xfffffff
  356. mcr p15, 0, r4, c1, c0, 2
  357. #endif
  358. mrc p15, 0, r1, c1, c0, 1
  359. mov r0, #(1<<6)
  360. orr r1, r0
  361. mcr p15, 0, r1, c1, c0, 1 //enable smp
  362. mrc p15, 0, r0, c1, c0, 0
  363. bic r0, #(1<<13)
  364. mcr p15, 0, r0, c1, c0, 0
  365. /* enable branch prediction */
  366. mrc p15, 0, r0, c1, c0, 0
  367. orr r0, r0, #(1<<11)
  368. mcr p15, 0, r0, c1, c0, 0
  369. @ get cpu id, and subtract the offset from the stacks base address
  370. bl rt_hw_cpu_id
  371. sub r5, r0, #1
  372. ldr r0, =SUB_ISR_Stack_Size
  373. mul r0, r0, r5 @r0 = SUB_ISR_Stack_Size * (cpuid - 1)
  374. ldr r1, =sub_stack_top
  375. sub r0, r1, r0 @r0 = sub_stack_top - (SUB_ISR_Stack_Size * (cpuid - 1))
  376. cps #Mode_SVC
  377. mov sp, r0
  378. sub r0, r0, #SUB_SVC_Stack_Size
  379. cps #Mode_UND
  380. mov sp, r0
  381. sub r0, r0, #SUB_UND_Stack_Size
  382. cps #Mode_ABT
  383. mov sp, r0
  384. sub r0, r0, #SUB_ABT_Stack_Size
  385. cps #Mode_FIQ
  386. mov sp, r0
  387. sub r0, r0, #SUB_RT_FIQ_STACK_PGSZ
  388. cps #Mode_IRQ
  389. mov sp, r0
  390. sub r0, r0, #SUB_RT_IRQ_STACK_PGSZ
  391. cps #Mode_SVC
  392. /* initialize the mmu table and enable mmu */
  393. bl rt_hw_mmu_init
  394. b secondary_cpu_c_start
  395. .bss
  396. .align 2 //align to 2~2=4
  397. .global sub_stack_top /* used for backtrace to calculate stack top of irq mode */
  398. sub_stack_start:
  399. .space (SUB_ISR_Stack_Size * (RT_CPUS_NR-1))
  400. sub_stack_top:
  401. #endif