/* start_gcc.S — RT-Thread startup code for Xilinx ZynqMP Cortex-R5 (GNU as) */
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-03-19     WangHuachen  first version
 * 2021-05-11     WangHuachen  Added call to Xil_InitializeExistingMPURegConfig to
 *                             initialize the MPU configuration table with the MPU
 *                             configurations already set in Init_Mpu function.
 */
/* CPSR mode-field encodings (ARMv7-R) */
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F

.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled

/* Per-exception-mode stack sizes in BYTES. stack_setup subtracts these from
 * stack_top, so a mode with size 0 shares its base with the previous mode. */
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000000
.equ ABT_Stack_Size, 0x00000000
.equ FIQ_Stack_Size, 0x00000200
.equ IRQ_Stack_Size, 0x00000200
.equ USR_Stack_Size, 0x00000000

/* ZynqMP LPD register addresses for the RPU subsystem
 * (used below for lock-step fault-log setup and vector-base config) */
.set RPU_GLBL_CNTL, 0xFF9A0000
.set RPU_ERR_INJ, 0xFF9A0020
.set RPU_0_CFG, 0xFF9A0100
.set RPU_1_CFG, 0xFF9A0200
.set RST_LPD_DBG, 0xFF5E0240
.set BOOT_MODE_USER, 0xFF5E0200
/* value OR-ed into RPU_ERR_INJ to enable the lock-step fault log
 * (presumably FAULT_LOG_EN plus a related bit — confirm against the
 * ZynqMP register reference) */
.set fault_log_enable, 0x101

/* Combined exception-stack footprint (USR stack is not included) */
#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
                        FIQ_Stack_Size + IRQ_Stack_Size)
.section .data.share.isr
/* stack */
.globl stack_start
.globl stack_top
/* NOTE(review): this .align is emitted before the .bss section switch below,
 * so it pads .data.share.isr, not the stack itself; stack_start's 8-byte
 * alignment relies on the linker script aligning .bss — confirm, or move the
 * .align after the .bss directive. */
.align 3
.bss
stack_start:
/* NOTE(review): .rept ISR_Stack_Size emits ISR_Stack_Size WORDS, i.e.
 * 4 * ISR_Stack_Size bytes, while stack_setup carves the per-mode stacks out
 * of this region in bytes. The 4x over-reservation also serves as headroom
 * for the boot-time SVC stack (stack_setup leaves SVC sp = stack_top while
 * the startup C code runs), so do not shrink it without auditing boot-time
 * stack usage. (.long 0 in .bss contributes zero-fill only.) */
.rept ISR_Stack_Size
.long 0
.endr
stack_top:
.section .boot,"axS"
/*
 * Reset entry point. Brings the R5 core from reset to a known state:
 * zeroed registers, SVC mode with IRQ/FIQ masked, per-mode stacks set,
 * FPU enabled and cleared, MPU/caches disabled then invalidated, and
 * lock-step fault logging enabled before falling through to init.
 */
.globl _reset
_reset:
    /* Initialize processor registers to 0 (lock-step operation requires
     * both cores to start from identical register state) */
    mov r0,#0
    mov r1,#0
    mov r2,#0
    mov r3,#0
    mov r4,#0
    mov r5,#0
    mov r6,#0
    mov r7,#0
    mov r8,#0
    mov r9,#0
    mov r10,#0
    mov r11,#0
    mov r12,#0
    /* set the cpu to SVC32 mode and disable IRQ/FIQ */
    cpsid if, #Mode_SVC
    /* set up the per-exception-mode stacks */
    bl stack_setup
    /*
     * Enable access to VFP by enabling access to Coprocessors 10 and 11.
     * Enables Full Access i.e. in both privileged and non privileged modes.
     */
    mrc p15, 0, r0, c1, c0, 2 /* Read Coprocessor Access Control Register (CPACR) */
    orr r0, r0, #(0xF << 20) /* Enable access to CP 10 & 11 */
    mcr p15, 0, r0, c1, c0, 2 /* Write Coprocessor Access Control Register (CPACR) */
    isb
    /* enable fpu access: set FPEXC.EN (bit 30); keep original FPEXC in r3 */
    vmrs r3, FPEXC
    orr r1, r3, #(1<<30)
    vmsr FPEXC, r1
    /* clear the floating point registers */
    mov r1,#0
    vmov d0,r1,r1
    vmov d1,r1,r1
    vmov d2,r1,r1
    vmov d3,r1,r1
    vmov d4,r1,r1
    vmov d5,r1,r1
    vmov d6,r1,r1
    vmov d7,r1,r1
    vmov d8,r1,r1
    vmov d9,r1,r1
    vmov d10,r1,r1
    vmov d11,r1,r1
    vmov d12,r1,r1
    vmov d13,r1,r1
    vmov d14,r1,r1
#ifdef __SOFTFP__
    /* Soft-float build: restore the original FPEXC (FPU stays disabled) */
    vmsr FPEXC,r3
#endif
    /* Disable MPU and caches before reconfiguring them */
    mrc p15, 0, r0, c1, c0, 0 /* Read CP15 Control Register (SCTLR) */
    bic r0, r0, #0x05 /* Disable MPU (M bit) and data cache (C bit) */
    bic r0, r0, #0x1000 /* Disable instruction cache (I bit) */
    dsb /* Ensure all previous loads/stores have completed */
    mcr p15, 0, r0, c1, c0, 0 /* Write CP15 Control Register */
    isb /* Ensure subsequent insts execute wrt new MPU settings */
    /* Disable branch prediction and the return stack; ENABLE TCM ECC checks */
    mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
    orr r0, r0, #(0x1 << 17) /* Set RSDIS bit 17 to disable the return stack */
    orr r0, r0, #(0x1 << 16) /* Clear BP bit 15 and set BP bit 16: */
    bic r0, r0, #(0x1 << 15) /* branch always not taken, history table updates disabled */
    orr r0, r0, #(0x1 << 27) /* Enable B1TCM ECC check */
    orr r0, r0, #(0x1 << 26) /* Enable B0TCM ECC check */
    orr r0, r0, #(0x1 << 25) /* Enable ATCM ECC check */
    bic r0, r0, #(0x1 << 5) /* Generate abort on parity errors, with [5:3]=b000 */
    bic r0, r0, #(0x1 << 4)
    bic r0, r0, #(0x1 << 3)
    mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR */
    dsb /* Complete all outstanding explicit memory operations */
    /* Invalidate caches (contents are unpredictable after reset) */
    mov r0,#0 /* r0 = 0 */
    dsb
    mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
    mcr p15, 0, r0, c15, c5, 0 /* invalidate entire data cache (Cortex-R5 specific op) */
    isb
    /* enable fault log for lock step */
    ldr r0,=RPU_GLBL_CNTL
    ldr r1, [r0]
    ands r1, r1, #0x8 /* bit 3 set = cores run in split (non-lock-step) mode */
    /* branch to initialization if split mode */
    bne init
    /* check for boot mode if in lock step, branch to init if JTAG boot mode */
    ldr r0,=BOOT_MODE_USER
    ldr r1, [r0]
    ands r1, r1, #0xF /* low nibble 0 => JTAG boot */
    beq init
    /* reset the debug logic (required before enabling the fault log) */
    ldr r0,=RST_LPD_DBG
    ldr r1, [r0]
    orr r1, r1, #(0x1 << 4)
    orr r1, r1, #(0x1 << 5)
    str r1, [r0]
    /* enable fault log */
    ldr r0,=RPU_ERR_INJ
    ldr r1,=fault_log_enable
    ldr r2, [r0]
    orr r2, r2, r1
    str r2, [r0]
    nop
    nop
/*
 * Second-stage init: configure the MPU, re-enable branch prediction and
 * caches, select low vectors, clear .bss, run C++ constructors, then jump
 * to the RT-Thread entry() function.
 */
init:
    bl Init_MPU /* Initialize MPU regions (caches still off here) */
    /* Re-enable branch prediction and the return stack */
    mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
    bic r0, r0, #(0x1 << 17) /* Clear RSDIS bit 17 to enable return stack */
    bic r0, r0, #(0x1 << 16) /* Clear BP bit 15 and BP bit 16: */
    bic r0, r0, #(0x1 << 15) /* normal operation, BP taken from the global history table */
    orr r0, r0, #(0x1 << 14) /* Disable DBWR for errata 780125 */
    mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR */
    /* Enable icache, dcache and MPU */
    mrc p15,0,r1,c1,c0,0
    ldr r0, =0x1005 /* I bit (12) | C bit (2) | M bit (0) */
    orr r1,r1,r0
    dsb
    mcr p15,0,r1,c1,c0,0 /* Enable caches and MPU */
    isb /* flush prefetch buffer */
    /* Use low vectors (LOVEC): clear SCTLR.V (bit 13) */
    mrc p15, 0, r0, c1, c0, 0
    mvn r1, #0x2000
    and r0, r0, r1
    mcr p15, 0, r0, c1, c0, 0
    /* Clear VINITHI in the RPU config register to keep LOVEC after reset */
    /* NOTE(review): core selection is hard-wired to RPU 0 at compile time —
     * confirm for builds targeting RPU core 1 */
#if 1
    ldr r0, =RPU_0_CFG
#else
    ldr r0, =RPU_1_CFG
#endif
    ldr r1, [r0]
    bic r1, r1, #(0x1 << 2)
    str r1, [r0]
    /* enable asynchronous abort exception: clear CPSR.A (bit 8) */
    mrs r0, cpsr
    bic r0, r0, #0x100
    msr cpsr_xsf, r0
    /* clear .bss */
    mov r0,#0 /* get a zero */
    ldr r1,=__bss_start /* bss start */
    ldr r2,=__bss_end /* bss end */
bss_loop:
    cmp r1,r2 /* check if data to clear */
    strlo r0,[r1],#4 /* clear 4 bytes */
    blo bss_loop /* loop until done */
    /* call C++ constructors of global objects */
    ldr r0, =__ctors_start__
    ldr r1, =__ctors_end__
ctor_loop:
    cmp r0, r1
    beq ctor_end
    ldr r2, [r0], #4 /* fetch next constructor pointer, advance cursor */
    stmfd sp!, {r0-r1} /* constructors may clobber r0/r1 */
    mov lr, pc /* manual link: lr = address of the instruction after bx */
    bx r2
    ldmfd sp!, {r0-r1}
    b ctor_loop
ctor_end:
    bl Xil_InitializeExistingMPURegConfig /* record Init_MPU regions in the MPU config table */
    /* start RT-Thread Kernel */
    ldr pc, _entry /* absolute jump via literal (entry may be far away) */
_entry:
    .word entry
/*
 * stack_setup: assign a stack pointer to each ARM exception mode.
 * Stacks are carved downward from stack_top: SVC, UND and ABT all start at
 * stack_top (their sizes are 0), FIQ gets the next FIQ_Stack_Size bytes,
 * IRQ the IRQ_Stack_Size bytes below that.
 * Returns in SVC mode with IRQ and FIQ masked. Clobbers r0.
 */
stack_setup:
    ldr r0, =stack_top
    @ Set the startup stack for svc
    mov sp, r0
    @ Enter Undefined Instruction Mode and set its Stack Pointer
    msr cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #UND_Stack_Size
    @ Enter Abort Mode and set its Stack Pointer
    msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #ABT_Stack_Size
    @ Enter FIQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #FIQ_Stack_Size
    @ Enter IRQ Mode and set its Stack Pointer
    msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #IRQ_Stack_Size
    @ Switch back to SVC (lr is banked, so the return address is intact)
    msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx lr
.section .text.isr, "ax"
/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq */
.align 5
.globl vector_fiq
/*
 * FIQ handler: save only the non-banked registers (r8-r12 are banked in
 * FIQ mode), call the kernel trap, then return to the interrupted
 * instruction (lr - 4), restoring CPSR from SPSR via subs.
 */
vector_fiq:
    stmfd sp!,{r0-r7,lr}
    bl rt_hw_trap_fiq
    ldmfd sp!,{r0-r7,lr}
    subs pc,lr,#4
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.align 5
.globl vector_irq
/*
 * IRQ entry. Saves the interrupted context on the IRQ stack, dispatches to
 * the kernel IRQ handler, then either returns to the interrupted thread or,
 * if the handler set rt_thread_switch_interrupt_flag, performs the context
 * switch on the way out (rt_hw_context_switch_interrupt_do below).
 */
vector_irq:
    stmfd sp!, {r0-r12,lr}
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    vstmdb sp!, {d0-d15} /* Store floating point registers */
    vmrs r1, FPSCR
    stmfd sp!,{r1}
    vmrs r1, FPEXC
    stmfd sp!,{r1}
#endif
    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave
    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr r0, =rt_thread_switch_interrupt_flag
    ldr r1, [r0]
    cmp r1, #1
    beq rt_hw_context_switch_interrupt_do
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1} /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif
    ldmfd sp!, {r0-r12,lr}
    subs pc, lr, #4 @ return to interrupted instruction, SPSR -> CPSR

/*
 * Rebuild the preempted thread's context on its own (SVC) stack in the
 * RT-Thread frame layout — [fpexc, fpscr, d0-d15,] cpsr, r0-r12, lr, pc —
 * store sp in its TCB slot, then load the next thread's sp and resume it.
 * On entry r0 still points at rt_thread_switch_interrupt_flag.
 */
rt_hw_context_switch_interrupt_do:
    mov r1, #0 @ clear flag
    str r1, [r0]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1} /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif
    mov r1, sp @ r1 point to {r0-r3} in stack
    add sp, sp, #4*4 @ skip saved r0-r3; they are re-read via r1 below
    ldmfd sp!, {r4-r12,lr} @ reload saved registers
    mrs r0, spsr @ get cpsr of interrupt thread
    sub r2, lr, #4 @ save old task's pc to r2
    @ Switch to SVC mode with no interrupt.
    msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
    stmfd sp!, {r2} @ push old task's pc
    stmfd sp!, {r4-r12,lr} @ push old task's lr,r12-r4
    ldmfd r1, {r1-r4} @ restore r0-r3 of the interrupt thread
    stmfd sp!, {r1-r4} @ push old task's r0-r3
    stmfd sp!, {r0} @ push old task's cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    vstmdb sp!, {d0-d15} /* Store floating point registers */
    vmrs r1, FPSCR
    stmfd sp!,{r1}
    vmrs r1, FPEXC
    stmfd sp!,{r1}
#endif
    ldr r4, =rt_interrupt_from_thread
    ldr r5, [r4]
    str sp, [r5] @ store sp in preempted task's TCB
    ldr r6, =rt_interrupt_to_thread
    ldr r7, [r6]
    ldr sp, [r7] @ get new task's stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ldmfd sp!, {r1} /* Restore floating point registers */
    vmsr FPEXC, r1
    ldmfd sp!, {r1}
    vmsr FPSCR, r1
    vldmia sp!, {d0-d15}
#endif
    ldmfd sp!, {r4} @ pop new task's cpsr to spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
/*
 * push_svc_reg: build a struct rt_hw_exp_stack (17 words) on the current
 * exception-mode stack for the trap handlers. Frame layout (word index):
 *   [0..12] r0-r12, [13] SVC sp, [14] SVC lr,
 *   [15] exception return address (this mode's lr), [16] SPSR.
 * Leaves r0 pointing at the frame and switches the core to SVC mode.
 * Clobbers r0, r6 and the current mode.
 */
.macro push_svc_reg
    sub sp, sp, #17 * 4 @ sizeof(struct rt_hw_exp_stack)
    stmia sp, {r0 - r12} @ save r0-r12 of the interrupted context
    mov r0, sp @ r0 = frame base; survives the mode switch below
    mrs r6, spsr @ SPSR = CPSR of the interrupted context
    str lr, [r0, #15*4] @ exception lr = faulting/return PC -> PC slot
    str r6, [r0, #16*4] @ saved CPSR slot
    cps #Mode_SVC
    str sp, [r0, #13*4] @ SVC-mode sp of the interrupted context
    str lr, [r0, #14*4] @ SVC-mode lr of the interrupted context
.endm
/* Fatal-trap vectors: each dumps an rt_hw_exp_stack frame via push_svc_reg,
 * reports through the corresponding rt_hw_trap_* C function, and halts
 * ("b ." — the trap handlers do not return). */
.align 5
.globl vector_swi
vector_swi:
    push_svc_reg
    bl rt_hw_trap_swi
    b . @ halt: SWI is not used as a syscall path here

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl rt_hw_trap_undef
    b .

.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
    bl rt_hw_trap_pabt
    b .

.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
    bl rt_hw_trap_dabt
    b .

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl rt_hw_trap_resv
    b .