/* start_gcc.S */
/*
 * File      : start_gcc.S
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2013-2014, RT-Thread Development Team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch checks
 *                             and switches to a new thread
 */
  26. #include "rtconfig.h"
  27. .equ Mode_USR, 0x10
  28. .equ Mode_FIQ, 0x11
  29. .equ Mode_IRQ, 0x12
  30. .equ Mode_SVC, 0x13
  31. .equ Mode_ABT, 0x17
  32. .equ Mode_UND, 0x1B
  33. .equ Mode_SYS, 0x1F
  34. .equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
  35. .equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
  36. .equ UND_Stack_Size, 0x00000000
  37. .equ SVC_Stack_Size, 0x00000400
  38. .equ ABT_Stack_Size, 0x00000000
  39. .equ RT_FIQ_STACK_PGSZ, 0x00000000
  40. .equ RT_IRQ_STACK_PGSZ, 0x00000800
  41. .equ USR_Stack_Size, 0x00000400
  42. #define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
  43. RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
  44. .section .data.share.isr
  45. /* stack */
  46. .globl stack_start
  47. .globl stack_top
  48. stack_start:
  49. .rept ISR_Stack_Size
  50. .byte 0
  51. .endr
  52. stack_top:
  53. .text
  54. /* reset entry */
  55. .globl _reset
  56. _reset:
  57. /* set the cpu to SVC32 mode and disable interrupt */
  58. mrs r0, cpsr
  59. bic r0, r0, #0x1f
  60. orr r0, r0, #0x13
  61. msr cpsr_c, r0
  62. mrc p15, 0, r1, c1, c0, 1
  63. mov r0, #(1<<6)
  64. orr r1, r0
  65. mcr p15, 0, r1, c1, c0, 1 //enable smp
  66. ldr lr, =after_enable_mmu
  67. ldr r0, =mtbl
  68. b enable_mmu
  69. after_enable_mmu:
  70. /* setup stack */
  71. bl stack_setup
  72. /* clear .bss */
  73. mov r0,#0 /* get a zero */
  74. ldr r1,=__bss_start /* bss start */
  75. ldr r2,=__bss_end /* bss end */
  76. bss_loop:
  77. cmp r1,r2 /* check if data to clear */
  78. strlo r0,[r1],#4 /* clear 4 bytes */
  79. blo bss_loop /* loop until done */
  80. /* call C++ constructors of global objects */
  81. ldr r0, =__ctors_start__
  82. ldr r1, =__ctors_end__
  83. ctor_loop:
  84. cmp r0, r1
  85. beq ctor_end
  86. ldr r2, [r0], #4
  87. stmfd sp!, {r0-r1}
  88. mov lr, pc
  89. bx r2
  90. ldmfd sp!, {r0-r1}
  91. b ctor_loop
  92. ctor_end:
  93. /* start RT-Thread Kernel */
  94. bl flush_cache_all
  95. ldr pc, _rtthread_startup
  96. _rtthread_startup:
  97. .word rtthread_startup
  98. stack_setup:
  99. ldr r0, =stack_top
  100. @ Set the startup stack for svc
  101. mov sp, r0
  102. @ Enter Undefined Instruction Mode and set its Stack Pointer
  103. msr cpsr_c, #Mode_UND|I_Bit|F_Bit
  104. mov sp, r0
  105. sub r0, r0, #UND_Stack_Size
  106. @ Enter Abort Mode and set its Stack Pointer
  107. msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
  108. mov sp, r0
  109. sub r0, r0, #ABT_Stack_Size
  110. @ Enter FIQ Mode and set its Stack Pointer
  111. msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
  112. mov sp, r0
  113. sub r0, r0, #RT_FIQ_STACK_PGSZ
  114. @ Enter IRQ Mode and set its Stack Pointer
  115. msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
  116. mov sp, r0
  117. sub r0, r0, #RT_IRQ_STACK_PGSZ
  118. /* come back to SVC mode */
  119. msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
  120. bx lr
  121. .global enable_mmu
  122. enable_mmu:
  123. orr r0, #0x18
  124. mcr p15, 0, r0, c2, c0, 0 @ttbr0
  125. mov r0, #(1 << 5) @PD1=1
  126. mcr p15, 0, r0, c2, c0, 2 @ttbcr
  127. mov r0, #1
  128. mcr p15, 0, r0, c3, c0, 0 @dacr
  129. mov r0, #0
  130. mcr p15, 0, r0, c8, c7, 0
  131. mcr p15, 0, r0, c7, c5, 0 @iciallu
  132. mcr p15, 0, r0, c7, c5, 6 @bpiall
  133. mrc p15, 0, r0, c1, c0, 0
  134. orr r0, #(1 | 4)
  135. orr r0, #(1 << 12)
  136. mcr p15, 0, r0, c1, c0, 0
  137. dsb
  138. isb
  139. mov pc, lr
  140. .global flush_cache_all
  141. flush_cache_all:
  142. stmfd sp!, {r0-r12, lr}
  143. bl v7_flush_dcache_all
  144. mov r0, #0
  145. mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
  146. dsb
  147. isb
  148. ldmfd sp!, {r0-r12, lr}
  149. mov pc, lr
  150. v7_flush_dcache_all:
  151. dmb @ ensure ordering with previous memory accesses
  152. mrc p15, 1, r0, c0, c0, 1 @ read clidr
  153. ands r3, r0, #0x7000000 @ extract loc from clidr
  154. mov r3, r3, lsr #23 @ left align loc bit field
  155. beq finished @ if loc is 0, then no need to clean
  156. mov r10, #0 @ start clean at cache level 0
  157. loop1:
  158. add r2, r10, r10, lsr #1 @ work out 3x current cache level
  159. mov r1, r0, lsr r2 @ extract cache type bits from clidr
  160. and r1, r1, #7 @ mask of the bits for current cache only
  161. cmp r1, #2 @ see what cache we have at this level
  162. blt skip @ skip if no cache, or just i-cache
  163. mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
  164. isb @ isb to sych the new cssr&csidr
  165. mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
  166. and r2, r1, #7 @ extract the length of the cache lines
  167. add r2, r2, #4 @ add 4 (line length offset)
  168. ldr r4, =0x3ff
  169. ands r4, r4, r1, lsr #3 @ find maximum number on the way size
  170. clz r5, r4 @ find bit position of way size increment
  171. ldr r7, =0x7fff
  172. ands r7, r7, r1, lsr #13 @ extract max number of the index size
  173. loop2:
  174. mov r9, r4 @ create working copy of max way size
  175. loop3:
  176. orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
  177. orr r11, r11, r7, lsl r2 @ factor index number into r11
  178. mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
  179. subs r9, r9, #1 @ decrement the way
  180. bge loop3
  181. subs r7, r7, #1 @ decrement the index
  182. bge loop2
  183. skip:
  184. add r10, r10, #2 @ increment cache number
  185. cmp r3, r10
  186. bgt loop1
  187. finished:
  188. mov r10, #0 @ swith back to cache level 0
  189. mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
  190. dsb
  191. isb
  192. mov pc, lr
  193. /* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
  194. .section .text.isr, "ax"
  195. .align 5
  196. .globl vector_fiq
  197. vector_fiq:
  198. stmfd sp!,{r0-r7,lr}
  199. bl rt_hw_trap_fiq
  200. ldmfd sp!,{r0-r7,lr}
  201. subs pc, lr, #4
  202. .globl rt_interrupt_enter
  203. .globl rt_interrupt_leave
  204. .globl rt_thread_switch_interrupt_flag
  205. .globl rt_interrupt_from_thread
  206. .globl rt_interrupt_to_thread
  207. .align 5
  208. .globl vector_irq
  209. vector_irq:
  210. #ifdef RT_USING_SMP
  211. clrex
  212. #endif
  213. stmfd sp!, {r0-r12,lr}
  214. bl rt_interrupt_enter
  215. bl rt_hw_trap_irq
  216. bl rt_interrupt_leave
  217. #ifdef RT_USING_SMP
  218. mov r0, sp
  219. bl rt_scheduler_do_irq_switch
  220. ldmfd sp!, {r0-r12,lr}
  221. subs pc, lr, #4
  222. #else
  223. @ if rt_thread_switch_interrupt_flag set, jump to
  224. @ rt_hw_context_switch_interrupt_do and don't return
  225. ldr r0, =rt_thread_switch_interrupt_flag
  226. ldr r1, [r0]
  227. cmp r1, #1
  228. beq rt_hw_context_switch_interrupt_do
  229. ldmfd sp!, {r0-r12,lr}
  230. subs pc, lr, #4
  231. rt_hw_context_switch_interrupt_do:
  232. mov r1, #0 @ clear flag
  233. str r1, [r0]
  234. mov r1, sp @ r1 point to {r0-r3} in stack
  235. add sp, sp, #4*4
  236. ldmfd sp!, {r4-r12,lr}@ reload saved registers
  237. mrs r0, spsr @ get cpsr of interrupt thread
  238. sub r2, lr, #4 @ save old task's pc to r2
  239. @ Switch to SVC mode with no interrupt. If the usr mode guest is
  240. @ interrupted, this will just switch to the stack of kernel space.
  241. @ save the registers in kernel space won't trigger data abort.
  242. msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
  243. stmfd sp!, {r2} @ push old task's pc
  244. stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
  245. ldmfd r1, {r1-r4} @ restore r0-r3 of the interrupt thread
  246. stmfd sp!, {r1-r4} @ push old task's r0-r3
  247. stmfd sp!, {r0} @ push old task's cpsr
  248. #ifdef RT_USING_LWP
  249. stmfd sp, {r13, r14}^ @push usr_sp, usr_lr
  250. sub sp, #8
  251. #endif
  252. ldr r4, =rt_interrupt_from_thread
  253. ldr r5, [r4]
  254. str sp, [r5] @ store sp in preempted tasks's TCB
  255. ldr r6, =rt_interrupt_to_thread
  256. ldr r6, [r6]
  257. ldr sp, [r6] @ get new task's stack pointer
  258. #ifdef RT_USING_LWP
  259. ldmfd sp, {r13, r14}^ @pop usr_sp, usr_lr
  260. add sp, #8
  261. #endif
  262. ldmfd sp!, {r4} @ pop new task's cpsr to spsr
  263. msr spsr_cxsf, r4
  264. ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
  265. #endif
  266. .macro push_svc_reg
  267. sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */
  268. stmia sp, {r0 - r12} @/* Calling r0-r12 */
  269. mov r0, sp
  270. mrs r6, spsr @/* Save CPSR */
  271. str lr, [r0, #15*4] @/* Push PC */
  272. str r6, [r0, #16*4] @/* Push CPSR */
  273. cps #Mode_SVC
  274. str sp, [r0, #13*4] @/* Save calling SP */
  275. str lr, [r0, #14*4] @/* Save calling PC */
  276. .endm
  277. .align 5
  278. .globl vector_swi
  279. .weak SVC_Handler
  280. SVC_Handler:
  281. vector_swi:
  282. push_svc_reg
  283. bl rt_hw_trap_swi
  284. b .
  285. .align 5
  286. .globl vector_undef
  287. vector_undef:
  288. push_svc_reg
  289. bl rt_hw_trap_undef
  290. b .
  291. .align 5
  292. .globl vector_pabt
  293. vector_pabt:
  294. push_svc_reg
  295. bl rt_hw_trap_pabt
  296. b .
  297. .align 5
  298. .globl vector_dabt
  299. vector_dabt:
  300. push_svc_reg
  301. bl rt_hw_trap_dabt
  302. b .
  303. .align 5
  304. .globl vector_resv
  305. vector_resv:
  306. push_svc_reg
  307. bl rt_hw_trap_resv
  308. b .
  309. #ifdef RT_USING_SMP
  310. .global set_secondary_cpu_boot_address
  311. set_secondary_cpu_boot_address:
  312. ldr r0, =secondary_cpu_start
  313. mvn r1, #0 //0xffffffff
  314. ldr r2, =0x10000034
  315. str r1, [r2]
  316. str r0, [r2, #-4]
  317. mov pc, lr
  318. .global secondary_cpu_start
  319. secondary_cpu_start:
  320. mrc p15, 0, r1, c1, c0, 1
  321. mov r0, #(1<<6)
  322. orr r1, r0
  323. mcr p15, 0, r1, c1, c0, 1 //enable smp
  324. ldr r0, =mtbl
  325. ldr lr, =1f
  326. b enable_mmu
  327. 1:
  328. mrc p15, 0, r0, c1, c0, 0
  329. bic r0, #(1<<13)
  330. mcr p15, 0, r0, c1, c0, 0
  331. cps #Mode_IRQ
  332. ldr sp, =irq_stack_2_limit
  333. cps #Mode_FIQ
  334. ldr sp, =irq_stack_2_limit
  335. cps #Mode_SVC
  336. ldr sp, =svc_stack_2_limit
  337. b secondary_cpu_c_start
  338. #endif
  339. .bss
  340. .align 2 //align to 2~2=4
  341. svc_stack_2:
  342. .space (1 << 10)
  343. svc_stack_2_limit:
  344. irq_stack_2:
  345. .space (1 << 10)
  346. irq_stack_2_limit:
  347. .data
  348. #define DEVICE_MEM 0x10c06
  349. #define NORMAL_MEM 0x11c0e
  350. .align 14
  351. mtbl:
  352. //vaddr: 0x00000000
  353. .rept 0x100
  354. .word 0x0
  355. .endr
  356. //vaddr: 0x10000000
  357. .equ mmu_tbl_map_paddr, 0x10000000
  358. .rept 0x400
  359. .word mmu_tbl_map_paddr | DEVICE_MEM
  360. .equ mmu_tbl_map_paddr, mmu_tbl_map_paddr + 0x100000
  361. .endr
  362. //vaddr: 0x50000000
  363. .rept 0x100
  364. .word 0x0
  365. .endr
  366. //vaddr: 0x60000000
  367. .equ mmu_tbl_map_paddr, 0x60000000
  368. .rept 0x800
  369. .word mmu_tbl_map_paddr | NORMAL_MEM
  370. .equ mmu_tbl_map_paddr, mmu_tbl_map_paddr + 0x100000
  371. .endr
  372. //vaddr: 0xe0000000
  373. .rept 0x200
  374. .word 0x0
  375. .endr