start_gcc.S

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch checks
 *                             and switches to a new thread
 */
#include "rtconfig.h"

.equ Mode_USR,          0x10
.equ Mode_FIQ,          0x11
.equ Mode_IRQ,          0x12
.equ Mode_SVC,          0x13
.equ Mode_ABT,          0x17
.equ Mode_UND,          0x1B
.equ Mode_SYS,          0x1F

.equ I_Bit,             0x80 /* when I bit is set, IRQ is disabled */
.equ F_Bit,             0x40 /* when F bit is set, FIQ is disabled */

.equ UND_Stack_Size,    0x00000400
.equ SVC_Stack_Size,    0x00000400
.equ ABT_Stack_Size,    0x00000400
.equ RT_FIQ_STACK_PGSZ, 0x00000000
.equ RT_IRQ_STACK_PGSZ, 0x00000800
.equ USR_Stack_Size,    0x00000400

#define ISR_Stack_Size  (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
                         RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
.section .data.share.isr
/* stack */
.globl stack_start
.globl stack_top

stack_start:
.rept ISR_Stack_Size
.byte 0
.endr
stack_top:
#ifdef RT_USING_USERSPACE
.data
.align 14
init_mtbl:
    .space 16*1024
#endif
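/*
 * init_mtbl is the bootstrap first-level page table: an ARMv7 short-descriptor
 * L1 table holds 4096 word-sized section entries (16 KB) and must be 16 KB
 * aligned, which .align 14 (2^14) and .space 16*1024 provide.
 */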
.text
/* reset entry */
.globl _reset
_reset:
#ifdef ARCH_ARMV8
    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue

overHyped: /* Get out of HYP mode */
    adr r1, continue
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    and r1, r1, #0x1f    /* CPSR_MODE_MASK */
    orr r1, r1, #0x13    /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret
continue:
#endif
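/*
 * If the boot firmware hands over control in HYP mode (mode bits 0x1A), the
 * sequence above programs ELR_hyp/SPSR_hyp so that eret "returns" to the
 * continue label in SVC mode, with the remaining CPSR bits preserved.
 */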
#ifdef SOC_BCM283x
    /* Suspend the other cpu cores: read the CPU ID from MPIDR and
     * park every core except core 0 */
    mrc p15, 0, r0, c0, c0, 5
    ands r0, #3
    bne _halt

    /* Disable IRQ & FIQ */
    cpsid if

    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue

overHyped: /* Get out of HYP mode */
    adr r1, continue
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    and r1, r1, #0x1f    /* CPSR_MODE_MASK */
    orr r1, r1, #0x13    /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret
continue:

    /* set the cpu to SVC32 mode and disable interrupts */
    mrs r0, cpsr
    bic r0, r0, #0x1f
    orr r0, r0, #0x13
    msr cpsr_c, r0
#endif
#ifdef RT_USING_USERSPACE
    ldr r5, =PV_OFFSET

    mov r7, #0x100000
    sub r7, #1
    mvn r8, r7
    ldr r9, =KERNEL_VADDR_START

    ldr r6, =__bss_end
    add r6, r7
    and r6, r8           /* r6: end vaddr, rounded up to a 1MB boundary */
    sub r6, r9           /* r6: kernel image size */

    ldr sp, =stack_top
    add sp, r5           /* use paddr */

    ldr r0, =init_mtbl
    add r0, r5
    mov r1, r6
    mov r2, r5
    bl init_mm_setup

    ldr lr, =after_enable_mmu
    ldr r0, =init_mtbl
    add r0, r5
    b enable_mmu

after_enable_mmu:
#endif
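/*
 * Until the MMU is on, the kernel executes from physical addresses while its
 * symbols are linked at virtual addresses; r5 holds PV_OFFSET (physical minus
 * virtual), so adding it to a linked address yields the address that works
 * right now. init_mm_setup, a C helper defined elsewhere in this port, is
 * handed the table, the rounded kernel size, and the offset, presumably to
 * fill in the bootstrap mappings; lr is preloaded so that the return from
 * enable_mmu lands on after_enable_mmu at its virtual address.
 */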
#ifndef SOC_BCM283x
    /* set the cpu to SVC32 mode and disable interrupts */
    cps #Mode_SVC
#endif

#ifdef RT_USING_FPU
    /* grant access to the coprocessors (CPACR) so VFP/NEON can be used */
    mov r4, #0xfffffff
    mcr p15, 0, r4, c1, c0, 2
#endif

    /* disable the data alignment check (clear the SCTLR A bit) */
    mrc p15, 0, r1, c1, c0, 0
    bic r1, #(1<<1)
    mcr p15, 0, r1, c1, c0, 0

    /* setup stack */
    bl stack_setup

    /* clear .bss */
    mov r0, #0               /* get a zero */
    ldr r1, =__bss_start     /* bss start */
    ldr r2, =__bss_end       /* bss end */

bss_loop:
    cmp r1, r2               /* check if data to clear */
    strlo r0, [r1], #4       /* clear 4 bytes */
    blo bss_loop             /* loop until done */

#ifdef RT_USING_SMP
    mrc p15, 0, r1, c1, c0, 1
    mov r0, #(1<<6)
    orr r1, r0
    mcr p15, 0, r1, c1, c0, 1 /* enable smp */
#endif
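/*
 * c1, c0, 1 is the Auxiliary Control Register; on many Cortex-A cores
 * (e.g. Cortex-A7/A9) bit 6 is the SMP bit, which must be set before the
 * data cache is enabled so this core participates in coherency.
 */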
    /* initialize the mmu table and enable mmu */
    ldr r0, =platform_mem_desc
    ldr r1, =platform_mem_desc_size
    ldr r1, [r1]
    bl rt_hw_init_mmu_table

#ifdef RT_USING_USERSPACE
    ldr r0, =MMUTable    /* vaddr */
    add r0, r5           /* to paddr */
    bl switch_mmu
#else
    bl rt_hw_mmu_init
#endif

    /* call C++ constructors of global objects */
    ldr r0, =__ctors_start__
    ldr r1, =__ctors_end__

ctor_loop:
    cmp r0, r1
    beq ctor_end
    ldr r2, [r0], #4
    stmfd sp!, {r0-r1}
    mov lr, pc
    bx r2
    ldmfd sp!, {r0-r1}
    b ctor_loop
ctor_end:

    /* start RT-Thread Kernel */
    ldr pc, _rtthread_startup

_rtthread_startup:
    .word rtthread_startup
stack_setup:
    ldr r0, =stack_top

    /* Set the startup stack for svc */
    mov sp, r0

    /* Enter Undefined Instruction Mode and set its Stack Pointer */
    msr cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #UND_Stack_Size

    /* Enter Abort Mode and set its Stack Pointer */
    msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #ABT_Stack_Size

    /* Enter FIQ Mode and set its Stack Pointer */
    msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #RT_FIQ_STACK_PGSZ

    /* Enter IRQ Mode and set its Stack Pointer */
    msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov sp, r0
    sub r0, r0, #RT_IRQ_STACK_PGSZ

    /* come back to SVC mode */
    msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx lr
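/*
 * stack_setup carves the exception-mode stacks downward from stack_top: each
 * mode's sp is set to the current r0, which is then lowered by that mode's
 * size, giving the order UND, ABT, FIQ, IRQ. The startup SVC stack also
 * begins at stack_top, and with RT_FIQ_STACK_PGSZ defined as 0 above, the
 * FIQ and IRQ modes start from the same address.
 */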
#ifdef RT_USING_USERSPACE
.align 2
.global enable_mmu
enable_mmu:
    orr r0, #0x18             /* table-walk attribute bits (RGN) */
    mcr p15, 0, r0, c2, c0, 0 /* ttbr0 */

    mov r0, #(1 << 5)         /* PD1=1 */
    mcr p15, 0, r0, c2, c0, 2 /* ttbcr */

    mov r0, #1
    mcr p15, 0, r0, c3, c0, 0 /* dacr */

    /* invalidate the TLB before enabling the mmu */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0
    mcr p15, 0, r0, c7, c5, 0 /* iciallu */
    mcr p15, 0, r0, c7, c5, 6 /* bpiall */

    mrc p15, 0, r0, c1, c0, 0
    orr r0, #((1 << 12) | (1 << 11)) /* instruction cache, branch prediction */
    orr r0, #((1 << 2) | (1 << 0))   /* data cache, mmu enable */
    mcr p15, 0, r0, c1, c0, 0
    dsb
    isb
    mov pc, lr

.global set_process_id
set_process_id:
    mcr p15, 0, r0, c13, c0, 1 /* CONTEXTIDR */
    mov pc, lr

.global switch_mmu
switch_mmu:
    orr r0, #0x18
    mcr p15, 0, r0, c2, c0, 0 /* ttbr0 */

    /* invalidate the TLB */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0
    mcr p15, 0, r0, c7, c5, 0 /* iciallu */
    mcr p15, 0, r0, c7, c5, 6 /* bpiall */
    dsb
    isb
    mov pc, lr

.global mmu_table_get
mmu_table_get:
    mrc p15, 0, r0, c2, c0, 0 /* ttbr0 */
    bic r0, #0x18
    mov pc, lr
#endif
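/*
 * enable_mmu is entered with lr preloaded by its callers (see _reset and
 * rt_secondary_cpu_entry), so the final "mov pc, lr" transfers control to a
 * caller-chosen virtual address instead of returning to the physical address
 * the branch came from.
 */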
_halt:
    wfe
    b _halt

/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq */
.section .text.isr, "ax"
.align 5
.globl vector_fiq
vector_fiq:
    stmfd sp!, {r0-r7,lr}
    bl rt_hw_trap_fiq
    ldmfd sp!, {r0-r7,lr}
    subs pc, lr, #4          /* return and restore CPSR from SPSR_fiq */
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check

.align 5
.globl vector_irq
vector_irq:
#ifdef RT_USING_SMP
    clrex

    stmfd sp!, {r0, r1}

    cps #Mode_SVC
    mov r0, sp               /* svc_sp */
    mov r1, lr               /* svc_lr */

    cps #Mode_IRQ
    sub lr, #4
    stmfd r0!, {r1, lr}      /* svc_lr, svc_pc */
    stmfd r0!, {r2 - r12}

    ldmfd sp!, {r1, r2}      /* original r0, r1 */
    stmfd r0!, {r1 - r2}

    mrs r1, spsr             /* original mode */
    stmfd r0!, {r1}

#ifdef RT_USING_LWP
    stmfd r0, {r13, r14}^    /* usr_sp, usr_lr */
    sub r0, #8
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst r6, #(1<<30)         /* FPEXC.EN: save VFP state only if enabled */
    beq 1f
    vstmdb r0!, {d0-d15}
    vstmdb r0!, {d16-d31}
    vmrs r5, fpscr
    stmfd r0!, {r5}
1:
    stmfd r0!, {r6}
#endif

    /* now irq stack is clean */
    /* r0 is task svc_sp */
    /* backup r0 -> r8 */
    mov r8, r0
    cps #Mode_SVC
    mov sp, r8

    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave

    mov r0, r8
    bl rt_scheduler_do_irq_switch

    b rt_hw_context_switch_exit
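    /*
     * On this SMP path the whole interrupted context (cpsr, r0-r12, lr, pc,
     * plus the optional user sp/lr and VFP state) now sits on the interrupted
     * thread's SVC stack, so rt_scheduler_do_irq_switch can either resume it
     * or switch threads; rt_hw_context_switch_exit unwinds that frame.
     */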
#else
    stmfd sp!, {r0-r12,lr}

    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave

    /* if rt_thread_switch_interrupt_flag is set, jump to
     * rt_hw_context_switch_interrupt_do and don't return */
    ldr r0, =rt_thread_switch_interrupt_flag
    ldr r1, [r0]
    cmp r1, #1
    beq rt_hw_context_switch_interrupt_do

#ifdef RT_USING_LWP
    ldmfd sp!, {r0-r12,lr}
    cps #Mode_SVC
    push {r0-r12}
    mov r7, lr
    cps #Mode_IRQ
    mrs r4, spsr
    sub r5, lr, #4
    cps #Mode_SVC
    bl lwp_check_exit
    and r6, r4, #0x1f
    cmp r6, #0x10            /* was the interrupted code in user mode? */
    bne 1f
    msr spsr_csxf, r4
    mov lr, r5
    pop {r0-r12}
    b ret_to_user
1:
    mov lr, r7
    cps #Mode_IRQ
    msr spsr_csxf, r4
    mov lr, r5
    cps #Mode_SVC
    pop {r0-r12}
    cps #Mode_IRQ
    movs pc, lr
#else
    ldmfd sp!, {r0-r12,lr}
    subs pc, lr, #4
#endif
rt_hw_context_switch_interrupt_do:
    mov r1, #0               /* clear flag */
    str r1, [r0]

    mov r1, sp               /* r1 points to {r0-r3} in stack */
    add sp, sp, #4*4
    ldmfd sp!, {r4-r12,lr}   /* reload saved registers */

    mrs r0, spsr             /* get cpsr of the interrupted thread */
    sub r2, lr, #4           /* save old task's pc to r2 */

    /* Switch to SVC mode with interrupts disabled. If a usr-mode guest was
     * interrupted, this just switches to the kernel-space stack, so saving
     * the registers there won't trigger a data abort. */
    msr cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd sp!, {r2}          /* push old task's pc */
    stmfd sp!, {r4-r12,lr}   /* push old task's lr,r12-r4 */
    ldmfd r1, {r1-r4}        /* restore r0-r3 of the interrupted thread */
    stmfd sp!, {r1-r4}       /* push old task's r0-r3 */
    stmfd sp!, {r0}          /* push old task's cpsr */

#ifdef RT_USING_LWP
    stmfd sp, {r13, r14}^    /* push usr_sp, usr_lr */
    sub sp, #8
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst r6, #(1<<30)
    beq 1f
    vstmdb sp!, {d0-d15}
    vstmdb sp!, {d16-d31}
    vmrs r5, fpscr
    stmfd sp!, {r5}
1:
    stmfd sp!, {r6}
#endif

    ldr r4, =rt_interrupt_from_thread
    ldr r5, [r4]
    str sp, [r5]             /* store sp in the preempted task's TCB */

    ldr r6, =rt_interrupt_to_thread
    ldr r6, [r6]
    ldr sp, [r6]             /* get new task's stack pointer */

#ifdef RT_USING_USERSPACE
    ldr r1, =rt_current_thread
    ldr r0, [r1]
    bl lwp_mmu_switch
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    ldmfd sp!, {r6}
    vmsr fpexc, r6
    tst r6, #(1<<30)
    beq 1f
    ldmfd sp!, {r5}
    vmsr fpscr, r5
    vldmia sp!, {d16-d31}
    vldmia sp!, {d0-d15}
1:
#endif

#ifdef RT_USING_LWP
    ldmfd sp, {r13, r14}^    /* pop usr_sp, usr_lr */
    add sp, #8
#endif

    ldmfd sp!, {r4}          /* pop new task's cpsr to spsr */
    msr spsr_cxsf, r4

#ifdef RT_USING_GDBSERVER
    bl lwp_check_debug
#endif
#ifdef RT_USING_LWP
    bl lwp_check_exit
#endif

#ifdef RT_USING_LWP
    and r4, #0x1f
    cmp r4, #0x10            /* returning to user mode? */
    bne 1f
    ldmfd sp!, {r0-r12,lr}
    ldmfd sp!, {lr}
    b ret_to_user
1:
#endif

    /* pop new task's r0-r12,lr & pc, copy spsr to cpsr */
    ldmfd sp!, {r0-r12,lr,pc}^
#endif
.macro push_svc_reg
    sub sp, sp, #17 * 4      /* sizeof(struct rt_hw_exp_stack) */
    stmia sp, {r0 - r12}     /* calling r0-r12 */
    mov r0, sp
    add sp, sp, #17 * 4
    mrs r6, spsr             /* save CPSR */
    str lr, [r0, #15*4]      /* push PC */
    str r6, [r0, #16*4]      /* push CPSR */
    and r1, r6, #0x1f
    cmp r1, #0x10
    cps #Mode_SYS
    streq sp, [r0, #13*4]    /* save calling SP */
    streq lr, [r0, #14*4]    /* save calling PC */
    cps #Mode_SVC
    strne sp, [r0, #13*4]    /* save calling SP */
    strne lr, [r0, #14*4]    /* save calling PC */
.endm
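/*
 * push_svc_reg builds a 17-word struct rt_hw_exp_stack frame
 * (r0-r12, sp, lr, pc, cpsr) below the current sp and leaves its address in
 * r0 for the C trap handlers. The sp/lr slots are taken from Mode_SYS when
 * the exception came from user mode (mode bits 0x10) and from SVC otherwise;
 * note the macro exits in SVC mode.
 */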
.align 5
.weak vector_swi
vector_swi:
    push_svc_reg
    bl rt_hw_trap_swi
    b .

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl rt_hw_trap_undef
    cps #Mode_UND
#ifdef RT_USING_FPU
    /* if the trap handler resolved the fault (e.g. by enabling the FPU),
     * restore the saved frame and return to the trapped context */
    sub sp, sp, #17 * 4
    ldr lr, [sp, #15*4]
    ldmia sp, {r0 - r12}
    add sp, sp, #17 * 4
    movs pc, lr
#endif
    b .
.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
#ifdef RT_USING_USERSPACE
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub sp, sp, #17 * 4      /* sizeof(struct rt_hw_exp_stack) */
    mov lr, r0
    ldmia lr, {r0 - r12}
    stmia sp, {r0 - r12}
    add r1, lr, #13 * 4
    add r2, sp, #13 * 4
    ldmia r1, {r4 - r7}
    stmia r2, {r4 - r7}
    mov r0, sp
    bl rt_hw_trap_pabt

    /* return to user */
    ldr lr, [sp, #16*4]      /* original spsr */
    msr spsr_cxsf, lr
    ldr lr, [sp, #15*4]      /* original pc */
    ldmia sp, {r0 - r12}
    add sp, #17 * 4
    b ret_to_user
#else
    bl rt_hw_trap_pabt
    b .
#endif
.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
#ifdef RT_USING_USERSPACE
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub sp, sp, #17 * 4      /* sizeof(struct rt_hw_exp_stack) */
    mov lr, r0
    ldmia lr, {r0 - r12}
    stmia sp, {r0 - r12}
    add r1, lr, #13 * 4
    add r2, sp, #13 * 4
    ldmia r1, {r4 - r7}
    stmia r2, {r4 - r7}
    mov r0, sp
    bl rt_hw_trap_dabt

    /* return to user */
    ldr lr, [sp, #16*4]      /* original spsr */
    msr spsr_cxsf, lr
    ldr lr, [sp, #15*4]      /* original pc */
    ldmia sp, {r0 - r12}
    add sp, #17 * 4
    b ret_to_user
#else
    bl rt_hw_trap_dabt
    b .
#endif

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl rt_hw_trap_resv
    b .
#ifdef RT_USING_SMP
.global rt_clz
rt_clz:
    clz r0, r0
    bx lr

.global rt_secondary_cpu_entry
rt_secondary_cpu_entry:
#ifdef RT_USING_USERSPACE
    ldr r5, =PV_OFFSET

    ldr lr, =after_enable_mmu2
    ldr r0, =init_mtbl
    add r0, r5
    b enable_mmu

after_enable_mmu2:
    ldr r0, =MMUTable
    add r0, r5
    bl switch_mmu
#endif

#ifdef RT_USING_FPU
    mov r4, #0xfffffff
    mcr p15, 0, r4, c1, c0, 2
#endif

    mrc p15, 0, r1, c1, c0, 1
    mov r0, #(1<<6)
    orr r1, r0
    mcr p15, 0, r1, c1, c0, 1 /* enable smp */

    mrc p15, 0, r0, c1, c0, 0
    bic r0, #(1<<13)          /* clear SCTLR.V: use the low vector base */
    mcr p15, 0, r0, c1, c0, 0

    cps #Mode_UND
    ldr sp, =und_stack_2_limit

    cps #Mode_IRQ
    ldr sp, =irq_stack_2_limit

    cps #Mode_FIQ
    ldr sp, =irq_stack_2_limit

    cps #Mode_SVC
    ldr sp, =svc_stack_2_limit

    cps #Mode_ABT
    ldr sp, =abt_stack_2_limit

    /* initialize the mmu table and enable mmu */
#ifndef RT_USING_USERSPACE
    bl rt_hw_mmu_init
#endif

    b rt_hw_secondary_cpu_bsp_start
#endif
.bss
.align 2    /* align to 2^2 = 4 bytes */

/* per-mode stacks for the secondary cpu; each *_limit label marks the high
 * end of its 1 KB stack, since stacks grow downward */
svc_stack_2:
    .space (1 << 10)
svc_stack_2_limit:

irq_stack_2:
    .space (1 << 10)
irq_stack_2_limit:

und_stack_2:
    .space (1 << 10)
und_stack_2_limit:

abt_stack_2:
    .space (1 << 10)
abt_stack_2_limit: