start_gcc.S

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch
 *                             to check for and switch to a new thread
 */
#include "rtconfig.h"
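
/*
 * ARM processor mode values for the CPSR[4:0] mode field; the cps and
 * msr instructions in this file switch modes by writing these values.
 */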
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F

.equ I_Bit, 0x80 /* when I bit is set, IRQ is disabled */
.equ F_Bit, 0x40 /* when F bit is set, FIQ is disabled */
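
/*
 * Boot-time level-1 translation table used before the kernel's MMUTable
 * is ready. An ARMv7 short-descriptor L1 table has 4096 word-sized
 * entries (16 KB) and must be 16 KB aligned, hence .align 14 and
 * .space 16*1024.
 */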
#ifdef RT_USING_SMART
.data
.align 14
init_mtbl:
    .space 16*1024
#endif

.text
/* reset entry */
.globl _reset
_reset:
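    /*
     * Cores with the virtualization extensions may reset into HYP mode
     * (CPSR mode 0x1A). The kernel runs in SVC, so drop out of HYP with
     * an exception return (eret) whose target and mode are staged in
     * ELR_hyp/SPSR_hyp.
     */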
#ifdef ARCH_ARMV8
    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue

overHyped: /* Get out of HYP mode */
    adr r1, continue
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    bic r1, r1, #0x1f /* clear CPSR_MODE_MASK bits */
    orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret
continue:
#endif

#ifdef SOC_BCM283x
    /* Suspend the other cpu cores */
    mrc p15, 0, r0, c0, c0, 5
    ands r0, #3
    bne _halt

    /* Disable IRQ & FIQ */
    cpsid if

    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue

overHyped: /* Get out of HYP mode */
    adr r1, continue
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    bic r1, r1, #0x1f /* clear CPSR_MODE_MASK bits */
    orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret
continue:

    /* set the cpu to SVC32 mode and disable interrupt */
    mrs r0, cpsr
    bic r0, r0, #0x1f
    orr r0, r0, #0x13
    msr cpsr_c, r0
#endif

    /* disable the MMU, then invalidate TLB and caches before it is re-enabled */
    mrc p15, 0, r0, c1, c0, 0
    bic r0, #1
    mcr p15, 0, r0, c1, c0, 0
    dsb
    isb

    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0 /* tlbiall */
    mcr p15, 0, r0, c7, c5, 0 /* iciallu */
    mcr p15, 0, r0, c7, c5, 6 /* bpiall */
    dsb
    isb
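
    /*
     * Under RT_USING_SMART the kernel is linked at KERNEL_VADDR_START but
     * entered at its physical load address; PV_OFFSET is the delta between
     * the two, so early code adds it to convert linked (virtual) addresses
     * into physical ones until the MMU is turned on.
     */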
#ifdef RT_USING_SMART
    ldr r5, =PV_OFFSET

    mov r7, #0x100000
    sub r7, #1
    mvn r8, r7
    ldr r9, =KERNEL_VADDR_START

    ldr r6, =__bss_end
    add r6, r7
    and r6, r8 /* r6: end vaddr rounded up to 1 MB */
    sub r6, r9 /* r6: size of the kernel image */

    ldr sp, =svc_stack_n_limit
    add sp, r5 /* use paddr */

    ldr r0, =init_mtbl
    add r0, r5
    mov r1, r6
    mov r2, r5
    bl init_mm_setup

    ldr lr, =after_enable_mmu
    ldr r0, =init_mtbl
    add r0, r5
    b enable_mmu

after_enable_mmu:
#endif

#ifndef SOC_BCM283x
    /* set the cpu to SVC32 mode and disable interrupt */
    cps #Mode_SVC
#endif

#ifdef RT_USING_FPU
    mov r4, #0xfffffff
    mcr p15, 0, r4, c1, c0, 2 /* CPACR: allow full access to all coprocessors */
#endif

    /* disable the data alignment check */
    mrc p15, 0, r1, c1, c0, 0
    bic r1, #(1<<1)
    mcr p15, 0, r1, c1, c0, 0

    /* enable I-cache + branch prediction */
    mrc p15, 0, r0, c1, c0, 0
    orr r0, r0, #(1<<12)
    orr r0, r0, #(1<<11)
    mcr p15, 0, r0, c1, c0, 0

    /* setup stack */
    bl stack_setup

    /* clear .bss */
    mov r0, #0           /* get a zero */
    ldr r1, =__bss_start /* bss start */
    ldr r2, =__bss_end   /* bss end */

bss_loop:
    cmp r1, r2         /* check if there is data to clear */
    strlo r0, [r1], #4 /* clear 4 bytes */
    blo bss_loop       /* loop until done */
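
    /*
     * ACTLR.SMP (bit 6) makes this core take part in cache coherency;
     * on Cortex-A class cores it must be set before the caches are used
     * for shared data, hence it is done here, early in boot.
     */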
#ifdef RT_USING_SMP
    mrc p15, 0, r1, c1, c0, 1
    mov r0, #(1<<6)
    orr r1, r0
    mcr p15, 0, r1, c1, c0, 1 /* enable smp */
#endif

    /* initialize the mmu table and enable mmu */
    ldr r0, =platform_mem_desc
    ldr r1, =platform_mem_desc_size
    ldr r1, [r1]
    bl rt_hw_init_mmu_table

#ifdef RT_USING_SMART
    ldr r0, =MMUTable /* vaddr */
    add r0, r5        /* to paddr */
    bl rt_hw_mmu_switch
#else
    bl rt_hw_mmu_init
#endif

    /* start RT-Thread Kernel */
    ldr pc, _rtthread_startup
_rtthread_startup:
    .word rtthread_startup
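
/*
 * Give every exception mode its own stack. r0 holds cpu_id + 1, and
 * "add sp, r1, r0, asl #12" computes base + (cpu_id + 1) * 4 KB, i.e.
 * the top of this CPU's 4 KB slot (ARM stacks are full-descending).
 */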
stack_setup:
#ifdef RT_USING_SMP
    /* cpu id */
    mrc p15, 0, r0, c0, c0, 5
    and r0, r0, #0xf
    add r0, r0, #1
#else
    mov r0, #1
#endif

    cps #Mode_UND
    ldr r1, =und_stack_n
    add sp, r1, r0, asl #12

    cps #Mode_IRQ
    ldr r1, =irq_stack_n
    add sp, r1, r0, asl #12

    cps #Mode_FIQ
    ldr r1, =irq_stack_n /* FIQ shares the IRQ stack area */
    add sp, r1, r0, asl #12

    cps #Mode_ABT
    ldr r1, =abt_stack_n
    add sp, r1, r0, asl #12

    cps #Mode_SVC
    ldr r1, =svc_stack_n
    add sp, r1, r0, asl #12

    bx lr

#ifdef RT_USING_SMART
.align 2
.global enable_mmu
enable_mmu:
    orr r0, #0x18
    mcr p15, 0, r0, c2, c0, 0 /* ttbr0 */

    mov r0, #(1 << 5) /* PD1=1 */
    mcr p15, 0, r0, c2, c0, 2 /* ttbcr */

    mov r0, #1
    mcr p15, 0, r0, c3, c0, 0 /* dacr */

    /* invalidate TLB and caches before enabling the mmu */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0 /* tlbiall */
    mcr p15, 0, r0, c7, c5, 0 /* iciallu */
    mcr p15, 0, r0, c7, c5, 6 /* bpiall */

    mrc p15, 0, r0, c1, c0, 0
    orr r0, #((1 << 12) | (1 << 11)) /* instruction cache, branch prediction */
    orr r0, #((1 << 2) | (1 << 0))   /* data cache, mmu enable */
    mcr p15, 0, r0, c1, c0, 0
    dsb
    isb
    mov pc, lr
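
/*
 * rt_hw_set_process_id writes CONTEXTIDR (c13, c0, 1); the PROCID field
 * occupies bits [31:8], hence the left shift by 8.
 */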
.global rt_hw_set_process_id
rt_hw_set_process_id:
    lsl r0, r0, #8
    mcr p15, 0, r0, c13, c0, 1
    mov pc, lr
#endif
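
/*
 * rt_hw_mmu_switch installs a new level-1 table in TTBR0; the low bits
 * of TTBR0 carry table-walk attributes (0x18 here), and the TLB and
 * branch predictor are invalidated afterwards so no stale translations
 * survive the switch.
 */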
.global rt_hw_mmu_switch
rt_hw_mmu_switch:
    orr r0, #0x18
    mcr p15, 0, r0, c2, c0, 0 /* ttbr0 */

    /* invalidate tlb */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0
    mcr p15, 0, r0, c7, c5, 0 /* iciallu */
    mcr p15, 0, r0, c7, c5, 6 /* bpiall */

    dsb
    isb
    mov pc, lr

.global rt_hw_mmu_tbl_get
rt_hw_mmu_tbl_get:
    mrc p15, 0, r0, c2, c0, 0 /* ttbr0 */
    bic r0, #0x18
    mov pc, lr

_halt:
    wfe
    b _halt

/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq */
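
/*
 * "subs pc, lr, #4" and "movs pc, lr" are exception returns: they
 * restore the PC and copy SPSR back into CPSR in one instruction. For
 * IRQ/FIQ the banked lr points one instruction past the return address,
 * hence the -4 adjustment.
 */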
.section .text.isr, "ax"
.align 5
.globl vector_fiq
vector_fiq:
    stmfd sp!, {r0-r7,lr}
    bl rt_hw_trap_fiq
    ldmfd sp!, {r0-r7,lr}
    subs pc, lr, #4

.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check
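
/*
 * vector_irq saves the interrupted context, runs the C interrupt
 * handler, and on the way out gives the scheduler a chance to switch
 * threads if a handler requested a reschedule.
 */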
.align 5
.globl vector_irq
vector_irq:
#ifdef RT_USING_SMP
    clrex

    stmfd sp!, {r0, r1}

    cps #Mode_SVC
    mov r0, sp /* svc_sp */
    mov r1, lr /* svc_lr */

    cps #Mode_IRQ
    sub lr, #4
    stmfd r0!, {r1, lr} /* svc_lr, svc_pc */
    stmfd r0!, {r2 - r12}

    ldmfd sp!, {r1, r2} /* original r0, r1 */
    stmfd r0!, {r1 - r2}

    mrs r1, spsr /* original mode */
    stmfd r0!, {r1}

#ifdef RT_USING_SMART
    stmfd r0, {r13, r14}^ /* usr_sp, usr_lr */
    sub r0, #8
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst r6, #(1<<30)
    beq 1f
    vstmdb r0!, {d0-d15}
    vstmdb r0!, {d16-d31}
    vmrs r5, fpscr
    stmfd r0!, {r5}
1:
    stmfd r0!, {r6}
#endif

    /* now the irq stack is clean */
    /* r0 is the task's svc_sp */
    /* backup r0 -> r8 */
    mov r8, r0

    cps #Mode_SVC
    mov sp, r8

    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave

    mov r0, r8
    bl rt_scheduler_do_irq_switch

    b rt_hw_context_switch_exit
#else
    stmfd sp!, {r0-r12,lr}

    bl rt_interrupt_enter
    bl rt_hw_trap_irq
    bl rt_interrupt_leave

    /* if rt_thread_switch_interrupt_flag is set, jump to
     * rt_hw_context_switch_interrupt_do and don't return */
    ldr r0, =rt_thread_switch_interrupt_flag
    ldr r1, [r0]
    cmp r1, #1
    beq rt_hw_context_switch_interrupt_do

#ifdef RT_USING_SMART
    ldmfd sp!, {r0-r12,lr}
    cps #Mode_SVC
    push {r0-r12}
    mov r7, lr
    cps #Mode_IRQ
    mrs r4, spsr
    sub r5, lr, #4
    cps #Mode_SVC
    and r6, r4, #0x1f
    cmp r6, #0x10
    bne 1f
    msr spsr_cxsf, r4
    mov lr, r5
    pop {r0-r12}
    b arch_ret_to_user
1:
    mov lr, r7
    cps #Mode_IRQ
    msr spsr_cxsf, r4
    mov lr, r5
    cps #Mode_SVC
    pop {r0-r12}
    cps #Mode_IRQ
    movs pc, lr
#else
    ldmfd sp!, {r0-r12,lr}
    subs pc, lr, #4
#endif
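
/*
 * Reached from vector_irq when a handler requested a thread switch:
 * rebuild the preempted thread's full context on its SVC stack, save sp
 * into its TCB via rt_interrupt_from_thread, then load the new thread's
 * sp from rt_interrupt_to_thread and pop its context.
 */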
rt_hw_context_switch_interrupt_do:
    mov r1, #0    /* clear flag */
    str r1, [r0]

    mov r1, sp    /* r1 points to {r0-r3} in stack */
    add sp, sp, #4*4
    ldmfd sp!, {r4-r12,lr} /* reload saved registers */

    mrs r0, spsr   /* get cpsr of the interrupted thread */
    sub r2, lr, #4 /* save old task's pc to r2 */

    /* Switch to SVC mode with interrupts disabled. If a usr-mode guest
     * was interrupted, this just switches to the kernel-space stack, so
     * saving the registers there cannot trigger a data abort. */
    msr cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd sp!, {r2}        /* push old task's pc */
    stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */
    ldmfd r1, {r1-r4}      /* restore r0-r3 of the interrupted thread */
    stmfd sp!, {r1-r4}     /* push old task's r0-r3 */
    stmfd sp!, {r0}        /* push old task's cpsr */

#ifdef RT_USING_SMART
    stmfd sp, {r13, r14}^  /* push usr_sp, usr_lr */
    sub sp, #8
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst r6, #(1<<30)
    beq 1f
    vstmdb sp!, {d0-d15}
    vstmdb sp!, {d16-d31}
    vmrs r5, fpscr
    stmfd sp!, {r5}
1:
    stmfd sp!, {r6}
#endif

    ldr r4, =rt_interrupt_from_thread
    ldr r5, [r4]
    str sp, [r5] /* store sp in the preempted task's TCB */

    ldr r6, =rt_interrupt_to_thread
    ldr r6, [r6]
    ldr sp, [r6] /* get the new task's stack pointer */

    bl rt_thread_self
#ifdef RT_USING_SMART
    mov r4, r0
    bl lwp_aspace_switch
    mov r0, r4
    bl lwp_user_setting_restore
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    ldmfd sp!, {r6}
    vmsr fpexc, r6
    tst r6, #(1<<30)
    beq 1f
    ldmfd sp!, {r5}
    vmsr fpscr, r5
    vldmia sp!, {d16-d31}
    vldmia sp!, {d0-d15}
1:
#endif

#ifdef RT_USING_SMART
    ldmfd sp, {r13, r14}^ /* pop usr_sp, usr_lr */
    add sp, #8
#endif

    ldmfd sp!, {r4} /* pop the new task's cpsr to spsr */
    msr spsr_cxsf, r4

#ifdef RT_USING_SMART
    and r4, #0x1f
    cmp r4, #0x10
    bne 1f
    ldmfd sp!, {r0-r12,lr}
    ldmfd sp!, {lr}
    b arch_ret_to_user
1:
#endif
    /* pop the new task's r0-r12,lr & pc, copy spsr to cpsr */
    ldmfd sp!, {r0-r12,lr,pc}^
#endif /* RT_USING_SMP */
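
/*
 * push_svc_reg builds a 17-word exception frame (r0-r12, sp, lr, pc,
 * cpsr, matching struct rt_hw_exp_stack) below the current sp and leaves
 * r0 pointing at it; sp itself is restored. The banked sp/lr are read
 * from SYS mode when the exception came from user mode (spsr mode ==
 * 0x10), from SVC mode otherwise.
 */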
.macro push_svc_reg
    sub sp, sp, #17 * 4   /* sizeof(struct rt_hw_exp_stack) */
    stmia sp, {r0 - r12}  /* calling r0-r12 */
    mov r0, sp
    add sp, sp, #17 * 4
    mrs r6, spsr          /* save CPSR */
    str lr, [r0, #15*4]   /* push PC */
    str r6, [r0, #16*4]   /* push CPSR */
    and r1, r6, #0x1f
    cmp r1, #0x10
    cps #Mode_SYS
    streq sp, [r0, #13*4] /* save calling SP */
    streq lr, [r0, #14*4] /* save calling PC */
    cps #Mode_SVC
    strne sp, [r0, #13*4] /* save calling SP */
    strne lr, [r0, #14*4] /* save calling PC */
.endm

.align 5
.weak vector_swi
vector_swi:
    push_svc_reg
    bl rt_hw_trap_swi
    b .

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl rt_hw_trap_undef
    cps #Mode_UND
#ifdef RT_USING_FPU
    sub sp, sp, #17 * 4
    ldr lr, [sp, #15*4]
    ldmia sp, {r0 - r12}
    add sp, sp, #17 * 4
    movs pc, lr
#endif
    b .

.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
#ifdef RT_USING_SMART
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub sp, sp, #17 * 4 /* sizeof(struct rt_hw_exp_stack) */
    mov lr, r0
    ldmia lr, {r0 - r12}
    stmia sp, {r0 - r12}
    add r1, lr, #13 * 4
    add r2, sp, #13 * 4
    ldmia r1, {r4 - r7}
    stmia r2, {r4 - r7}
    mov r0, sp
    bl rt_hw_trap_pabt

    /* return to user */
    ldr lr, [sp, #16*4] /* original spsr */
    msr spsr_cxsf, lr
    ldr lr, [sp, #15*4] /* original pc */
    ldmia sp, {r0 - r12}
    add sp, #17 * 4
    b arch_ret_to_user
#else
    bl rt_hw_trap_pabt
    b .
#endif

.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
#ifdef RT_USING_SMART
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub sp, sp, #17 * 4 /* sizeof(struct rt_hw_exp_stack) */
    mov lr, r0
    ldmia lr, {r0 - r12}
    stmia sp, {r0 - r12}
    add r1, lr, #13 * 4
    add r2, sp, #13 * 4
    ldmia r1, {r4 - r7}
    stmia r2, {r4 - r7}
    mov r0, sp
    bl rt_hw_trap_dabt

    /* return to user */
    ldr lr, [sp, #16*4] /* original spsr */
    msr spsr_cxsf, lr
    ldr lr, [sp, #15*4] /* original pc */
    ldmia sp, {r0 - r12}
    add sp, #17 * 4
    b arch_ret_to_user
#else
    bl rt_hw_trap_dabt
    b .
#endif

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl rt_hw_trap_resv
    b .

#ifdef RT_USING_SMP
.global rt_hw_clz
rt_hw_clz:
    clz r0, r0
    bx lr
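
/*
 * Entry point for the secondary cores: switch to the boot page table
 * (under RT_USING_SMART), enable FPU access and SMP coherency, select
 * low vectors, set up the per-mode stacks, and branch into the C
 * bring-up code.
 */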
.global rt_secondary_cpu_entry
rt_secondary_cpu_entry:
#ifdef RT_USING_SMART
    ldr r5, =PV_OFFSET

    ldr lr, =after_enable_mmu_n
    ldr r0, =init_mtbl
    add r0, r5
    b enable_mmu

after_enable_mmu_n:
    ldr r0, =MMUTable
    add r0, r5
    bl rt_hw_mmu_switch
#endif

#ifdef RT_USING_FPU
    mov r4, #0xfffffff
    mcr p15, 0, r4, c1, c0, 2 /* CPACR: allow full access to all coprocessors */
#endif

    mrc p15, 0, r1, c1, c0, 1
    mov r0, #(1<<6)
    orr r1, r0
    mcr p15, 0, r1, c1, c0, 1 /* enable smp */

    mrc p15, 0, r0, c1, c0, 0
    bic r0, #(1<<13)
    mcr p15, 0, r0, c1, c0, 0 /* clear SCTLR.V: use low exception vectors */

    bl stack_setup

    /* initialize the mmu table and enable mmu */
#ifndef RT_USING_SMART
    bl rt_hw_mmu_init
#endif

    b rt_hw_secondary_cpu_bsp_start
#endif

#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif
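
/*
 * Per-mode stack areas: RT_CPUS_NR stacks of 4 KB (1 << 12) each.
 * svc_stack_n_limit marks the end of the SVC area and doubles as the
 * temporary boot stack under RT_USING_SMART (see its use above).
 */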
.bss
.align 3 /* align to 2^3 = 8 bytes */
svc_stack_n:
    .space (RT_CPUS_NR << 12)
svc_stack_n_limit:

irq_stack_n:
    .space (RT_CPUS_NR << 12)

und_stack_n:
    .space (RT_CPUS_NR << 12)

abt_stack_n:
    .space (RT_CPUS_NR << 12)