/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */
#include "rtconfig.h"

.section .text, "ax"

#ifdef RT_USING_SMP
/* In SMP builds the global IRQ on/off entry points below double as the
 * per-CPU (local) IRQ primitives: alias the names accordingly. */
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable  rt_hw_local_irq_enable
#endif
/*
 * rt_base_t rt_hw_interrupt_disable();
 *
 * Mask IRQs on the current CPU and return the previous CPSR so the
 * caller can later restore it with rt_hw_interrupt_enable().
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs     r0, cpsr            @ r0 = current CPSR (returned as the saved level)
    cpsid   i                   @ set the I bit: disable IRQ on this core
    bx      lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 *
 * Restore the CPSR value previously returned by rt_hw_interrupt_disable()
 * (r0 = level); this re-establishes the saved I-bit state.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr     cpsr, r0            @ write the saved CPSR back
    bx      lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to, struct rt_thread *to_thread);
 * r0 --> to (thread stack)
 * r1 --> to_thread
 *
 * First switch after the scheduler starts: there is no "from" context to
 * save.  Loads the new thread's saved sp and falls through to the common
 * restore path in rt_hw_context_switch_exit.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr     sp, [r0]                        @ sp = *to : new task stack pointer
#ifdef RT_USING_SMP
    mov     r0, r1                          @ arg0 = to_thread
    bl      rt_cpus_lock_status_restore     @ update the cpus-lock state for this thread
#else
#ifdef RT_USING_USERSPACE
    ldr     r1, =rt_current_thread
    ldr     r0, [r1]                        @ arg0 = rt_current_thread
    bl      lwp_mmu_switch                  @ presumably switches MMU tables for the lwp -- confirm
#endif
#endif /*RT_USING_SMP*/
    b       rt_hw_context_switch_exit       @ pop the new thread's full context
.section .bss.share.isr
/* Reserved word; not referenced in this file (appears to be left for
 * VMM/guest-interrupt support -- confirm against the VMM code). */
_guest_switch_lvl:
    .word 0

.globl vmm_virq_update

.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
 * r0 --> from (from_thread stack)
 * r1 --> to (to_thread stack)
 * r2 --> to_thread
 *
 * Save the current thread's full register context on its own stack,
 * record the resulting sp in *from, then load *to as the new sp and
 * restore the next thread via rt_hw_context_switch_exit.  The frame
 * layout pushed here must mirror exactly what the exit path pops.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}           @ push pc (lr is pushed in place of PC: the resume address)
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
    mrs     r4, cpsr
    tst     lr, #0x01           @ caller running Thumb code?
    orrne   r4, r4, #0x20       @ yes: set the T bit in the PSR we save
    stmfd   sp!, {r4}           @ push cpsr
#ifdef RT_USING_LWP
    stmfd   sp, {r13, r14}^     @ push usr_sp/usr_lr (banked form: writeback not allowed)
    sub     sp, #8              @ so adjust sp manually for the two words
#endif
#ifdef RT_USING_FPU
    /* fpu context: save d0-d31/fpscr only when the FPU is enabled (fpexc.EN, bit 30) */
    vmrs    r6, fpexc
    tst     r6, #(1<<30)        @ fpexc.EN set?
    beq     1f
    vstmdb  sp!, {d0-d15}
    vstmdb  sp!, {d16-d31}
    vmrs    r5, fpscr
    stmfd   sp!, {r5}
1:
    stmfd   sp!, {r6}           @ always push fpexc so the restore path can re-test EN
#endif
    str     sp, [r0]            @ store sp in the preempted task's TCB slot (*from)
    ldr     sp, [r1]            @ get new task stack pointer (*to)
#ifdef RT_USING_SMP
    mov     r0, r2              @ arg0 = to_thread
    bl      rt_cpus_lock_status_restore
#else
#ifdef RT_USING_USERSPACE
    ldr     r1, =rt_current_thread
    ldr     r0, [r1]            @ arg0 = rt_current_thread
    bl      lwp_mmu_switch      @ presumably switches MMU tables for the lwp -- confirm
#endif
#endif /*RT_USING_SMP*/
    b       rt_hw_context_switch_exit
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 */

/* ARM processor mode encodings (CPSR[4:0]) */
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F

.equ I_Bit, 0x80            @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40            @ when F bit is set, FIQ is disabled
/* C-side variables shared with the non-SMP deferred-switch protocol */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread

/*
 * Context switch requested from interrupt context.
 * SMP:     performs the switch immediately using the caller-provided frame.
 * non-SMP: only records from/to and raises a flag; the actual switch is
 *          carried out later by the interrupt epilogue.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
#ifdef RT_USING_SMP
    /* r0 :svc_mod context
     * r1 :addr of from_thread's sp
     * r2 :addr of to_thread's sp
     * r3 :to_thread's tcb
     */
    str     r0, [r1]            @ from_thread->sp = saved svc-mode context
    ldr     sp, [r2]            @ sp = to_thread's saved stack pointer
    mov     r0, r3              @ arg0 = to_thread
    bl      rt_cpus_lock_status_restore
    b       rt_hw_context_switch_exit
#else /*RT_USING_SMP*/
    ldr     r2, =rt_thread_switch_interrupt_flag
    ldr     r3, [r2]
    cmp     r3, #1              @ is a switch already pending?
    beq     _reswitch           @ yes: keep the original "from", only update "to"
    ldr     ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov     r3, #1                          @ set rt_thread_switch_interrupt_flag to 1
    str     r0, [ip]
    str     r3, [r2]
_reswitch:
    ldr     r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str     r1, [r2]
    bx      lr
#endif /*RT_USING_SMP*/
/*
 * Common context-restore path.  Pops the frame laid out by
 * rt_hw_context_switch from the current sp -- fpexc[,fpscr,d0-d31],
 * [usr_sp,usr_lr,] cpsr, r0-r12, lr, pc -- and resumes that thread.
 */
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
    mov     r0, sp              @ pass the frame address to the signal check
    cps     #Mode_IRQ           @ NOTE(review): rt_signal_check apparently runs in IRQ mode -- confirm
    bl      rt_signal_check
    cps     #Mode_SVC
    mov     sp, r0              @ frame may have been rebuilt to enter a signal handler
#endif
#endif
#ifdef RT_USING_FPU
    /* fpu context: pop fpexc first, then fpscr/d-regs only if EN (bit 30) was set */
    ldmfd   sp!, {r6}
    vmsr    fpexc, r6
    tst     r6, #(1<<30)
    beq     1f
    ldmfd   sp!, {r5}
    vmsr    fpscr, r5
    vldmia  sp!, {d16-d31}
    vldmia  sp!, {d0-d15}
1:
#endif
#ifdef RT_USING_LWP
    ldmfd   sp, {r13, r14}^     /* usr_sp, usr_lr (banked form: no writeback) */
    add     sp, #8
#endif
    ldmfd   sp!, {r1}           @ r1 = saved PSR of the thread being resumed
    msr     spsr_cxsf, r1       /* original mode: moved into CPSR by the ldm ..^ below */
#ifdef RT_USING_GDBSERVER
    bl      lwp_check_debug     @ NOTE(review): r1 is reused below; this relies on
                                @ lwp_check_debug preserving r1, which AAPCS does not
                                @ guarantee for a C function -- confirm
#endif
#ifdef RT_USING_LWP
    bl      lwp_check_exit      @ NOTE(review): same r1-preservation assumption as above
#endif
#ifdef RT_USING_LWP
    and     r1, #0x1f           @ extract the mode bits of the saved PSR
    cmp     r1, #0x10           @ resuming into user mode (Mode_USR)?
    bne     1f
    ldmfd   sp!, {r0-r12,lr}    @ yes: pop the frame and leave via ret_to_user
    ldmfd   sp!, {lr}
    b       ret_to_user
1:
#endif
    ldmfd   sp!, {r0-r12,lr,pc}^ /* irq return: ^ with pc also moves SPSR into CPSR */
#ifdef RT_USING_FPU
/*
 * void set_fpexc(rt_uint32 val);
 * Write r0 into the FPEXC register (e.g. to toggle the FPU enable bit).
 */
.global set_fpexc
set_fpexc:
    vmsr    fpexc, r0
    bx      lr
#endif