/* context_gcc.S — Cortex-M4 context switch (GNU toolchain) */
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support context switch load/store FPU register.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stack optimized.
 * 2018-07-24     aozima       enhancement hard fault exception handler.
 * 2024-08-13     Evlers       allow the interrupt enable/disable API to be
 *                             rewritten to support independent interrupt management
 */

/**
 * @addtogroup cortex-m4
 */
/*@{*/
  19. .cpu cortex-m4
  20. .syntax unified
  21. .thumb
  22. .text
  23. .equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
  24. .equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
  25. .equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */
  26. .equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
  27. .equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
  28. /*
  29. * rt_base_t rt_hw_interrupt_disable();
  30. */
  31. .global rt_hw_interrupt_disable
  32. .weak rt_hw_interrupt_disable
  33. .type rt_hw_interrupt_disable, %function
  34. rt_hw_interrupt_disable:
  35. MRS r0, PRIMASK
  36. CPSID I
  37. BX LR
  38. /*
  39. * void rt_hw_interrupt_enable(rt_base_t level);
  40. */
  41. .global rt_hw_interrupt_enable
  42. .weak rt_hw_interrupt_enable
  43. .type rt_hw_interrupt_enable, %function
  44. rt_hw_interrupt_enable:
  45. MSR PRIMASK, r0
  46. BX LR
  47. /*
  48. * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
  49. * r0 --> from
  50. * r1 --> to
  51. */
  52. .global rt_hw_context_switch_interrupt
  53. .type rt_hw_context_switch_interrupt, %function
  54. .global rt_hw_context_switch
  55. .type rt_hw_context_switch, %function
  56. rt_hw_context_switch_interrupt:
  57. rt_hw_context_switch:
  58. /* set rt_thread_switch_interrupt_flag to 1 */
  59. LDR r2, =rt_thread_switch_interrupt_flag
  60. LDR r3, [r2]
  61. CMP r3, #1
  62. BEQ _reswitch
  63. MOV r3, #1
  64. STR r3, [r2]
  65. LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
  66. STR r0, [r2]
  67. _reswitch:
  68. LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
  69. STR r1, [r2]
  70. LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
  71. LDR r1, =NVIC_PENDSVSET
  72. STR r1, [r0]
  73. BX LR
  74. /* r0 --> switch from thread stack
  75. * r1 --> switch to thread stack
  76. * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
  77. */
  78. .global PendSV_Handler
  79. .type PendSV_Handler, %function
  80. PendSV_Handler:
  81. /* disable interrupt to protect context switch */
  82. MRS r2, PRIMASK
  83. CPSID I
  84. /* get rt_thread_switch_interrupt_flag */
  85. LDR r0, =rt_thread_switch_interrupt_flag
  86. LDR r1, [r0]
  87. CBZ r1, pendsv_exit /* pendsv already handled */
  88. /* clear rt_thread_switch_interrupt_flag to 0 */
  89. MOV r1, #0x00
  90. STR r1, [r0]
  91. LDR r0, =rt_interrupt_from_thread
  92. LDR r1, [r0]
  93. CBZ r1, switch_to_thread /* skip register save at the first time */
  94. MRS r1, psp /* get from thread stack pointer */
  95. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  96. TST lr, #0x10 /* if(!EXC_RETURN[4]) */
  97. IT EQ
  98. VSTMDBEQ r1!, {d8 - d15} /* push FPU register s16~s31 */
  99. #endif
  100. STMFD r1!, {r4 - r11} /* push r4 - r11 register */
  101. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  102. MOV r4, #0x00 /* flag = 0 */
  103. TST lr, #0x10 /* if(!EXC_RETURN[4]) */
  104. IT EQ
  105. MOVEQ r4, #0x01 /* flag = 1 */
  106. STMFD r1!, {r4} /* push flag */
  107. #endif
  108. LDR r0, [r0]
  109. STR r1, [r0] /* update from thread stack pointer */
  110. switch_to_thread:
  111. LDR r1, =rt_interrupt_to_thread
  112. LDR r1, [r1]
  113. LDR r1, [r1] /* load thread stack pointer */
  114. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  115. LDMFD r1!, {r3} /* pop flag */
  116. #endif
  117. LDMFD r1!, {r4 - r11} /* pop r4 - r11 register */
  118. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  119. CMP r3, #0 /* if(flag_r3 != 0) */
  120. IT NE
  121. VLDMIANE r1!, {d8 - d15} /* pop FPU register s16~s31 */
  122. #endif
  123. MSR psp, r1 /* update stack pointer */
  124. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  125. ORR lr, lr, #0x10 /* lr |= (1 << 4), clean FPCA. */
  126. CMP r3, #0 /* if(flag_r3 != 0) */
  127. IT NE
  128. BICNE lr, lr, #0x10 /* lr &= ~(1 << 4), set FPCA. */
  129. #endif
  130. pendsv_exit:
  131. /* restore interrupt */
  132. MSR PRIMASK, r2
  133. ORR lr, lr, #0x04
  134. BX lr
  135. /*
  136. * void rt_hw_context_switch_to(rt_uint32 to);
  137. * r0 --> to
  138. */
  139. .global rt_hw_context_switch_to
  140. .type rt_hw_context_switch_to, %function
  141. rt_hw_context_switch_to:
  142. LDR r1, =rt_interrupt_to_thread
  143. STR r0, [r1]
  144. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  145. /* CLEAR CONTROL.FPCA */
  146. MRS r2, CONTROL /* read */
  147. BIC r2, #0x04 /* modify */
  148. MSR CONTROL, r2 /* write-back */
  149. #endif
  150. /* set from thread to 0 */
  151. LDR r1, =rt_interrupt_from_thread
  152. MOV r0, #0x0
  153. STR r0, [r1]
  154. /* set interrupt flag to 1 */
  155. LDR r1, =rt_thread_switch_interrupt_flag
  156. MOV r0, #1
  157. STR r0, [r1]
  158. /* set the PendSV and SysTick exception priority */
  159. LDR r0, =NVIC_SYSPRI2
  160. LDR r1, =NVIC_PENDSV_PRI
  161. LDR.W r2, [r0,#0x00] /* read */
  162. ORR r1,r1,r2 /* modify */
  163. STR r1, [r0] /* write-back */
  164. LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
  165. LDR r1, =NVIC_PENDSVSET
  166. STR r1, [r0]
  167. /* restore MSP */
  168. LDR r0, =SCB_VTOR
  169. LDR r0, [r0]
  170. LDR r0, [r0]
  171. NOP
  172. MSR msp, r0
  173. /* enable interrupts at processor level */
  174. CPSIE F
  175. CPSIE I
  176. /* clear the BASEPRI register to disable masking priority */
  177. MOV r0, #0x00
  178. MSR BASEPRI, r0
  179. /* ensure PendSV exception taken place before subsequent operation */
  180. DSB
  181. ISB
  182. /* never reach here! */
  183. /* compatible with old version */
  184. .global rt_hw_interrupt_thread_switch
  185. .type rt_hw_interrupt_thread_switch, %function
  186. rt_hw_interrupt_thread_switch:
  187. BX lr
  188. NOP
  189. .global HardFault_Handler
  190. .type HardFault_Handler, %function
  191. HardFault_Handler:
  192. /* get current context */
  193. MRS r0, msp /* get fault context from handler. */
  194. TST lr, #0x04 /* if(!EXC_RETURN[2]) */
  195. BEQ _get_sp_done
  196. MRS r0, psp /* get fault context from thread. */
  197. _get_sp_done:
  198. STMFD r0!, {r4 - r11} /* push r4 - r11 register */
  199. #if defined (__VFP_FP__) && !defined(__SOFTFP__)
  200. STMFD r0!, {lr} /* push dummy for flag */
  201. #endif
  202. STMFD r0!, {lr} /* push exec_return register */
  203. TST lr, #0x04 /* if(!EXC_RETURN[2]) */
  204. BEQ _update_msp
  205. MSR psp, r0 /* update stack pointer to PSP. */
  206. B _update_done
  207. _update_msp:
  208. MSR msp, r0 /* update stack pointer to MSP. */
  209. _update_done:
  210. PUSH {LR}
  211. BL rt_hw_hard_fault_exception
  212. POP {LR}
  213. ORR lr, lr, #0x04
  214. BX lr