context_gcc.S

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support saving/restoring FPU registers on context switch.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stacking optimization.
 * 2018-07-24     aozima       enhance the hard fault exception handler.
 */
/**
 * @addtogroup cortex-m7
 */
/*@{*/

#include <rtconfig.h>

    .cpu    cortex-m7
    .syntax unified
    .thumb
    .text

    .equ    SCB_VTOR,        0xE000ED08    /* Vector Table Offset Register */
    .equ    NVIC_INT_CTRL,   0xE000ED04    /* interrupt control state register */
    .equ    NVIC_SYSPRI2,    0xE000ED20    /* system priority register (2) */
    .equ    NVIC_PENDSV_PRI, 0xFFFF0000    /* PendSV and SysTick priority value (lowest) */
    .equ    NVIC_PENDSVSET,  0x10000000    /* value to trigger PendSV exception */
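
/*
 * Context switches are performed inside the PendSV exception: writing
 * NVIC_PENDSVSET to NVIC_INT_CTRL (SCB->ICSR) pends PendSV, and because
 * PendSV and SysTick are programmed to the lowest priority (NVIC_PENDSV_PRI),
 * the switch only runs once every other active exception has completed.
 */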
/*
 * rt_base_t rt_hw_interrupt_disable();
 */
    .global rt_hw_interrupt_disable
    .type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK
    CPSID   I
    BX      LR
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
    .global rt_hw_interrupt_enable
    .type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0
    BX      LR
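
/*
 * Typical C-side usage (a sketch, not part of this file): returning the old
 * PRIMASK value from rt_hw_interrupt_disable() lets the pair nest safely.
 *
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);
 */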
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
    .global rt_hw_context_switch_interrupt
    .type rt_hw_context_switch_interrupt, %function
    .global rt_hw_context_switch
    .type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR
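
/*
 * Note: the routine above only records the from/to thread stack pointer
 * addresses and pends PendSV; the actual register save/restore is deferred
 * to PendSV_Handler, which runs once no other exception is active.
 */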
/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
 */
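/*
 * Resulting layout of a saved thread stack after PendSV_Handler has run,
 * from low to high address (FPU entries only when the thread used the FPU):
 *
 *     flag                          (FPU-saved indicator, only with __VFP_FP__)
 *     r4 - r11                      (saved by software below)
 *     d8 - d15                      (saved by software, only if flag == 1)
 *     r0 - r3, r12, lr, pc, xPSR    (saved by hardware on exception entry)
 *     s0 - s15, FPSCR               (saved by hardware, only if flag == 1)
 */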
    .global PendSV_Handler
    .type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CBZ     r1, pendsv_exit                 /* pendsv already handled */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CBZ     r1, switch_to_thread            /* skip register save at the first time */

    MRS     r1, psp                         /* get from thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10                       /* if(!EXC_RETURN[4]) */
    IT      EQ
    VSTMDBEQ r1!, {d8 - d15}                /* push FPU register s16~s31 */
#endif

    STMFD   r1!, {r4 - r11}                 /* push r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00                       /* flag = 0 */

    TST     lr, #0x10                       /* if(!EXC_RETURN[4]) */
    IT      EQ
    MOVEQ   r4, #0x01                       /* flag = 1 */

    STMFD   r1!, {r4}                       /* push flag */
#endif

    LDR     r0, [r0]
    STR     r1, [r0]                        /* update from thread stack pointer */

switch_to_thread:
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                        /* load thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}                       /* pop flag */
#endif

    LDMFD   r1!, {r4 - r11}                 /* pop r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3, #0                          /* if(flag_r3 != 0) */
    IT      NE
    VLDMIANE r1!, {d8 - d15}                /* pop FPU register s16~s31 */
#endif

    MSR     psp, r1                         /* update stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10                   /* lr |=  (1 << 4), clean FPCA. */
    CMP     r3, #0                          /* if(flag_r3 != 0) */
    IT      NE
    BICNE   lr, lr, #0x10                   /* lr &= ~(1 << 4), set FPCA. */
#endif

#if defined (RT_USING_MEM_PROTECTION)
    PUSH    {r0-r3, r12, lr}
    BL      rt_thread_self
    BL      rt_hw_mpu_table_switch
    POP     {r0-r3, r12, lr}
#endif
pendsv_exit:
    /* restore interrupt */
    MSR     PRIMASK, r2

    ORR     lr, lr, #0x04                   /* EXC_RETURN[2] = 1: return using the process stack (PSP) */
    BX      lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
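/*
 * Used to start the very first thread: there is no "from" context yet, so
 * rt_interrupt_from_thread is cleared below and PendSV_Handler skips the
 * register-save step on its first run.
 */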
    .global rt_hw_context_switch_to
    .type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* CLEAR CONTROL.FPCA */
    MRS     r2, CONTROL                     /* read */
    BIC     r2, #0x04                       /* modify */
    MSR     CONTROL, r2                     /* write-back */
#endif

    /* set from thread to 0 */
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    /* set interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV and SysTick exception priority */
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0,#0x00]                  /* read */
    ORR     r1, r1, r2                      /* modify */
    STR     r1, [r0]                        /* write-back */

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    /* restore MSP: the first word of the vector table holds the initial main stack pointer */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    NOP
    MSR     msp, r0

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* ensure PendSV exception taken place before subsequent operation */
    DSB
    ISB

    /* never reach here! */
/* compatible with old version */
    .global rt_hw_interrupt_thread_switch
    .type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      lr
    NOP
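
/*
 * HardFault_Handler extends the hardware-stacked fault frame with the
 * EXC_RETURN value, a placeholder flag word (FPU builds only) and r4 - r11,
 * then passes the address of that frame in r0 to the C-level handler
 * rt_hw_hard_fault_exception().
 */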
    .global HardFault_Handler
    .type HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     r0, msp                         /* get fault context from handler. */
    TST     lr, #0x04                       /* if(!EXC_RETURN[2]) */
    BEQ     _get_sp_done
    MRS     r0, psp                         /* get fault context from thread. */
_get_sp_done:

    STMFD   r0!, {r4 - r11}                 /* push r4 - r11 register */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    STMFD   r0!, {lr}                       /* push dummy for flag */
#endif

    STMFD   r0!, {lr}                       /* push exec_return register */

    TST     lr, #0x04                       /* if(!EXC_RETURN[2]) */
    BEQ     _update_msp
    MSR     psp, r0                         /* update stack pointer to PSP. */
    B       _update_done
_update_msp:
    MSR     msp, r0                         /* update stack pointer to MSP. */
_update_done:

    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04
    BX      lr