/* context_gcc.S (approx. 5.0 KB) */
  1. /*
  2. * File : context.S
  3. * This file is part of RT-Thread RTOS
  4. * COPYRIGHT (C) 2013, RT-Thread Development Team
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along
  17. * with this program; if not, write to the Free Software Foundation, Inc.,
  18. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  19. *
  20. * Change Logs:
  21. * Date Author Notes
  22. * 2013-07-05 Bernard the first version
  23. */
  24. #include <rtconfig.h>
  25. #ifdef RT_USING_VMM
  26. #include <vmm.h>
  27. #endif
.section .text, "ax"

/*
 * rt_base_t rt_hw_interrupt_disable(void);
 *
 * Returns the current CPSR in r0 so the caller can later restore it via
 * rt_hw_interrupt_enable(), then masks IRQs.  The CPSR must be read BEFORE
 * cpsid so the returned value reflects the pre-call interrupt state.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs     r0, cpsr                @ r0 = current CPSR (the "level" returned to the caller)
    cpsid   i                       @ set CPSR.I: mask IRQs (ARMv6+ encoding)
    bx      lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 *
 * Restores a CPSR value previously returned by rt_hw_interrupt_disable().
 * Despite the name, this does not unconditionally enable interrupts: it
 * restores whatever interrupt-mask state was saved in `level` (r0).
 * Must run in a privileged mode for the MSR write to take full effect.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr     cpsr, r0                @ write saved PSR back (restores I/F bits)
    bx      lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 *
 * Starts the very first thread: there is no "from" context to save.
 * `to` points at the thread's TCB slot holding the saved stack pointer
 * (the first word of the TCB, as stored by rt_hw_context_switch below).
 * Stack frame layout (ascending from saved sp): cpsr, r0-r12, lr, pc.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr     sp, [r0]                @ get new task stack pointer
    ldmfd   sp!, {r4}               @ pop new task's saved PSR
    msr     spsr_cxsf, r4           @ stage it in SPSR for the exception-style return
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop r0-r12, lr & pc; '^' copies SPSR -> CPSR
@ Reentrancy reference count for the guest-switch path in rt_hw_context_switch.
@ Lives in a shared BSS section (zero-initialised at load; the .word 0 is
@ nominal since .bss contents are zeroed anyway).
.section .bss.share.isr
_guest_switch_lvl:
    .word 0                         @ 0 = not inside switch_to_guest; >0 = nested entry

@ NOTE(review): vmm_virq_update is external — presumably provided by the VMM
@ layer to refresh the guest's pending virtual IRQs; confirm against vmm sources.
.globl vmm_virq_update

@ Code below runs on the interrupt/switch path; keep it in the ISR text section.
.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 *
 * Saves the current thread's context on its own stack, records the stack
 * pointer in the `from` TCB slot, then restores the `to` thread's context.
 * Frame layout pushed here (ascending): cpsr, r0-r12, lr, pc — matching what
 * rt_hw_context_switch_to expects to pop.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}               @ push return address into the frame's PC slot
    stmfd   sp!, {r0-r12, lr}       @ push lr & register file
    mrs     r4, cpsr
    tst     lr, #0x01               @ bit 0 of return address set => caller is Thumb code
    orrne   r4, r4, #0x20           @ set T bit (0x20) in saved PSR so we resume in Thumb state
    stmfd   sp!, {r4}               @ push cpsr
    str     sp, [r0]                @ store sp in preempted task's TCB (first word = saved sp)
    ldr     sp, [r1]                @ get new task stack pointer
#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
    @ need to make sure we are in vmm domain as we would use rt_current_thread
    ldr     r2, =vmm_domain_val
    ldr     r7, [r2]
    mcr     p15, 0, r7, c3, c0      @ write DACR (CP15 c3: Domain Access Control Register)
#endif
    /* check whether vmm thread, otherwise, update vIRQ */
    ldr     r3, =rt_current_thread
    ldr     r4, [r3]
    ldr     r5, =vmm_thread
    cmp     r4, r5                  @ switching into the dedicated VMM (guest) thread?
    beq     switch_to_guest
    @ not falling into guest. Simple task ;-)
    ldmfd   sp!, {r6}               @ pop new task cpsr to spsr
    msr     spsr_cxsf, r6
    ldmfd   sp!, {r0-r12, lr, pc}^  @ exception-style return; SPSR -> CPSR

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ the stack is saved in the guest domain so we need to
    @ come back to the guest domain to get the registers.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0      @ DACR := supervisor domain value
#endif
    /* The user can do nearly anything in rt_thread_idle_excute because it will
    call the thread->cleanup. One common thing is sending events and waking up
    threads, so the guest thread may be preempted. This is the only point at
    which the guest thread calls rt_hw_context_switch and "yields".
    Moreover, rt_schedule will call this function, so this function *can*
    reenter. If that happens we must still call rt_thread_idle_excute and
    vmm_virq_update exactly once and remain in the super domain. A
    "reference count" (_guest_switch_lvl) achieves that: only the first
    (outermost) entry performs the idle/vIRQ work. */
    ldr     r4, =_guest_switch_lvl
    ldr     r5, [r4]
    add     r5, r5, #1              @ _guest_switch_lvl++ (r4/r5 are callee-saved across the bl's)
    str     r5, [r4]
    cmp     r5, #1
    bne     _switch_through         @ nested entry: skip the idle/vIRQ work
    bl      rt_thread_idle_excute
    bl      vmm_virq_update
    /* we need _guest_switch_lvl to protect until _switch_through, but it's OK
     * to clean up the reference count here because the code below is not
     * reentrant. */
    sub     r5, r5, #1              @ _guest_switch_lvl--
    str     r5, [r4]
#ifdef RT_VMM_USING_DOMAIN
    ldr     r1, =guest_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0      @ DACR := guest domain value (stack lives there)
#endif
_switch_through:
#endif /* RT_USING_VMM */
    ldmfd   sp!, {r4}               @ pop new task cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc; SPSR copied to CPSR
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 *
 * Called from interrupt context: the actual register save/restore cannot be
 * done here, so this only records the request.  The IRQ exit path reads
 * rt_thread_switch_interrupt_flag / rt_interrupt_from_thread /
 * rt_interrupt_to_thread and performs the real switch.  If a switch is
 * already pending, only the "to" thread is updated — "from" keeps the
 * thread that was originally preempted.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr     r2, =rt_thread_switch_interrupt_flag
    ldr     r3, [r2]
    cmp     r3, #1
    beq     _reswitch               @ switch already pending: just retarget "to"
    ldr     ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov     r3, #1                  @ set rt_thread_switch_interrupt_flag to 1
    str     r0, [ip]
    str     r3, [r2]
_reswitch:
    ldr     r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str     r1, [r2]
    bx      lr