/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */

#include <rtconfig.h>

#ifdef RT_USING_VMM
#include <vmm.h>
#endif

.section .text, "ax"

/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs     r0, cpsr            @ return the current CPSR as the interrupt level
    cpsid   i                   @ mask IRQs
    bx      lr

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr     cpsr, r0            @ restore the CPSR saved by rt_hw_interrupt_disable
    bx      lr
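
/*
 * Typical usage from C (a sketch based on the prototypes above):
 *
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);
 */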

/*
 * void rt_hw_context_switch_to(rt_uint32_t to);
 * r0 --> to
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr     sp, [r0]            @ get new task stack pointer
    ldmfd   sp!, {r4}           @ pop new task spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc
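
/*
 * Stack frame consumed by the restore sequence above and built by
 * rt_hw_context_switch below, from the lowest address upwards:
 *
 *     cpsr, r0-r12, lr, pc
 *
 * i.e. the saved status register sits on top of the stack, followed by the
 * register file and the resume address.
 */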

.section .bss.share.isr
_guest_switch_lvl:              @ nesting count ("reference count") for switch_to_guest, see below
    .word 0

.globl vmm_virq_update

.section .text.isr, "ax"

/*
 * void rt_hw_context_switch(rt_uint32_t from, rt_uint32_t to);
 * r0 --> from
 * r1 --> to
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}           @ push pc (lr is pushed in place of pc)
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
    mrs     r4, cpsr
    tst     lr, #0x01
    orrne   r4, r4, #0x20       @ it's thumb code, set the T bit
    stmfd   sp!, {r4}           @ push cpsr
    str     sp, [r0]            @ store sp in the preempted task's TCB
    ldr     sp, [r1]            @ get new task stack pointer

#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
    @ need to make sure we are in the vmm domain, as we are about to use rt_current_thread
    ldr     r2, =vmm_domain_val
    ldr     r7, [r2]
    mcr     p15, 0, r7, c3, c0  @ write DACR
#endif

    /* check whether the new thread is the vmm thread; if so, update the vIRQ state */
    ldr     r3, =rt_current_thread
    ldr     r4, [r3]
    ldr     r5, =vmm_thread
    cmp     r4, r5
    beq     switch_to_guest

    @ not falling into the guest. Simple task ;-)
    ldmfd   sp!, {r6}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r6
    ldmfd   sp!, {r0-r12, lr, pc}^

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ the stack is saved in the guest domain, so we need to
    @ come back to the guest domain to get the registers
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0  @ write DACR
#endif

    /* The user can do nearly anything in rt_thread_idle_excute because it
     * will call thread->cleanup. One common thing is sending events to wake
     * up other threads, so the guest thread will be preempted. This is the
     * only point at which the guest thread would call rt_hw_context_switch
     * and "yield".
     *
     * Moreover, rt_schedule will call this function, and this function *is*
     * reentrant. If that happens, we need to make sure that we do not call
     * rt_thread_idle_excute and vmm_virq_update again, and that we are in the
     * super domain. I use a "reference count" to achieve such behaviour. If
     * you have a better idea, tell me. */
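
    /* Roughly, in C (a sketch of the guard implemented below):
     *
     *     _guest_switch_lvl++;
     *     if (_guest_switch_lvl == 1) {
     *         rt_thread_idle_excute();    // may re-enter this function
     *         vmm_virq_update();
     *         _guest_switch_lvl--;        // the code below is not reentrant
     *     }
     */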
    ldr     r4, =_guest_switch_lvl
    ldr     r5, [r4]
    add     r5, r5, #1          @ take a reference
    str     r5, [r4]
    cmp     r5, #1
    bne     _switch_through     @ nested invocation: skip the update

    bl      rt_thread_idle_excute
    bl      vmm_virq_update

    /* we need _guest_switch_lvl to protect until _switch_through, but it's OK
     * to clean up the reference count here because the code below will not be
     * reentrant. */
    sub     r5, r5, #1
    str     r5, [r4]

#ifdef RT_VMM_USING_DOMAIN
    ldr     r1, =guest_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0  @ write DACR
#endif
_switch_through:
#endif /* RT_USING_VMM */

    ldmfd   sp!, {r4}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc; spsr is copied to cpsr
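
/*
 * Note: in RT-Thread, rt_hw_context_switch is normally reached from
 * rt_schedule() with interrupts already disabled, so the save/restore
 * sequence above does not mask IRQs itself; interrupts come back when the
 * restored cpsr is copied from spsr (an assumption of this port, not
 * enforced here).
 */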

/*
 * void rt_hw_context_switch_interrupt(rt_uint32_t from, rt_uint32_t to);
 * r0 --> from
 * r1 --> to
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr     r2, =rt_thread_switch_interrupt_flag
    ldr     r3, [r2]
    cmp     r3, #1
    beq     _reswitch           @ "from" is already recorded; only update "to"
    ldr     ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov     r3, #1              @ set rt_thread_switch_interrupt_flag to 1
    str     r0, [ip]
    str     r3, [r2]
_reswitch:
    ldr     r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str     r1, [r2]
    bx      lr
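
/*
 * Note: this routine only records the from/to threads and raises
 * rt_thread_switch_interrupt_flag; the actual switch is expected to happen on
 * the interrupt-exit path of this port, which checks the flag once the ISR
 * has finished.
 */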