interrupt_gcc.S

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018/10/02     Bernard      The first version
 * 2018/12/27     Jesven       Add SMP schedule
 * 2021/02/02     lizhirui     Add userspace support
 * 2021/12/24     JasonHu      Add user setting save/restore
 */

#define __ASSEMBLY__
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
    .section .text.entry
    .align 2
    .global trap_entry
    .extern __stack_cpu0
    .extern get_current_thread_kernel_stack_top
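
/*
 * Common S-mode trap entry: save the interrupted context on the interrupt
 * stack, then dispatch either to the syscall path (scause == 8, environment
 * call from U-mode) or to handle_trap for interrupts and other exceptions.
 */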
trap_entry:
    // backup sp
    csrrw   sp, sscratch, sp
    // load interrupt stack
    la      sp, __stack_cpu0
    // backup context
    SAVE_ALL
    RESTORE_SYS_GP

    // check syscall
    csrr    t0, scause
    li      t1, 8                       // environment call from U-mode
    beq     t0, t1, syscall_entry

    csrr    a0, scause
    csrrc   a1, stval, zero
    csrr    a2, sepc
    mv      a3, sp
    /* scause, stval, sepc, sp */
    call    handle_trap
    /* check whether a thread switch was requested during the interrupt */
    la      s0, rt_thread_switch_interrupt_flag
    lw      s2, 0(s0)
    beqz    s2, spurious_interrupt
    sw      zero, 0(s0)
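
/*
 * A thread switch was requested: copy the saved frame from the interrupt
 * stack onto the "from" thread's kernel stack, record its new sp, then load
 * the "to" thread's sp before returning.  sstatus.SPP (bit 8) selects where
 * that kernel stack comes from: the value kept in sscratch when the trap
 * came from S-mode, or the kernel stack top taken from the thread's TCB
 * when it came from U-mode.
 */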
    .global rt_hw_context_switch_interrupt_do
rt_hw_context_switch_interrupt_do:
    // swap to thread kernel stack
    csrr    t0, sstatus
    andi    t0, t0, 0x100
    beqz    t0, __restore_sp_from_tcb_interrupt

__restore_sp_from_sscratch_interrupt:
    csrr    t0, sscratch
    j       __move_stack_context_interrupt

__restore_sp_from_tcb_interrupt:
    la      s0, rt_interrupt_from_thread
    LOAD    a0, 0(s0)
    jal     rt_thread_sp_to_thread
    jal     get_thread_kernel_stack_top
    mv      t0, a0
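
/*
 * t0 now holds the top of the target kernel stack: reserve room for one
 * context frame there and copy the CTX_REG_NR saved registers over.
 */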
__move_stack_context_interrupt:
    mv      t1, sp                      // src
    mv      sp, t0                      // switch stack
    addi    sp, sp, -CTX_REG_NR * REGBYTES
    // copy context
    li      s0, CTX_REG_NR              // cnt
    mv      t2, sp                      // dst

copy_context_loop_interrupt:
    LOAD    t0, 0(t1)
    STORE   t0, 0(t2)
    addi    s0, s0, -1
    addi    t1, t1, 8
    addi    t2, t2, 8
    bnez    s0, copy_context_loop_interrupt
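
/*
 * Publish the relocated frame through the pointer held in
 * rt_interrupt_from_thread, then continue on the stack pointer loaded
 * through rt_interrupt_to_thread; with userspace enabled, also let
 * lwp_mmu_switch bring in the target thread's address space.
 */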
    la      s0, rt_interrupt_from_thread
    LOAD    s1, 0(s0)
    STORE   sp, 0(s1)

    la      s0, rt_interrupt_to_thread
    LOAD    s1, 0(s0)
    LOAD    sp, 0(s1)

#ifdef RT_USING_USERSPACE
    mv      a0, s1
    jal     rt_thread_sp_to_thread
    jal     lwp_mmu_switch
#endif
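
/*
 * Shared return path: if the saved sstatus word (offset 2 * REGBYTES in the
 * frame) has SPP clear, the frame belongs to U-mode and we leave through
 * ret_to_user; otherwise restore the frame and sret back into S-mode.
 */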
spurious_interrupt:
    LOAD    t0, 2 * REGBYTES(sp)
    andi    t0, t0, 0x100
    beqz    t0, ret_to_user

    RESTORE_ALL
    sret
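
/*
 * Syscall path: like the interrupt path above, first move the saved frame
 * from the interrupt stack onto the current thread's kernel stack (chosen
 * via sstatus.SPP), then hand the frame to syscall_handler with interrupts
 * enabled.
 */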
syscall_entry:
    // swap to thread kernel stack
    csrr    t0, sstatus
    andi    t0, t0, 0x100
    beqz    t0, __restore_sp_from_tcb

__restore_sp_from_sscratch:
    csrr    t0, sscratch
    j       __move_stack_context

__restore_sp_from_tcb:
    la      a0, rt_current_thread
    LOAD    a0, 0(a0)
    jal     get_thread_kernel_stack_top
    mv      t0, a0
__move_stack_context:
    mv      t1, sp                      // src
    mv      sp, t0                      // switch stack
    addi    sp, sp, -CTX_REG_NR * REGBYTES
    // copy context
    li      s0, CTX_REG_NR              // cnt
    mv      t2, sp                      // dst

copy_context_loop:
    LOAD    t0, 0(t1)
    STORE   t0, 0(t2)
    addi    s0, s0, -1
    addi    t1, t1, 8
    addi    t2, t2, 8
    bnez    s0, copy_context_loop
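
/*
 * The slot at offset 7 * REGBYTES presumably carries the syscall number:
 * 0xfe is treated as the signal-return pseudo-syscall and diverted to
 * lwp_signal_quit instead of the normal handler.
 */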
    LOAD    s0, 7 * REGBYTES(sp)
    addi    s0, s0, -0xfe
    beqz    s0, lwp_signal_quit

#ifdef RT_USING_USERSPACE
    /* save user settings on syscall entry */
    call    rt_thread_self
    call    lwp_user_setting_save
#endif

    mv      a0, sp
    OPEN_INTERRUPT
    call    syscall_handler
    CLOSE_INTERRUPT
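
/*
 * Syscall return: frames that came from U-mode leave through ret_to_user;
 * otherwise the per-thread user settings are restored (which reloads `tp`,
 * so the saved tp slot is refreshed) before sret-ing back to the kernel
 * context.
 */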
    .global syscall_exit
syscall_exit:
#if defined(RT_USING_USERSPACE)
    LOAD    s0, 2 * REGBYTES(sp)
    andi    s0, s0, 0x100
    bnez    s0, dont_ret_to_user
    li      s0, 0
    j       ret_to_user

dont_ret_to_user:
#endif

#ifdef RT_USING_USERSPACE
    /* restore user settings on syscall exit */
    call    rt_thread_self
    call    lwp_user_setting_restore
    /* lwp_user_setting_restore reloads `tp`; update the saved frame to match */
    STORE   tp, 4 * REGBYTES(sp)
#endif

    // restore context
    RESTORE_ALL
    sret
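
/*
 * rt_hw_interrupt_disable() clears sstatus.SIE and returns the previous
 * sstatus value; rt_hw_interrupt_enable(level) sets those bits again, so
 * interrupts are re-enabled only if they were enabled before.
 */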
    .global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    fence.i
    csrs    sstatus, a0                 /* restore to old csr */
    jr      ra

    .global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    csrrci  a0, sstatus, 2              /* clear SIE */
    fence.i
    jr      ra