/* lwp_gcc.S — RT-Thread lwp (user-mode) support for RISC-V 64, GNU as syntax */
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */
  15. #include "rtconfig.h"
  16. #ifndef __ASSEMBLY__
  17. #define __ASSEMBLY__
  18. #endif /* __ASSEMBLY__ */
  19. #include "cpuport.h"
  20. #include "encoding.h"
  21. #include "stackframe.h"
  22. #include "asm-generic.h"
  23. .section .text.lwp
  24. /*
  25. * void arch_start_umode(args, text, ustack, kstack);
  26. */
  27. .global arch_start_umode
  28. .type arch_start_umode, % function
  29. arch_start_umode:
  30. // load kstack for user process
  31. csrw sscratch, a3
  32. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  33. csrc sstatus, t0
  34. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  35. csrs sstatus, t0
  36. csrw sepc, a1
  37. mv a3, a2
  38. sret//enter user mode
  39. /*
  40. * void arch_crt_start_umode(args, text, ustack, kstack);
  41. */
  42. .global arch_crt_start_umode
  43. .type arch_crt_start_umode, % function
  44. arch_crt_start_umode:
  45. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  46. csrc sstatus, t0
  47. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  48. csrs sstatus, t0
  49. csrw sepc, a1
  50. mv s0, a0
  51. mv s1, a1
  52. mv s2, a2
  53. mv s3, a3
  54. mv a0, s2
  55. call lwp_copy_return_code_to_user_stack
  56. mv a0, s2
  57. call lwp_fix_sp
  58. mv sp, a0//user_sp
  59. mv ra, a0//return address
  60. mv a0, s0//args
  61. csrw sscratch, s3
  62. sret//enter user mode
  63. /**
  64. * Unify exit point from kernel mode to enter user space
  65. * we handle following things here:
  66. * 1. restoring user mode debug state (not support yet)
  67. * 2. handling thread's exit request
  68. * 3. handling POSIX signal
  69. * 4. restoring user context
  70. * 5. jump to user mode
  71. */
  72. .global arch_ret_to_user
  73. arch_ret_to_user:
  74. // TODO: we don't support kernel gdb server in risc-v yet
  75. // so we don't check debug state here and handle debugging bussiness
  76. call lwp_check_exit_request
  77. beqz a0, 1f
  78. mv a0, x0
  79. call sys_exit
  80. 1:
  81. mv a0, sp
  82. call lwp_thread_signal_catch
  83. ret_to_user_exit:
  84. RESTORE_ALL
  85. // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
  86. sret
  87. /**
  88. * Restore user context from exception frame stroraged in ustack
  89. * And handle pending signals;
  90. */
  91. arch_signal_quit:
  92. LOAD a0, FRAME_OFF_SP(sp)
  93. call arch_signal_ucontext_restore
  94. /* reset kernel sp to the stack */
  95. STORE sp, FRAME_OFF_SP(a0)
  96. /* return value is user sp */
  97. mv sp, a0
  98. /* restore user sp before enter trap */
  99. addi a0, sp, CTX_REG_NR * REGBYTES
  100. csrw sscratch, a0
  101. RESTORE_ALL
  102. SAVE_ALL
  103. j arch_ret_to_user
  104. /**
  105. * rt_noreturn
  106. * void arch_thread_signal_enter(
  107. * int signo, -> a0
  108. * siginfo_t *psiginfo, -> a1
  109. * void *exp_frame, -> a2
  110. * void *entry_uaddr, -> a3
  111. * lwp_sigset_t *save_sig_mask, -> a4
  112. * )
  113. */
  114. .global arch_thread_signal_enter
  115. arch_thread_signal_enter:
  116. mv s3, a2
  117. mv s2, a0
  118. mv s1, a3
  119. LOAD t0, FRAME_OFF_SP(a2)
  120. mv a3, t0
  121. call arch_signal_ucontext_save
  122. /** restore kernel sp */
  123. addi sp, s3, CTX_REG_NR * REGBYTES
  124. /**
  125. * set regiter RA to user signal handler
  126. * set sp to user sp & save kernel sp in sscratch
  127. */
  128. mv ra, a0
  129. csrw sscratch, sp
  130. mv sp, a0
  131. /**
  132. * s1 is signal_handler,
  133. * s1 = !s1 ? lwp_sigreturn : s1;
  134. */
  135. bnez s1, 1f
  136. mv s1, ra
  137. 1:
  138. /* enter user mode and enable interrupt when return to user mode */
  139. li t0, SSTATUS_SPP
  140. csrc sstatus, t0
  141. li t0, SSTATUS_SPIE
  142. csrs sstatus, t0
  143. /* sepc <- signal_handler */
  144. csrw sepc, s1
  145. /* a0 <- signal id */
  146. mv a0, s2
  147. /* a1 <- siginfo */
  148. add a1, sp, 16
  149. /* dummy a2 */
  150. mv a2, a1
  151. /**
  152. * handler(signo, psi, ucontext);
  153. */
  154. sret
  155. .align 3
  156. lwp_debugreturn:
  157. li a7, 0xff
  158. ecall
  159. .align 3
  160. .global lwp_sigreturn
  161. lwp_sigreturn:
  162. li a7, 0xfe
  163. ecall
  164. .align 3
  165. lwp_sigreturn_end:
  166. .align 3
  167. .global lwp_thread_return
  168. lwp_thread_return:
  169. li a0, 0
  170. li a7, 1
  171. ecall
  172. .align 3
  173. .global lwp_thread_return_end
  174. lwp_thread_return_end:
  175. .globl arch_get_tidr
  176. arch_get_tidr:
  177. mv a0, tp
  178. ret
  179. .global arch_set_thread_area
  180. arch_set_thread_area:
  181. .globl arch_set_tidr
  182. arch_set_tidr:
  183. mv tp, a0
  184. ret
  185. .global arch_clone_exit
  186. .global arch_fork_exit
  187. arch_fork_exit:
  188. arch_clone_exit:
  189. j arch_syscall_exit
  190. START_POINT(syscall_entry)
  191. #ifndef ARCH_USING_NEW_CTX_SWITCH
  192. //swap to thread kernel stack
  193. csrr t0, sstatus
  194. andi t0, t0, 0x100
  195. beqz t0, __restore_sp_from_tcb
  196. __restore_sp_from_sscratch: // from kernel
  197. csrr t0, sscratch
  198. j __move_stack_context
  199. __restore_sp_from_tcb: // from user
  200. la a0, rt_current_thread
  201. LOAD a0, 0(a0)
  202. jal get_thread_kernel_stack_top
  203. mv t0, a0
  204. __move_stack_context:
  205. mv t1, sp//src
  206. mv sp, t0//switch stack
  207. addi sp, sp, -CTX_REG_NR * REGBYTES
  208. //copy context
  209. li s0, CTX_REG_NR//cnt
  210. mv t2, sp//dst
  211. copy_context_loop:
  212. LOAD t0, 0(t1)
  213. STORE t0, 0(t2)
  214. addi s0, s0, -1
  215. addi t1, t1, 8
  216. addi t2, t2, 8
  217. bnez s0, copy_context_loop
  218. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  219. /* fetch SYSCALL ID */
  220. LOAD a7, 17 * REGBYTES(sp)
  221. addi a7, a7, -0xfe
  222. beqz a7, arch_signal_quit
  223. #ifdef ARCH_MM_MMU
  224. /* save setting when syscall enter */
  225. call rt_thread_self
  226. call lwp_user_setting_save
  227. #endif
  228. mv a0, sp
  229. OPEN_INTERRUPT
  230. call syscall_handler
  231. j arch_syscall_exit
  232. START_POINT_END(syscall_entry)
  233. .global arch_syscall_exit
  234. arch_syscall_exit:
  235. CLOSE_INTERRUPT
  236. #if defined(ARCH_MM_MMU)
  237. LOAD s0, 2 * REGBYTES(sp)
  238. andi s0, s0, 0x100
  239. bnez s0, dont_ret_to_user
  240. j arch_ret_to_user
  241. #endif
  242. dont_ret_to_user:
  243. #ifdef ARCH_MM_MMU
  244. /* restore setting when syscall exit */
  245. call rt_thread_self
  246. call lwp_user_setting_restore
  247. /* after restore the reg `tp`, need modify context */
  248. STORE tp, 4 * REGBYTES(sp)
  249. #endif
  250. //restore context
  251. RESTORE_ALL
  252. csrw sscratch, zero
  253. sret