/* lwp_gcc.S — RT-Thread Smart user-mode (lwp) entry/exit support for riscv64 */
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */
#include "rtconfig.h"

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */

#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"
  23. .section .text.lwp
  24. /*
  25. * void arch_start_umode(args, text, ustack, kstack);
  26. */
  27. .global arch_start_umode
  28. .type arch_start_umode, % function
  29. arch_start_umode:
  30. // load kstack for user process
  31. csrw sscratch, a3
  32. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  33. csrc sstatus, t0
  34. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  35. csrs sstatus, t0
  36. csrw sepc, a1
  37. mv sp, a2
  38. sret//enter user mode
  39. /*
  40. * void arch_crt_start_umode(args, text, ustack, kstack);
  41. */
  42. .global arch_crt_start_umode
  43. .type arch_crt_start_umode, % function
  44. arch_crt_start_umode:
  45. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  46. csrc sstatus, t0
  47. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  48. csrs sstatus, t0
  49. csrw sepc, a1
  50. mv s0, a0
  51. mv s1, a1
  52. mv s2, a2
  53. mv s3, a3
  54. mv a0, s2
  55. call lwp_copy_return_code_to_user_stack
  56. mv a0, s2
  57. call lwp_fix_sp
  58. mv sp, a0//user_sp
  59. mv ra, a0//return address
  60. mv a0, s0//args
  61. csrw sscratch, s3
  62. sret//enter user mode
  63. /**
  64. * Unify exit point from kernel mode to enter user space
  65. * we handle following things here:
  66. * 1. restoring user mode debug state (not support yet)
  67. * 2. handling thread's exit request
  68. * 3. handling POSIX signal
  69. * 4. restoring user context
  70. * 5. jump to user mode
  71. */
  72. .global arch_ret_to_user
  73. arch_ret_to_user:
  74. // TODO: we don't support kernel gdb server in risc-v yet
  75. // so we don't check debug state here and handle debugging bussiness
  76. call lwp_check_exit_request
  77. beqz a0, 1f
  78. mv a0, x0
  79. call sys_exit
  80. 1:
  81. mv a0, sp
  82. call lwp_thread_signal_catch
  83. ret_to_user_exit:
  84. RESTORE_ALL
  85. // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
  86. sret
  87. /**
  88. * Restore user context from exception frame stroraged in ustack
  89. * And handle pending signals;
  90. */
  91. arch_signal_quit:
  92. LOAD a0, FRAME_OFF_SP(sp)
  93. call arch_signal_ucontext_restore
  94. /* reset kernel sp to the stack */
  95. addi sp, sp, CTX_REG_NR * REGBYTES
  96. STORE sp, FRAME_OFF_SP(a0)
  97. /* return value is user sp */
  98. mv sp, a0
  99. /* restore user sp before enter trap */
  100. addi a0, sp, CTX_REG_NR * REGBYTES
  101. csrw sscratch, a0
  102. RESTORE_ALL
  103. SAVE_ALL
  104. j arch_ret_to_user
  105. /**
  106. * rt_noreturn
  107. * void arch_thread_signal_enter(
  108. * int signo, -> a0
  109. * siginfo_t *psiginfo, -> a1
  110. * void *exp_frame, -> a2
  111. * void *entry_uaddr, -> a3
  112. * lwp_sigset_t *save_sig_mask, -> a4
  113. * )
  114. */
  115. .global arch_thread_signal_enter
  116. arch_thread_signal_enter:
  117. mv s3, a2
  118. mv s2, a0
  119. mv s1, a3
  120. LOAD t0, FRAME_OFF_SP(a2)
  121. mv a3, t0
  122. call arch_signal_ucontext_save
  123. /** restore kernel sp */
  124. addi sp, s3, CTX_REG_NR * REGBYTES
  125. /**
  126. * set regiter RA to user signal handler
  127. * set sp to user sp & save kernel sp in sscratch
  128. */
  129. mv ra, a0
  130. csrw sscratch, sp
  131. mv sp, a0
  132. /**
  133. * s1 is signal_handler,
  134. * s1 = !s1 ? lwp_sigreturn : s1;
  135. */
  136. bnez s1, 1f
  137. mv s1, ra
  138. 1:
  139. /* enter user mode and enable interrupt when return to user mode */
  140. li t0, SSTATUS_SPP
  141. csrc sstatus, t0
  142. li t0, SSTATUS_SPIE
  143. csrs sstatus, t0
  144. /* sepc <- signal_handler */
  145. csrw sepc, s1
  146. /* a0 <- signal id */
  147. mv a0, s2
  148. /* a1 <- siginfo */
  149. add a1, sp, 16
  150. /* dummy a2 */
  151. mv a2, a1
  152. /* restore user GP */
  153. LOAD gp, FRAME_OFF_GP(s3)
  154. /**
  155. * handler(signo, psi, ucontext);
  156. */
  157. sret
  158. .align 3
  159. lwp_debugreturn:
  160. li a7, 0xff
  161. ecall
  162. .align 3
  163. .global lwp_sigreturn
  164. lwp_sigreturn:
  165. li a7, 0xfe
  166. ecall
  167. .align 3
  168. lwp_sigreturn_end:
  169. .align 3
  170. .global lwp_thread_return
  171. lwp_thread_return:
  172. li a0, 0
  173. li a7, 1
  174. ecall
  175. .align 3
  176. .global lwp_thread_return_end
  177. lwp_thread_return_end:
  178. .globl arch_get_tidr
  179. arch_get_tidr:
  180. mv a0, tp
  181. ret
  182. .global arch_set_thread_area
  183. arch_set_thread_area:
  184. .globl arch_set_tidr
  185. arch_set_tidr:
  186. mv tp, a0
  187. ret
  188. .global arch_clone_exit
  189. .global arch_fork_exit
  190. arch_fork_exit:
  191. arch_clone_exit:
  192. j arch_syscall_exit
  193. START_POINT(syscall_entry)
  194. #ifndef ARCH_USING_NEW_CTX_SWITCH
  195. //swap to thread kernel stack
  196. csrr t0, sstatus
  197. andi t0, t0, 0x100
  198. beqz t0, __restore_sp_from_tcb
  199. __restore_sp_from_sscratch: // from kernel
  200. csrr t0, sscratch
  201. j __move_stack_context
  202. __restore_sp_from_tcb: // from user
  203. jal rt_thread_self
  204. jal get_thread_kernel_stack_top
  205. mv t0, a0
  206. __move_stack_context:
  207. mv t1, sp//src
  208. mv sp, t0//switch stack
  209. addi sp, sp, -CTX_REG_NR * REGBYTES
  210. //copy context
  211. li s0, CTX_REG_NR//cnt
  212. mv t2, sp//dst
  213. copy_context_loop:
  214. LOAD t0, 0(t1)
  215. STORE t0, 0(t2)
  216. addi s0, s0, -1
  217. addi t1, t1, 8
  218. addi t2, t2, 8
  219. bnez s0, copy_context_loop
  220. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  221. /* fetch SYSCALL ID */
  222. LOAD a7, 17 * REGBYTES(sp)
  223. addi a7, a7, -0xfe
  224. beqz a7, arch_signal_quit
  225. #ifdef ARCH_MM_MMU
  226. /* save setting when syscall enter */
  227. call rt_thread_self
  228. call lwp_user_setting_save
  229. #endif
  230. mv a0, sp
  231. OPEN_INTERRUPT
  232. call syscall_handler
  233. j arch_syscall_exit
  234. START_POINT_END(syscall_entry)
  235. .global arch_syscall_exit
  236. arch_syscall_exit:
  237. CLOSE_INTERRUPT
  238. #if defined(ARCH_MM_MMU)
  239. LOAD s0, FRAME_OFF_SSTATUS(sp)
  240. andi s0, s0, 0x100
  241. bnez s0, dont_ret_to_user
  242. j arch_ret_to_user
  243. #endif
  244. dont_ret_to_user:
  245. #ifdef ARCH_MM_MMU
  246. /* restore setting when syscall exit */
  247. call rt_thread_self
  248. call lwp_user_setting_restore
  249. /* after restore the reg `tp`, need modify context */
  250. STORE tp, 4 * REGBYTES(sp)
  251. #endif
  252. //restore context
  253. RESTORE_ALL
  254. csrw sscratch, zero
  255. sret