/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 */

#include "rtconfig.h"

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */

#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
  21. .section .text.lwp
  22. /*
  23. * void arch_start_umode(args, text, ustack, kstack);
  24. */
  25. .global arch_start_umode
  26. .type arch_start_umode, % function
  27. arch_start_umode:
  28. // load kstack for user process
  29. csrw sscratch, a3
  30. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  31. csrc sstatus, t0
  32. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  33. csrs sstatus, t0
  34. csrw sepc, a1
  35. mv a3, a2
  36. sret//enter user mode
  37. /*
  38. * void arch_crt_start_umode(args, text, ustack, kstack);
  39. */
  40. .global arch_crt_start_umode
  41. .type arch_crt_start_umode, % function
  42. arch_crt_start_umode:
  43. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  44. csrc sstatus, t0
  45. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  46. csrs sstatus, t0
  47. csrw sepc, a1
  48. mv s0, a0
  49. mv s1, a1
  50. mv s2, a2
  51. mv s3, a3
  52. mv a0, s2
  53. call lwp_copy_return_code_to_user_stack
  54. mv a0, s2
  55. call lwp_fix_sp
  56. mv sp, a0//user_sp
  57. mv ra, a0//return address
  58. mv a0, s0//args
  59. csrw sscratch, s3
  60. sret//enter user mode
  61. /**
  62. * Unify exit point from kernel mode to enter user space
  63. * we handle following things here:
  64. * 1. restoring user mode debug state (not support yet)
  65. * 2. handling thread's exit request
  66. * 3. handling POSIX signal
  67. * 4. restoring user context
  68. * 5. jump to user mode
  69. */
  70. .global arch_ret_to_user
  71. arch_ret_to_user:
  72. // TODO: we don't support kernel gdb server in risc-v yet
  73. // so we don't check debug state here and handle debugging bussiness
  74. call lwp_check_exit_request
  75. beqz a0, 1f
  76. mv a0, x0
  77. call sys_exit
  78. 1:
  79. call lwp_signal_check
  80. beqz a0, ret_to_user_exit
  81. J user_do_signal
  82. ret_to_user_exit:
  83. RESTORE_ALL
  84. // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
  85. sret
  86. /**
  87. * Restore user context from exception frame stroraged in ustack
  88. * And handle pending signals;
  89. */
  90. arch_signal_quit:
  91. call lwp_signal_restore
  92. call arch_get_usp_from_uctx
  93. // return value is user sp
  94. mv sp, a0
  95. // restore user sp before enter trap
  96. addi a0, sp, CTX_REG_NR * REGBYTES
  97. csrw sscratch, a0
  98. RESTORE_ALL
  99. SAVE_ALL
  100. j arch_ret_to_user
  101. /**
  102. * Prepare and enter user signal handler
  103. * Move user exception frame and setup signal return
  104. * routine in user stack
  105. */
  106. user_do_signal:
  107. /** restore and backup kernel sp carefully to avoid leaking */
  108. addi t0, sp, CTX_REG_NR * REGBYTES
  109. csrw sscratch, t0
  110. RESTORE_ALL
  111. SAVE_ALL
  112. /**
  113. * save lwp_sigreturn in user memory
  114. */
  115. mv s0, sp
  116. la t0, lwp_sigreturn
  117. la t1, lwp_sigreturn_end
  118. // t1 <- size
  119. sub t1, t1, t0
  120. // s0 <- dst
  121. sub s0, s0, t1
  122. mv s2, t1
  123. lwp_sigreturn_copy_loop:
  124. addi t2, t1, -1
  125. add t3, t0, t2
  126. add t4, s0, t2
  127. lb t5, 0(t3)
  128. sb t5, 0(t4)
  129. mv t1, t2
  130. bnez t1, lwp_sigreturn_copy_loop
  131. /**
  132. * 1. clear sscratch & restore kernel sp to
  133. * enter kernel mode routine
  134. * 2. storage exp frame address to restore context,
  135. * by calling to lwp_signal_backup
  136. * 3. storage lwp_sigreturn entry address
  137. * 4. get signal id as param for signal handler
  138. */
  139. mv s1, sp
  140. csrrw sp, sscratch, x0
  141. /**
  142. * synchronize dcache & icache if target is
  143. * a Harvard Architecture machine, otherwise
  144. * do nothing
  145. */
  146. mv a0, s0
  147. mv a1, s2
  148. call rt_hw_sync_cache_local
  149. /**
  150. * backup user sp (point to saved exception frame, skip sigreturn routine)
  151. * And get signal id
  152. * a0: user sp
  153. * a1: user_pc (not used, marked as 0 to avoid abuse)
  154. * a2: user_flag (not used, marked as 0 to avoid abuse)
  155. */
  156. mv a0, s1
  157. mv a1, zero
  158. mv a2, zero
  159. call lwp_signal_backup
  160. /**
  161. * backup signal id in s2,
  162. * and get sighandler by signal id
  163. */
  164. mv s2, a0
  165. call lwp_sighandler_get
  166. /**
  167. * set regiter RA to user signal handler
  168. * set sp to user sp & save kernel sp in sscratch
  169. */
  170. mv ra, s0
  171. csrw sscratch, sp
  172. mv sp, s0
  173. /**
  174. * a0 is signal_handler,
  175. * s1 = s0 == NULL ? lwp_sigreturn : s0;
  176. */
  177. mv s1, s0
  178. beqz a0, skip_user_signal_handler
  179. mv s1, a0
  180. skip_user_signal_handler:
  181. // enter user mode and enable interrupt when return to user mode
  182. li t0, SSTATUS_SPP
  183. csrc sstatus, t0
  184. li t0, SSTATUS_SPIE
  185. csrs sstatus, t0
  186. // sepc <- signal_handler
  187. csrw sepc, s1
  188. // a0 <- signal id
  189. mv a0, s2
  190. sret
  191. .align 3
  192. lwp_debugreturn:
  193. li a7, 0xff
  194. ecall
  195. .align 3
  196. lwp_sigreturn:
  197. li a7, 0xfe
  198. ecall
  199. .align 3
  200. lwp_sigreturn_end:
  201. .align 3
  202. .global lwp_thread_return
  203. lwp_thread_return:
  204. li a0, 0
  205. li a7, 1
  206. ecall
  207. .align 3
  208. .global lwp_thread_return_end
  209. lwp_thread_return_end:
  210. .globl arch_get_tidr
  211. arch_get_tidr:
  212. mv a0, tp
  213. ret
  214. .global arch_set_thread_area
  215. arch_set_thread_area:
  216. .globl arch_set_tidr
  217. arch_set_tidr:
  218. mv tp, a0
  219. ret
  220. .global arch_clone_exit
  221. .global arch_fork_exit
  222. arch_fork_exit:
  223. arch_clone_exit:
  224. j arch_syscall_exit
  225. .global syscall_entry
  226. syscall_entry:
  227. #ifndef ARCH_USING_NEW_CTX_SWITCH
  228. //swap to thread kernel stack
  229. csrr t0, sstatus
  230. andi t0, t0, 0x100
  231. beqz t0, __restore_sp_from_tcb
  232. __restore_sp_from_sscratch: // from kernel
  233. csrr t0, sscratch
  234. j __move_stack_context
  235. __restore_sp_from_tcb: // from user
  236. la a0, rt_current_thread
  237. LOAD a0, 0(a0)
  238. jal get_thread_kernel_stack_top
  239. mv t0, a0
  240. __move_stack_context:
  241. mv t1, sp//src
  242. mv sp, t0//switch stack
  243. addi sp, sp, -CTX_REG_NR * REGBYTES
  244. //copy context
  245. li s0, CTX_REG_NR//cnt
  246. mv t2, sp//dst
  247. copy_context_loop:
  248. LOAD t0, 0(t1)
  249. STORE t0, 0(t2)
  250. addi s0, s0, -1
  251. addi t1, t1, 8
  252. addi t2, t2, 8
  253. bnez s0, copy_context_loop
  254. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  255. /* fetch SYSCALL ID */
  256. LOAD a7, 17 * REGBYTES(sp)
  257. addi a7, a7, -0xfe
  258. beqz a7, arch_signal_quit
  259. #ifdef ARCH_MM_MMU
  260. /* save setting when syscall enter */
  261. call rt_thread_self
  262. call lwp_user_setting_save
  263. #endif
  264. mv a0, sp
  265. OPEN_INTERRUPT
  266. call syscall_handler
  267. j arch_syscall_exit
  268. .global arch_syscall_exit
  269. arch_syscall_exit:
  270. CLOSE_INTERRUPT
  271. #if defined(ARCH_MM_MMU)
  272. LOAD s0, 2 * REGBYTES(sp)
  273. andi s0, s0, 0x100
  274. bnez s0, dont_ret_to_user
  275. j arch_ret_to_user
  276. #endif
  277. dont_ret_to_user:
  278. #ifdef ARCH_MM_MMU
  279. /* restore setting when syscall exit */
  280. call rt_thread_self
  281. call lwp_user_setting_restore
  282. /* after restore the reg `tp`, need modify context */
  283. STORE tp, 4 * REGBYTES(sp)
  284. #endif
  285. //restore context
  286. RESTORE_ALL
  287. csrw sscratch, zero
  288. sret