/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 */
  14. #include "rtconfig.h"
  15. #ifndef __ASSEMBLY__
  16. #define __ASSEMBLY__
  17. #endif /* __ASSEMBLY__ */
  18. #include "cpuport.h"
  19. #include "encoding.h"
  20. #include "stackframe.h"
  21. .section .text.lwp
  22. /*
  23. * void arch_start_umode(args, text, ustack, kstack);
  24. */
  25. .global arch_start_umode
  26. .type arch_start_umode, % function
  27. arch_start_umode:
  28. // load kstack for user process
  29. csrw sscratch, a3
  30. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  31. csrc sstatus, t0
  32. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  33. csrs sstatus, t0
  34. csrw sepc, a1
  35. mv a3, a2
  36. sret//enter user mode
  37. /*
  38. * void arch_crt_start_umode(args, text, ustack, kstack);
  39. */
  40. .global arch_crt_start_umode
  41. .type arch_crt_start_umode, % function
  42. arch_crt_start_umode:
  43. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  44. csrc sstatus, t0
  45. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  46. csrs sstatus, t0
  47. csrw sepc, a1
  48. mv s0, a0
  49. mv s1, a1
  50. mv s2, a2
  51. mv s3, a3
  52. mv a0, s2
  53. call lwp_copy_return_code_to_user_stack
  54. mv a0, s2
  55. call lwp_fix_sp
  56. mv sp, a0//user_sp
  57. mv ra, a0//return address
  58. mv a0, s0//args
  59. csrw sscratch, s3
  60. sret//enter user mode
  61. /**
  62. * Unify exit point from kernel mode to enter user space
  63. * we handle following things here:
  64. * 1. restoring user mode debug state (not support yet)
  65. * 2. handling thread's exit request
  66. * 3. handling POSIX signal
  67. * 4. restoring user context
  68. * 5. jump to user mode
  69. */
  70. .global arch_ret_to_user
  71. arch_ret_to_user:
  72. // TODO: we don't support kernel gdb server in risc-v yet
  73. // so we don't check debug state here and handle debugging bussiness
  74. call lwp_check_exit_request
  75. beqz a0, 1f
  76. mv a0, x0
  77. call sys_exit
  78. 1:
  79. call lwp_signal_check
  80. beqz a0, ret_to_user_exit
  81. J user_do_signal
  82. ret_to_user_exit:
  83. RESTORE_ALL
  84. // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
  85. sret
  86. /**
  87. * Restore user context from exception frame stroraged in ustack
  88. * And handle pending signals;
  89. */
  90. arch_signal_quit:
  91. call lwp_signal_restore
  92. call arch_get_usp_from_uctx
  93. // return value is user sp
  94. mv sp, a0
  95. // restore user sp before enter trap
  96. addi a0, sp, CTX_REG_NR * REGBYTES
  97. csrw sscratch, a0
  98. RESTORE_ALL
  99. SAVE_ALL
  100. j arch_ret_to_user
  101. /**
  102. * Prepare and enter user signal handler
  103. * Move user exception frame and setup signal return
  104. * routine in user stack
  105. */
  106. user_do_signal:
  107. /* prefetch ustack to avoid corrupted status in RESTORE/STORE pair below */
  108. LOAD t0, FRAME_OFF_SP(sp)
  109. addi t1, t0, -CTX_REG_NR * REGBYTES
  110. LOAD t2, (t0)
  111. li t3, -0x1000
  112. 1:
  113. add t0, t0, t3
  114. LOAD t2, (t0)
  115. bgt t0, t1, 1b
  116. /** restore and backup kernel sp carefully to avoid leaking */
  117. addi t0, sp, CTX_REG_NR * REGBYTES
  118. csrw sscratch, t0
  119. RESTORE_ALL
  120. SAVE_ALL
  121. /**
  122. * save lwp_sigreturn in user memory
  123. */
  124. mv s0, sp
  125. la t0, lwp_sigreturn
  126. la t1, lwp_sigreturn_end
  127. // t1 <- size
  128. sub t1, t1, t0
  129. // s0 <- dst
  130. sub s0, s0, t1
  131. mv s2, t1
  132. lwp_sigreturn_copy_loop:
  133. addi t2, t1, -1
  134. add t3, t0, t2
  135. add t4, s0, t2
  136. lb t5, 0(t3)
  137. sb t5, 0(t4)
  138. mv t1, t2
  139. bnez t1, lwp_sigreturn_copy_loop
  140. /**
  141. * 1. clear sscratch & restore kernel sp to
  142. * enter kernel mode routine
  143. * 2. storage exp frame address to restore context,
  144. * by calling to lwp_signal_backup
  145. * 3. storage lwp_sigreturn entry address
  146. * 4. get signal id as param for signal handler
  147. */
  148. mv s1, sp
  149. csrrw sp, sscratch, x0
  150. /**
  151. * synchronize dcache & icache if target is
  152. * a Harvard Architecture machine, otherwise
  153. * do nothing
  154. */
  155. mv a0, s0
  156. mv a1, s2
  157. call rt_hw_sync_cache_local
  158. /**
  159. * backup user sp (point to saved exception frame, skip sigreturn routine)
  160. * And get signal id
  161. * a0: user sp
  162. * a1: user_pc (not used, marked as 0 to avoid abuse)
  163. * a2: user_flag (not used, marked as 0 to avoid abuse)
  164. */
  165. mv a0, s1
  166. mv a1, zero
  167. mv a2, zero
  168. call lwp_signal_backup
  169. /**
  170. * backup signal id in s2,
  171. * and get sighandler by signal id
  172. */
  173. mv s2, a0
  174. call lwp_sighandler_get
  175. /**
  176. * set regiter RA to user signal handler
  177. * set sp to user sp & save kernel sp in sscratch
  178. */
  179. mv ra, s0
  180. csrw sscratch, sp
  181. mv sp, s0
  182. /**
  183. * a0 is signal_handler,
  184. * s1 = s0 == NULL ? lwp_sigreturn : s0;
  185. */
  186. mv s1, s0
  187. beqz a0, skip_user_signal_handler
  188. mv s1, a0
  189. skip_user_signal_handler:
  190. // enter user mode and enable interrupt when return to user mode
  191. li t0, SSTATUS_SPP
  192. csrc sstatus, t0
  193. li t0, SSTATUS_SPIE
  194. csrs sstatus, t0
  195. // sepc <- signal_handler
  196. csrw sepc, s1
  197. // a0 <- signal id
  198. mv a0, s2
  199. sret
  200. .align 3
  201. lwp_debugreturn:
  202. li a7, 0xff
  203. ecall
  204. .align 3
  205. lwp_sigreturn:
  206. li a7, 0xfe
  207. ecall
  208. .align 3
  209. lwp_sigreturn_end:
  210. .align 3
  211. .global lwp_thread_return
  212. lwp_thread_return:
  213. li a0, 0
  214. li a7, 1
  215. ecall
  216. .align 3
  217. .global lwp_thread_return_end
  218. lwp_thread_return_end:
  219. .globl arch_get_tidr
  220. arch_get_tidr:
  221. mv a0, tp
  222. ret
  223. .global arch_set_thread_area
  224. arch_set_thread_area:
  225. .globl arch_set_tidr
  226. arch_set_tidr:
  227. mv tp, a0
  228. ret
  229. .global arch_clone_exit
  230. .global arch_fork_exit
  231. arch_fork_exit:
  232. arch_clone_exit:
  233. j arch_syscall_exit
  234. .global syscall_entry
  235. syscall_entry:
  236. #ifndef ARCH_USING_NEW_CTX_SWITCH
  237. //swap to thread kernel stack
  238. csrr t0, sstatus
  239. andi t0, t0, 0x100
  240. beqz t0, __restore_sp_from_tcb
  241. __restore_sp_from_sscratch: // from kernel
  242. csrr t0, sscratch
  243. j __move_stack_context
  244. __restore_sp_from_tcb: // from user
  245. la a0, rt_current_thread
  246. LOAD a0, 0(a0)
  247. jal get_thread_kernel_stack_top
  248. mv t0, a0
  249. __move_stack_context:
  250. mv t1, sp//src
  251. mv sp, t0//switch stack
  252. addi sp, sp, -CTX_REG_NR * REGBYTES
  253. //copy context
  254. li s0, CTX_REG_NR//cnt
  255. mv t2, sp//dst
  256. copy_context_loop:
  257. LOAD t0, 0(t1)
  258. STORE t0, 0(t2)
  259. addi s0, s0, -1
  260. addi t1, t1, 8
  261. addi t2, t2, 8
  262. bnez s0, copy_context_loop
  263. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  264. /* fetch SYSCALL ID */
  265. LOAD a7, 17 * REGBYTES(sp)
  266. addi a7, a7, -0xfe
  267. beqz a7, arch_signal_quit
  268. #ifdef ARCH_MM_MMU
  269. /* save setting when syscall enter */
  270. call rt_thread_self
  271. call lwp_user_setting_save
  272. #endif
  273. mv a0, sp
  274. OPEN_INTERRUPT
  275. call syscall_handler
  276. j arch_syscall_exit
  277. .global arch_syscall_exit
  278. arch_syscall_exit:
  279. CLOSE_INTERRUPT
  280. #if defined(ARCH_MM_MMU)
  281. LOAD s0, 2 * REGBYTES(sp)
  282. andi s0, s0, 0x100
  283. bnez s0, dont_ret_to_user
  284. j arch_ret_to_user
  285. #endif
  286. dont_ret_to_user:
  287. #ifdef ARCH_MM_MMU
  288. /* restore setting when syscall exit */
  289. call rt_thread_self
  290. call lwp_user_setting_restore
  291. /* after restore the reg `tp`, need modify context */
  292. STORE tp, 4 * REGBYTES(sp)
  293. #endif
  294. //restore context
  295. RESTORE_ALL
  296. csrw sscratch, zero
  297. sret