/* lwp_gcc.S — RT-Thread rt-smart lwp user-mode entry/exit routines (riscv64) */
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 */
#include "rtconfig.h"

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */

#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"

.section .text.lwp
  23. /*
  24. * void arch_start_umode(args, text, ustack, kstack);
  25. */
  26. .global arch_start_umode
  27. .type arch_start_umode, % function
  28. arch_start_umode:
  29. // load kstack for user process
  30. csrw sscratch, a3
  31. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  32. csrc sstatus, t0
  33. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  34. csrs sstatus, t0
  35. csrw sepc, a1
  36. mv a3, a2
  37. sret//enter user mode
  38. /*
  39. * void arch_crt_start_umode(args, text, ustack, kstack);
  40. */
  41. .global arch_crt_start_umode
  42. .type arch_crt_start_umode, % function
  43. arch_crt_start_umode:
  44. li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
  45. csrc sstatus, t0
  46. li t0, SSTATUS_SPIE // enable interrupt when return to user mode
  47. csrs sstatus, t0
  48. csrw sepc, a1
  49. mv s0, a0
  50. mv s1, a1
  51. mv s2, a2
  52. mv s3, a3
  53. mv a0, s2
  54. call lwp_copy_return_code_to_user_stack
  55. mv a0, s2
  56. call lwp_fix_sp
  57. mv sp, a0//user_sp
  58. mv ra, a0//return address
  59. mv a0, s0//args
  60. csrw sscratch, s3
  61. sret//enter user mode
  62. /**
  63. * Unify exit point from kernel mode to enter user space
  64. * we handle following things here:
  65. * 1. restoring user mode debug state (not support yet)
  66. * 2. handling thread's exit request
  67. * 3. handling POSIX signal
  68. * 4. restoring user context
  69. * 5. jump to user mode
  70. */
  71. .global arch_ret_to_user
  72. arch_ret_to_user:
  73. // TODO: we don't support kernel gdb server in risc-v yet
  74. // so we don't check debug state here and handle debugging bussiness
  75. call lwp_check_exit_request
  76. beqz a0, 1f
  77. mv a0, x0
  78. call sys_exit
  79. 1:
  80. call lwp_signal_check
  81. beqz a0, ret_to_user_exit
  82. J user_do_signal
  83. ret_to_user_exit:
  84. RESTORE_ALL
  85. // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
  86. sret
  87. /**
  88. * Restore user context from exception frame stroraged in ustack
  89. * And handle pending signals;
  90. */
  91. arch_signal_quit:
  92. call lwp_signal_restore
  93. call arch_get_usp_from_uctx
  94. // return value is user sp
  95. mv sp, a0
  96. // restore user sp before enter trap
  97. addi a0, sp, CTX_REG_NR * REGBYTES
  98. csrw sscratch, a0
  99. RESTORE_ALL
  100. SAVE_ALL
  101. j arch_ret_to_user
  102. /**
  103. * Prepare and enter user signal handler
  104. * Move user exception frame and setup signal return
  105. * routine in user stack
  106. */
  107. user_do_signal:
  108. /* prefetch ustack to avoid corrupted status in RESTORE/STORE pair below */
  109. LOAD t0, FRAME_OFF_SP(sp)
  110. addi t1, t0, -CTX_REG_NR * REGBYTES
  111. LOAD t2, (t0)
  112. li t3, -0x1000
  113. 1:
  114. add t0, t0, t3
  115. LOAD t2, (t0)
  116. bgt t0, t1, 1b
  117. /** restore and backup kernel sp carefully to avoid leaking */
  118. addi t0, sp, CTX_REG_NR * REGBYTES
  119. csrw sscratch, t0
  120. RESTORE_ALL
  121. SAVE_ALL
  122. /**
  123. * save lwp_sigreturn in user memory
  124. */
  125. mv s0, sp
  126. la t0, lwp_sigreturn
  127. la t1, lwp_sigreturn_end
  128. // t1 <- size
  129. sub t1, t1, t0
  130. // s0 <- dst
  131. sub s0, s0, t1
  132. mv s2, t1
  133. lwp_sigreturn_copy_loop:
  134. addi t2, t1, -1
  135. add t3, t0, t2
  136. add t4, s0, t2
  137. lb t5, 0(t3)
  138. sb t5, 0(t4)
  139. mv t1, t2
  140. bnez t1, lwp_sigreturn_copy_loop
  141. /**
  142. * 1. clear sscratch & restore kernel sp to
  143. * enter kernel mode routine
  144. * 2. storage exp frame address to restore context,
  145. * by calling to lwp_signal_backup
  146. * 3. storage lwp_sigreturn entry address
  147. * 4. get signal id as param for signal handler
  148. */
  149. mv s1, sp
  150. csrrw sp, sscratch, x0
  151. /**
  152. * synchronize dcache & icache if target is
  153. * a Harvard Architecture machine, otherwise
  154. * do nothing
  155. */
  156. mv a0, s0
  157. mv a1, s2
  158. call rt_hw_sync_cache_local
  159. /**
  160. * backup user sp (point to saved exception frame, skip sigreturn routine)
  161. * And get signal id
  162. * a0: user sp
  163. * a1: user_pc (not used, marked as 0 to avoid abuse)
  164. * a2: user_flag (not used, marked as 0 to avoid abuse)
  165. */
  166. mv a0, s1
  167. mv a1, zero
  168. mv a2, zero
  169. call lwp_signal_backup
  170. /**
  171. * backup signal id in s2,
  172. * and get sighandler by signal id
  173. */
  174. mv s2, a0
  175. call lwp_sighandler_get
  176. /**
  177. * set regiter RA to user signal handler
  178. * set sp to user sp & save kernel sp in sscratch
  179. */
  180. mv ra, s0
  181. csrw sscratch, sp
  182. mv sp, s0
  183. /**
  184. * a0 is signal_handler,
  185. * s1 = s0 == NULL ? lwp_sigreturn : s0;
  186. */
  187. mv s1, s0
  188. beqz a0, skip_user_signal_handler
  189. mv s1, a0
  190. skip_user_signal_handler:
  191. // enter user mode and enable interrupt when return to user mode
  192. li t0, SSTATUS_SPP
  193. csrc sstatus, t0
  194. li t0, SSTATUS_SPIE
  195. csrs sstatus, t0
  196. // sepc <- signal_handler
  197. csrw sepc, s1
  198. // a0 <- signal id
  199. mv a0, s2
  200. sret
  201. .align 3
  202. lwp_debugreturn:
  203. li a7, 0xff
  204. ecall
  205. .align 3
  206. lwp_sigreturn:
  207. li a7, 0xfe
  208. ecall
  209. .align 3
  210. lwp_sigreturn_end:
  211. .align 3
  212. .global lwp_thread_return
  213. lwp_thread_return:
  214. li a0, 0
  215. li a7, 1
  216. ecall
  217. .align 3
  218. .global lwp_thread_return_end
  219. lwp_thread_return_end:
  220. .globl arch_get_tidr
  221. arch_get_tidr:
  222. mv a0, tp
  223. ret
  224. .global arch_set_thread_area
  225. arch_set_thread_area:
  226. .globl arch_set_tidr
  227. arch_set_tidr:
  228. mv tp, a0
  229. ret
  230. .global arch_clone_exit
  231. .global arch_fork_exit
  232. arch_fork_exit:
  233. arch_clone_exit:
  234. j arch_syscall_exit
  235. START_POINT(syscall_entry)
  236. #ifndef ARCH_USING_NEW_CTX_SWITCH
  237. //swap to thread kernel stack
  238. csrr t0, sstatus
  239. andi t0, t0, 0x100
  240. beqz t0, __restore_sp_from_tcb
  241. __restore_sp_from_sscratch: // from kernel
  242. csrr t0, sscratch
  243. j __move_stack_context
  244. __restore_sp_from_tcb: // from user
  245. la a0, rt_current_thread
  246. LOAD a0, 0(a0)
  247. jal get_thread_kernel_stack_top
  248. mv t0, a0
  249. __move_stack_context:
  250. mv t1, sp//src
  251. mv sp, t0//switch stack
  252. addi sp, sp, -CTX_REG_NR * REGBYTES
  253. //copy context
  254. li s0, CTX_REG_NR//cnt
  255. mv t2, sp//dst
  256. copy_context_loop:
  257. LOAD t0, 0(t1)
  258. STORE t0, 0(t2)
  259. addi s0, s0, -1
  260. addi t1, t1, 8
  261. addi t2, t2, 8
  262. bnez s0, copy_context_loop
  263. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  264. /* fetch SYSCALL ID */
  265. LOAD a7, 17 * REGBYTES(sp)
  266. addi a7, a7, -0xfe
  267. beqz a7, arch_signal_quit
  268. #ifdef ARCH_MM_MMU
  269. /* save setting when syscall enter */
  270. call rt_thread_self
  271. call lwp_user_setting_save
  272. #endif
  273. mv a0, sp
  274. OPEN_INTERRUPT
  275. call syscall_handler
  276. j arch_syscall_exit
  277. START_POINT_END(syscall_entry)
  278. .global arch_syscall_exit
  279. arch_syscall_exit:
  280. CLOSE_INTERRUPT
  281. #if defined(ARCH_MM_MMU)
  282. LOAD s0, 2 * REGBYTES(sp)
  283. andi s0, s0, 0x100
  284. bnez s0, dont_ret_to_user
  285. j arch_ret_to_user
  286. #endif
  287. dont_ret_to_user:
  288. #ifdef ARCH_MM_MMU
  289. /* restore setting when syscall exit */
  290. call rt_thread_self
  291. call lwp_user_setting_restore
  292. /* after restore the reg `tp`, need modify context */
  293. STORE tp, 4 * REGBYTES(sp)
  294. #endif
  295. //restore context
  296. RESTORE_ALL
  297. csrw sscratch, zero
  298. sret