lwp_gcc.S

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 * 2023-08-03     Shell        Support of syscall restart (SA_RESTART)
 */

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif

#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <vector_gcc.h>
#include <armv8.h>
#include <lwp_arch.h>
/*********************
 *     SPSR BIT      *
 *********************/

#define SPSR_Mode(v)            ((v) << 0)
#define SPSR_A64                (0 << 4)
#define SPSR_RESERVED_5         (0 << 5)
#define SPSR_FIQ_MASKED(v)      ((v) << 6)
#define SPSR_IRQ_MASKED(v)      ((v) << 7)
#define SPSR_SERROR_MASKED(v)   ((v) << 8)
#define SPSR_D_MASKED(v)        ((v) << 9)
#define SPSR_RESERVED_10_19     (0 << 10)
#define SPSR_IL(v)              ((v) << 20)
#define SPSR_SS(v)              ((v) << 21)
#define SPSR_RESERVED_22_27     (0 << 22)
#define SPSR_V(v)               ((v) << 28)
#define SPSR_C(v)               ((v) << 29)
#define SPSR_Z(v)               ((v) << 30)
#define SPSR_N(v)               ((v) << 31)
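
/*
 * These macros mirror the architectural SPSR_EL1 layout: M[3:0] selects
 * the exception level and stack (0b0000 = EL0t), bit 4 selects the
 * execution state (0 = AArch64), bits 6-9 are the F/I/A/D interrupt
 * masks, bit 20 is IL, bit 21 is SS (software step) and bits 28-31 are
 * the N/Z/C/V condition flags.
 */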
/**************************************************/

.text

/*
 * void arch_start_umode(args, text, ustack, kstack);
 */
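/*
 * Note: entry into user mode is done as an exception return. sp gets the
 * kernel stack, sp_el0 the user stack, elr_el1 the entry point, and
 * spsr_el1 is set to EL0t/AArch64 with all DAIF masks clear, so the eret
 * below drops to EL0 at `text` with interrupts enabled; x0 still carries
 * `args` for the entry function.
 */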
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
    mov sp, x3
    mov x4, #(SPSR_Mode(0) | SPSR_A64)
    msr daifset, #3
    dsb sy
    mrs x30, sp_el0
    /* user stack top */
    msr sp_el0, x2
    mov x3, x2
    msr spsr_el1, x4
    msr elr_el1, x1
    eret

/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 */
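/*
 * Note: on top of what arch_start_umode does, this routine reserves 16
 * bytes at the top of the user stack and copies the lwp_thread_return
 * trampoline (mov x0, xzr; mov x8, #0x01; svc #0, defined at the end of
 * this file) into them, then cleans the D-cache and invalidates the
 * I-cache so the copied instructions are fetchable. Since x30 is loaded
 * from sp_el0 after the copy, a return from the C runtime entry falls
 * into the trampoline and issues the exit syscall.
 */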
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
    sub x4, x2, #0x10
    adr x2, lwp_thread_return
    ldr x5, [x2]
    str x5, [x4]
    ldr x5, [x2, #4]
    str x5, [x4, #4]
    ldr x5, [x2, #8]
    str x5, [x4, #8]
    mov x5, x4
    dc cvau, x5
    add x5, x5, #8
    dc cvau, x5
    dsb sy
    ic ialluis
    dsb sy
    msr sp_el0, x4
    mov sp, x3
    mov x4, #(SPSR_Mode(0) | SPSR_A64)
    msr daifset, #3
    dsb sy
    mrs x30, sp_el0
    msr spsr_el1, x4
    msr elr_el1, x1
    eret

.global arch_get_user_sp
arch_get_user_sp:
    mrs x0, sp_el0
    ret
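
/*
 * Common return path for the child side of fork()/clone(): the child
 * must see a return value of 0, so x0 is zeroed before joining the
 * regular syscall exit path.
 */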
.global arch_fork_exit
.global arch_clone_exit
arch_fork_exit:
arch_clone_exit:
    mov x0, xzr
    b arch_syscall_exit

/*
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 */
.global lwp_exec_user
lwp_exec_user:
    mov sp, x1
    mov x4, #(SPSR_Mode(0) | SPSR_A64)
    ldr x3, =0x0000ffff80000000
    msr daifset, #3
    msr spsr_el1, x4
    msr elr_el1, x2
    eret

/*
 * void SVC_Handler(regs);
 * since this routine resets the SP, we take it as a start point
 */
START_POINT(SVC_Handler)
    mov fp, xzr
    mov lr, xzr
    /* x0 is the initial sp */
    mov sp, x0
    bl _SVC_Handler
    /* branch explicitly, to keep this code position-independent */
    b arch_syscall_exit
START_POINT_END(SVC_Handler)
TRACE_SYMBOL(_SVC_Handler)

#define FRAME_REG x19

/**
 * x0 -> frame_addr
 */
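/*
 * Note: x8 carries the syscall number, reloaded here from the exception
 * frame. Two magic values are filtered out before the table lookup:
 * 0xe000 is the sigreturn trap (arch_signal_quit) and 0xf000 is the
 * return from the lwp_debugreturn trampoline (ret_from_user). Anything
 * else is resolved through lwp_get_sys_api() using the low byte of x8,
 * with up to eight arguments reloaded from the saved x0-x7 before the
 * handler is called through x30; a NULL result branches straight to
 * arch_syscall_exit.
 */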
_SVC_Handler:
.local _SVC_Handler
    stp fp, lr, [sp, -16]!
    mov fp, sp
    mov FRAME_REG, x0 /* save the frame address */
    msr daifclr, #3 /* enable interrupts */
    GET_THREAD_SELF x0
    bl lwp_user_setting_save
    ldp x8, x9, [FRAME_REG, #(CONTEXT_OFFSET_X8)]
    and x0, x8, #0xf000
    cmp x0, #0xe000
    beq arch_signal_quit
    cmp x0, #0xf000
    beq ret_from_user
    uxtb x0, w8
    bl lwp_get_sys_api
    cmp x0, xzr
    mov x30, x0
    beq arch_syscall_exit
    ldp x0, x1, [FRAME_REG, #(CONTEXT_OFFSET_X0)]
    ldp x2, x3, [FRAME_REG, #(CONTEXT_OFFSET_X2)]
    ldp x4, x5, [FRAME_REG, #(CONTEXT_OFFSET_X4)]
    ldp x6, x7, [FRAME_REG, #(CONTEXT_OFFSET_X6)]
    blr x30
    ldp fp, lr, [sp], 16
    ret

/**
 * void arch_syscall_exit(long rc)
 */
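/*
 * Note: on entry x0 holds the syscall return code and sp points at the
 * saved exception frame (passed as the second argument). Judging from
 * the comment below, arch_syscall_prepare_signal() backs up the original
 * x0 for a possible SA_RESTART replay and stores the return value into
 * the frame.
 */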
arch_syscall_exit:
.global arch_syscall_exit
    /**
     * back up the former x0, which is required to restart the syscall,
     * then store the syscall return value in the stack frame
     */
    mov x1, sp
    bl arch_syscall_prepare_signal
    /**
     * disable local IRQs so we don't mess up the spsr_el1, which is not
     * saved for kernel-space IRQ/exception
     */
    msr daifset, #3
    b arch_ret_to_user

/* the sp is reset to the outermost level; IRQ and FIQ are disabled */
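/*
 * Exit path back to user mode. In order:
 *   1. reload sp_el0, elr_el1 and spsr_el1 from the exception frame;
 *   2. run the pre-actions: a pending debugger suspend (lwp_check_debug)
 *      and a pending exit request (lwp_check_exit_request -> sys_exit);
 *   3. set or clear the software-step bit (SPSR_EL1.SS, bit 21)
 *      depending on whether the thread is being debugged;
 *   4. write the possibly-updated spsr/elr back into the frame and let
 *      lwp_thread_signal_catch() deliver pending signals;
 *   5. notify an attached debugger (dbg_attach_req), restore the general
 *      registers and eret.
 */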
START_POINT(arch_ret_to_user)
    msr daifset, #3
    ldr x2, [sp, #CONTEXT_OFFSET_SP_EL0]
    msr sp_el0, x2
    ldr x2, [sp, #CONTEXT_OFFSET_ELR_EL1]
    msr elr_el1, x2
    ldr x3, [sp, #CONTEXT_OFFSET_SPSR_EL1]
    msr spsr_el1, x3
    /* pre-action */
    bl lwp_check_debug
    bl lwp_check_exit_request
    cbz w0, 1f
    /* exit on event */
    msr daifclr, #3
    mov x0, xzr
    b sys_exit
1:  /* handling dbg */
    /* check if dbg ops exist */
    ldr x0, =rt_dbg_ops
    ldr x0, [x0]
    cbz x0, 3f
    bl dbg_thread_in_debug
    mov x1, #(1 << 21)
    mrs x2, spsr_el1
    cbz w0, 2f
    orr x2, x2, x1
    msr spsr_el1, x2
    b 3f
2:  /* clear software step */
    bic x2, x2, x1
    msr spsr_el1, x2
3:  /* handling signal */
    /**
     * push the updated spsr & elr to the exception frame.
     * Note: these two may have been updated while handling dbg
     */
    mrs x0, spsr_el1
    str x0, [sp, #CONTEXT_OFFSET_SPSR_EL1]
    mrs x1, elr_el1
    str x1, [sp, #CONTEXT_OFFSET_ELR_EL1]
    mov x0, sp
    /* restore the thread execution environment */
    msr daifclr, #3
    bl lwp_thread_signal_catch
    /* restore the exception-return exec-flow */
    msr daifset, #3
    /* check debug */
    ldr x0, =rt_dbg_ops
    ldr x0, [x0]
    cmp x0, xzr
    beq 1f
    ldr x0, [sp, #CONTEXT_OFFSET_ELR_EL1]
    bl dbg_attach_req
1:
    RESTORE_IRQ_CONTEXT_NO_SPEL0
    eret
START_POINT_END(arch_ret_to_user)
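
/*
 * If a debugger is attached (rt_dbg_ops != NULL) and dbg_check_suspend()
 * reports a pending suspend, an 8-byte copy of the lwp_debugreturn
 * trampoline (mov x8, #0xf000; svc #0) is placed on the user stack and
 * entered with the software-step bit set, while the original
 * elr_el1/spsr_el1 pair is parked on the kernel stack. Once the
 * trampoline traps back in with syscall 0xf000, ret_from_user below
 * drops it again and restores the parked pair.
 */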
.global lwp_check_debug
lwp_check_debug:
    ldr x0, =rt_dbg_ops
    ldr x0, [x0]
    cbnz x0, 1f
    ret
1:
    stp x29, x30, [sp, #-0x10]!
    bl dbg_check_suspend
    cbz w0, lwp_check_debug_quit
    mrs x2, sp_el0
    sub x2, x2, #0x10
    mov x3, x2
    msr sp_el0, x2
    ldr x0, =lwp_debugreturn
    ldr w1, [x0]
    str w1, [x2]
    ldr w1, [x0, #4]
    str w1, [x2, #4]
    dc cvau, x2
    add x2, x2, #4
    dc cvau, x2
    dsb sy
    isb sy
    ic ialluis
    isb sy
    mrs x0, elr_el1
    mrs x1, spsr_el1
    stp x0, x1, [sp, #-0x10]!
    msr elr_el1, x3 /* lwp_debugreturn */
    mov x1, #(SPSR_Mode(0) | SPSR_A64)
    orr x1, x1, #(1 << 21)
    msr spsr_el1, x1
    eret
ret_from_user:
    /* sp_el0 += 16 to drop the lwp_debugreturn trampoline */
    mrs x0, sp_el0
    add x0, x0, #0x10
    msr sp_el0, x0
    /* we are at EL1 now; sp is at (empty frame position) - sizeof(context) */
    mov x0, sp
    add x0, x0, #0x220
    mov sp, x0
    ldp x0, x1, [sp], #0x10 /* x1 is the original spsr_el1 */
    msr elr_el1, x0 /* x0 is the original elr_el1 */
    msr spsr_el1, x1
lwp_check_debug_quit:
    ldp x29, x30, [sp], #0x10
    ret
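
/*
 * Restart an interrupted syscall (SA_RESTART). x1 is the kernel stack to
 * resume on and x0 points into the user stack at the exception frame
 * saved when the syscall first trapped. The spsel dance reloads that
 * frame into the registers through sp_el0, then vector_exception
 * re-enters the common exception path so the original SVC is dispatched
 * again.
 */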
.global arch_syscall_restart
arch_syscall_restart:
    msr daifset, #3
    mov sp, x1
    /* drop the exception frame in the user stack */
    msr sp_el0, x0
    /* restore the previous exception frame */
    msr spsel, #0
    RESTORE_IRQ_CONTEXT_NO_SPEL0
    msr spsel, #1
    b vector_exception
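
/*
 * Return path of lwp_sigreturn (the 0xe000 trap): the frame pushed by
 * the sigreturn svc is dropped, arch_signal_ucontext_restore() restores
 * the signal mask and returns the user stack position just above the
 * exception frame that was saved at signal delivery; that frame is then
 * replayed through sp_el0 and re-saved onto the kernel stack so the
 * common arch_ret_to_user exit can run.
 */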
arch_signal_quit:
    /* drop the current exception frame & the sigreturn */
    add sp, sp, #(CONTEXT_SIZE + 0x10)
    mov x1, sp
    mrs x0, sp_el0
    bl arch_signal_ucontext_restore
    add x0, x0, #-CONTEXT_SIZE
    msr sp_el0, x0
    /**
     * Note: we are about to reset the spsr, and a reschedule would
     * corrupt it, so disable IRQs for a short period here
     */
    msr daifset, #3
    /* restore the previous exception frame */
    msr spsel, #0
    RESTORE_IRQ_CONTEXT_NO_SPEL0
    msr spsel, #1
    SAVE_IRQ_CONTEXT
    b arch_ret_to_user

/**
 * rt_noreturn
 * void arch_thread_signal_enter(
 *     int signo,                      -> x0
 *     siginfo_t *psiginfo,            -> x1
 *     void *exp_frame,                -> x2
 *     void *entry_uaddr,              -> x3
 *     lwp_sigset_t *save_sig_mask,    -> x4
 * )
 */
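/*
 * Note: the in-kernel exception frame is serialized into a ucontext on
 * the user stack by arch_signal_ucontext_save(), which also appears to
 * leave a sigreturn trampoline at the returned address (hence the cache
 * maintenance on x0); x30 is pointed at that trampoline so the handler
 * returns through the 0xe000 trap, and elr_el1 is set to the handler
 * entry (or to the trampoline itself when entry_uaddr is NULL). The
 * final eret effectively performs the call
 * handler(signo, siginfo, ucontext) in user mode.
 */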
.global arch_thread_signal_enter
arch_thread_signal_enter:
    mov x19, x0
    mov x20, x2 /* exp_frame */
    mov x21, x3
    /**
     * move the exception frame to the user stack
     */
    mrs x0, sp_el0
    mov x3, x4
    /* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
    bl arch_signal_ucontext_save
    mov x22, x0
    /* get the saved pointer to the uframe */
    bl arch_signal_ucontext_get_frame
    mov x2, x0
    mov x0, x22
    dc cvau, x0
    dsb sy
    ic ialluis
    dsb sy
    /**
     * Brief: prepare the environment for the signal handler
     */
    /**
     * reset the cpsr, drop the exp frame on the kernel stack and reset
     * the kernel sp.
     *
     * Note: we are about to reset the spsr, and a reschedule would
     * corrupt it, so disable IRQs for a short period here
     */
    msr daifset, #3
    ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1]
    msr spsr_el1, x1
    add sp, x20, #CONTEXT_SIZE
    /* reset the user sp */
    msr sp_el0, x0
    /* set the return address to the sigreturn */
    mov x30, x0
    cbnz x21, 1f
    mov x21, x30
1:
    /* set the entry address of the signal handler */
    msr elr_el1, x21
    /* siginfo is above the return address */
    add x1, x30, UCTX_ABI_OFFSET_TO_SI
    /* the uframe is saved in x2 */
    mov x0, x19
    /* handler(signo, psi, ucontext); */
    eret
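
/*
 * User-mode trampolines. They never run at EL1: copies are planted on
 * the user stack (or their addresses handed out) and executed at EL0,
 * trapping back via svc with the magic numbers matched at the top of
 * _SVC_Handler: 0xf000 -> ret_from_user, 0xe000 -> arch_signal_quit,
 * while lwp_thread_return issues regular syscall #0x01, presumably the
 * exit call of this syscall table.
 */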
lwp_debugreturn:
    mov x8, #0xf000
    svc #0

.global lwp_sigreturn
lwp_sigreturn:
    mov x8, #0xe000
    svc #0

lwp_thread_return:
    mov x0, xzr
    mov x8, #0x01
    svc #0
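
/*
 * Thread ID register accessors: tpidr_el0 is the EL0 read/write software
 * thread ID register, which the userspace ABI uses as the TLS base, so
 * arch_set_thread_area and arch_set_tidr share one implementation.
 */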
.globl arch_get_tidr
arch_get_tidr:
    mrs x0, tpidr_el0
    ret

.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
    msr tpidr_el0, x0
    ret