/* lwp_gcc.S */
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 */
  10. #include "rtconfig.h"
  11. #define Mode_USR 0x10
  12. #define Mode_FIQ 0x11
  13. #define Mode_IRQ 0x12
  14. #define Mode_SVC 0x13
  15. #define Mode_MON 0x16
  16. #define Mode_ABT 0x17
  17. #define Mode_UDF 0x1B
  18. #define Mode_SYS 0x1F
  19. #define A_Bit 0x100
  20. #define I_Bit 0x80 @; when I bit is set, IRQ is disabled
  21. #define F_Bit 0x40 @; when F bit is set, FIQ is disabled
  22. #define T_Bit 0x20
  23. .cpu cortex-a9
  24. .syntax unified
  25. .text
  26. /*
  27. * void arch_start_umode(args, text, ustack, kstack);
  28. */
  29. .global arch_start_umode
  30. .type arch_start_umode, % function
  31. arch_start_umode:
  32. mrs r9, cpsr
  33. bic r9, #0x1f
  34. orr r9, #Mode_USR
  35. cpsid i
  36. msr spsr, r9
  37. mov sp, r3
  38. mov r3, r2 ;/* user stack top */
  39. /* set data address. */
  40. movs pc, r1
  41. /*
  42. * void arch_crt_start_umode(args, text, ustack, kstack);
  43. */
  44. .global arch_crt_start_umode
  45. .type arch_crt_start_umode, % function
  46. arch_crt_start_umode:
  47. cps #Mode_SYS
  48. sub sp, r2, #16
  49. ldr r2, =lwp_thread_return
  50. ldr r4, [r2]
  51. str r4, [sp]
  52. ldr r4, [r2, #4]
  53. str r4, [sp, #4]
  54. ldr r4, [r2, #8]
  55. str r4, [sp, #8]
  56. mov r4, sp
  57. mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
  58. add r4, #4
  59. mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
  60. add r4, #4
  61. mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
  62. dsb
  63. isb
  64. mcr p15, 0, r4, c7, c5, 0 ;//iciallu
  65. dsb
  66. isb
  67. mov lr, sp
  68. cps #Mode_SVC
  69. mrs r9, cpsr
  70. bic r9, #0x1f
  71. orr r9, #Mode_USR
  72. cpsid i
  73. msr spsr, r9
  74. mov sp, r3
  75. /* set data address. */
  76. movs pc, r1
  77. /*
  78. void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
  79. */
  80. .global arch_set_thread_context
  81. arch_set_thread_context:
  82. sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
  83. stmfd r1!, {r0}
  84. mov r12, #0
  85. stmfd r1!, {r12}
  86. stmfd r1!, {r1 - r12}
  87. stmfd r1!, {r12} /* new thread return value */
  88. mrs r12, cpsr
  89. orr r12, #(1 << 7) /* disable irq */
  90. stmfd r1!, {r12} /* spsr */
  91. mov r12, #0
  92. stmfd r1!, {r12} /* now user lr is 0 */
  93. stmfd r1!, {r2} /* user sp */
  94. #ifdef RT_USING_FPU
  95. stmfd r1!, {r12} /* not use fpu */
  96. #endif
  97. str r1, [r3]
  98. mov pc, lr
  99. .global arch_get_user_sp
  100. arch_get_user_sp:
  101. cps #Mode_SYS
  102. mov r0, sp
  103. cps #Mode_SVC
  104. mov pc, lr
  105. .global sys_fork
  106. .global sys_vfork
  107. .global arch_fork_exit
  108. sys_fork:
  109. sys_vfork:
  110. push {r4 - r12, lr}
  111. bl _sys_fork
  112. arch_fork_exit:
  113. pop {r4 - r12, lr}
  114. b arch_syscall_exit
  115. .global sys_clone
  116. .global arch_clone_exit
  117. sys_clone:
  118. push {r4 - r12, lr}
  119. bl _sys_clone
  120. arch_clone_exit:
  121. pop {r4 - r12, lr}
  122. b arch_syscall_exit
  123. /*
  124. void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
  125. */
  126. .global lwp_exec_user
  127. lwp_exec_user:
  128. cpsid i
  129. mov sp, r1
  130. mov lr, r2
  131. mov r2, #Mode_USR
  132. msr spsr_cxsf, r2
  133. ldr r3, =0x80000000
  134. b arch_ret_to_user
  135. /*
  136. * void SVC_Handler(void);
  137. */
  138. .global vector_swi
  139. .type vector_swi, % function
  140. vector_swi:
  141. push {lr}
  142. mrs lr, spsr
  143. push {r4, r5, lr}
  144. cpsie i
  145. push {r0 - r3, r12}
  146. bl rt_thread_self
  147. bl lwp_user_setting_save
  148. and r0, r7, #0xf000
  149. cmp r0, #0xe000
  150. beq arch_signal_quit
  151. cmp r0, #0xf000
  152. beq ret_from_user
  153. and r0, r7, #0xff
  154. bl lwp_get_sys_api
  155. cmp r0, #0 /* r0 = api */
  156. mov lr, r0
  157. pop {r0 - r3, r12}
  158. beq arch_syscall_exit
  159. blx lr
  160. .global arch_syscall_exit
  161. arch_syscall_exit:
  162. cpsid i
  163. pop {r4, r5, lr}
  164. msr spsr_cxsf, lr
  165. pop {lr}
  166. .global arch_ret_to_user
  167. arch_ret_to_user:
  168. push {r0-r3, r12, lr}
  169. bl lwp_check_debug
  170. bl lwp_check_exit_request
  171. cmp r0, #0
  172. beq 1f
  173. mov r0, #0
  174. b sys_exit
  175. 1:
  176. bl lwp_signal_check
  177. cmp r0, #0
  178. pop {r0-r3, r12, lr}
  179. bne user_do_signal
  180. push {r0}
  181. ldr r0, =rt_dbg_ops
  182. ldr r0, [r0]
  183. cmp r0, #0
  184. pop {r0}
  185. beq 2f
  186. push {r0-r3, r12, lr}
  187. mov r0, lr
  188. bl dbg_attach_req
  189. pop {r0-r3, r12, lr}
  190. 2:
  191. movs pc, lr
  192. #ifdef RT_USING_SMART
  193. .global lwp_check_debug
  194. lwp_check_debug:
  195. ldr r0, =rt_dbg_ops
  196. ldr r0, [r0]
  197. cmp r0, #0
  198. bne 1f
  199. bx lr
  200. 1:
  201. push {lr}
  202. bl dbg_check_suspend
  203. cmp r0, #0
  204. beq lwp_check_debug_quit
  205. cps #Mode_SYS
  206. sub sp, #8
  207. ldr r0, =lwp_debugreturn
  208. ldr r1, [r0]
  209. str r1, [sp]
  210. ldr r1, [r0, #4]
  211. str r1, [sp, #4]
  212. mov r1, sp
  213. mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
  214. add r1, #4
  215. mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
  216. dsb
  217. isb
  218. mcr p15, 0, r0, c7, c5, 0 ;//iciallu
  219. dsb
  220. isb
  221. mov r0, sp /* lwp_debugreturn */
  222. cps #Mode_SVC
  223. mrs r1, spsr
  224. push {r1}
  225. mov r1, #Mode_USR
  226. msr spsr_cxsf, r1
  227. movs pc, r0
  228. ret_from_user:
  229. cps #Mode_SYS
  230. add sp, #8
  231. cps #Mode_SVC
  232. /*
  233. pop {r0 - r3, r12}
  234. pop {r4 - r6, lr}
  235. */
  236. add sp, #(4*9)
  237. pop {r4}
  238. msr spsr_cxsf, r4
  239. lwp_check_debug_quit:
  240. pop {pc}
  241. arch_signal_quit:
  242. cpsid i
  243. pop {r0 - r3, r12}
  244. pop {r4, r5, lr}
  245. pop {lr}
  246. bl lwp_signal_restore
  247. /* r0 is user_ctx : ori sp, pc, cpsr*/
  248. ldr r1, [r0]
  249. ldr r2, [r0, #4]
  250. ldr r3, [r0, #8]
  251. msr spsr_cxsf, r3
  252. mov lr, r2
  253. cps #Mode_SYS
  254. mov sp, r1
  255. pop {r0-r12, lr}
  256. cps #Mode_SVC
  257. b arch_ret_to_user
  258. user_do_signal:
  259. mov r0, r0
  260. cps #Mode_SYS
  261. push {r0-r12, lr}
  262. sub sp, #8
  263. ldr r0, =lwp_sigreturn
  264. ldr r1, [r0]
  265. str r1, [sp]
  266. ldr r1, [r0, #4]
  267. str r1, [sp, #4]
  268. mov r1, sp
  269. mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
  270. add r1, #4
  271. mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
  272. dsb
  273. isb
  274. mcr p15, 0, r0, c7, c5, 0 ;//iciallu
  275. dsb
  276. isb
  277. mov r5, sp ;//if func is 0
  278. mov lr, sp
  279. add r0, sp, #8 /* lwp_sigreturn */
  280. cps #Mode_SVC
  281. mov r1, lr
  282. mrs r2, spsr
  283. bl lwp_signal_backup
  284. /* r0 is signal */
  285. mov r4, r0
  286. bl lwp_sighandler_get
  287. mov lr, r0
  288. cmp lr, #0
  289. moveq lr, r5
  290. mov r0, r4
  291. movs pc, lr
  292. lwp_debugreturn:
  293. mov r7, #0xf000
  294. svc #0
  295. lwp_sigreturn:
  296. mov r7, #0xe000
  297. svc #0
  298. lwp_thread_return:
  299. mov r0, #0
  300. mov r7, #0x01
  301. svc #0
  302. #endif
  303. .global check_vfp
  304. check_vfp:
  305. #ifdef RT_USING_FPU
  306. vmrs r0, fpexc
  307. ubfx r0, r0, #30, #1
  308. #else
  309. mov r0, #0
  310. #endif
  311. mov pc, lr
  312. .global get_vfp
  313. get_vfp:
  314. #ifdef RT_USING_FPU
  315. vstmia r0!, {d0-d15}
  316. vstmia r0!, {d16-d31}
  317. vmrs r1, fpscr
  318. str r1, [r0]
  319. #endif
  320. mov pc, lr
  321. .globl arch_get_tidr
  322. arch_get_tidr:
  323. mrc p15, 0, r0, c13, c0, 3
  324. bx lr
  325. .global arch_set_thread_area
  326. arch_set_thread_area:
  327. .globl arch_set_tidr
  328. arch_set_tidr:
  329. mcr p15, 0, r0, c13, c0, 3
  330. bx lr
  331. /* kuser suppurt */
  332. .macro kuser_pad, sym, size
  333. .if (. - \sym) & 3
  334. .rept 4 - (. - \sym) & 3
  335. .byte 0
  336. .endr
  337. .endif
  338. .rept (\size - (. - \sym)) / 4
  339. .word 0xe7fddef1
  340. .endr
  341. .endm
  342. .align 5
  343. .globl __kuser_helper_start
  344. __kuser_helper_start:
  345. __kuser_cmpxchg64: @ 0xffff0f60
  346. stmfd sp!, {r4, r5, r6, lr}
  347. ldmia r0, {r4, r5} @ load old val
  348. ldmia r1, {r6, lr} @ load new val
  349. 1: ldmia r2, {r0, r1} @ load current val
  350. eors r3, r0, r4 @ compare with oldval (1)
  351. eorseq r3, r1, r5 @ compare with oldval (2)
  352. 2: stmiaeq r2, {r6, lr} @ store newval if eq
  353. rsbs r0, r3, #0 @ set return val and C flag
  354. ldmfd sp!, {r4, r5, r6, pc}
  355. kuser_pad __kuser_cmpxchg64, 64
  356. __kuser_memory_barrier: @ 0xffff0fa0
  357. dmb
  358. mov pc, lr
  359. kuser_pad __kuser_memory_barrier, 32
  360. __kuser_cmpxchg: @ 0xffff0fc0
  361. 1: ldr r3, [r2] @ load current val
  362. subs r3, r3, r0 @ compare with oldval
  363. 2: streq r1, [r2] @ store newval if eq
  364. rsbs r0, r3, #0 @ set return val and C flag
  365. mov pc, lr
  366. kuser_pad __kuser_cmpxchg, 32
  367. __kuser_get_tls: @ 0xffff0fe0
  368. mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
  369. mov pc, lr
  370. ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
  371. kuser_pad __kuser_get_tls, 16
  372. .rep 3
  373. .word 0 @ 0xffff0ff0 software TLS value, then
  374. .endr @ pad up to __kuser_helper_version
  375. __kuser_helper_version: @ 0xffff0ffc
  376. .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
  377. .globl __kuser_helper_end
  378. __kuser_helper_end: