/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 */
#include "rtconfig.h"
#include "asm-generic.h"

#define Mode_USR    0x10
#define Mode_FIQ    0x11
#define Mode_IRQ    0x12
#define Mode_SVC    0x13
#define Mode_MON    0x16
#define Mode_ABT    0x17
#define Mode_UDF    0x1B
#define Mode_SYS    0x1F

#define A_Bit       0x100
#define I_Bit       0x80    @; when I bit is set, IRQ is disabled
#define F_Bit       0x40    @; when F bit is set, FIQ is disabled
#define T_Bit       0x20

.cpu cortex-a9
.syntax unified
.text

/*
 * void arch_start_umode(args, text, ustack, kstack);
 */
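/*
 * On entry: r0 = args, r1 = user entry point (text), r2 = user stack top,
 * r3 = kernel stack top. The routine stages a USR-mode CPSR in SPSR and
 * then uses the exception return "movs pc, r1", which jumps to the entry
 * point while copying SPSR into CPSR, i.e. it drops to user mode and
 * branches in a single step.
 */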
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
    mrs     r9, cpsr
    bic     r9, #0x1f
    orr     r9, #Mode_USR
    cpsid   i
    msr     spsr, r9
    mov     sp, r3

    mov     r3, r2  ;/* user stack top */
    /* set data address. */
    movs    pc, r1

/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 */
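/*
 * Same arguments as arch_start_umode, but this variant first copies the
 * three-word lwp_thread_return trampoline (defined near the end of this
 * file) onto the user stack and points the user-mode lr at it, so that
 * when the thread's entry function returns it traps back into the
 * kernel, apparently to perform the thread-exit syscall.
 */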
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
    cps     #Mode_SYS
    sub     sp, r2, #16
    ldr     r2, =lwp_thread_return
    ldr     r4, [r2]
    str     r4, [sp]
    ldr     r4, [r2, #4]
    str     r4, [sp, #4]
    ldr     r4, [r2, #8]
    str     r4, [sp, #8]
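    /*
     * The trampoline was just written as data, so clean each touched
     * D-cache line to the point of unification (DCCMVAU, c7/c11/1) and
     * invalidate the whole I-cache (ICIALLU, c7/c5/0) before those
     * words get executed as code.
     */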
    mov     r4, sp
    mcr     p15, 0, r4, c7, c11, 1  ;//dc cmvau
    add     r4, #4
    mcr     p15, 0, r4, c7, c11, 1  ;//dc cmvau
    add     r4, #4
    mcr     p15, 0, r4, c7, c11, 1  ;//dc cmvau
    dsb
    isb
    mcr     p15, 0, r4, c7, c5, 0   ;//iciallu
    dsb
    isb

    mov     lr, sp
    cps     #Mode_SVC

    mrs     r9, cpsr
    bic     r9, #0x1f
    orr     r9, #Mode_USR
    cpsid   i
    msr     spsr, r9
    mov     sp, r3

    /* set data address. */
    movs    pc, r1

/*
 * void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
 */
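/*
 * Builds the initial kernel-stack frame for a new thread: the exit
 * address, a register snapshot, an SPSR with IRQs masked, a zeroed user
 * lr and the user stack pointer (layout per the stores below), then
 * publishes the final stack pointer through *thread_sp for the
 * scheduler to pick up.
 */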
.global arch_set_thread_context
arch_set_thread_context:
    sub     r1, #(10 * 4 + 4 * 4)   /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
    stmfd   r1!, {r0}
    mov     r12, #0
    stmfd   r1!, {r12}
    stmfd   r1!, {r1 - r12}
    stmfd   r1!, {r12}  /* new thread return value */
    mrs     r12, cpsr
    orr     r12, #(1 << 7)  /* disable irq */
    stmfd   r1!, {r12}  /* spsr */
    mov     r12, #0
    stmfd   r1!, {r12}  /* now user lr is 0 */
    stmfd   r1!, {r2}   /* user sp */
#ifdef RT_USING_FPU
    stmfd   r1!, {r12}  /* not use fpu */
#endif
    str     r1, [r3]
    mov     pc, lr
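
/*
 * SYS mode shares the USR-mode banked sp, so switching to SYS for two
 * instructions is the standard way for SVC-mode code to read the user
 * stack pointer.
 */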
.global arch_get_user_sp
arch_get_user_sp:
    cps     #Mode_SYS
    mov     r0, sp
    cps     #Mode_SVC
    mov     pc, lr
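
/*
 * fork/vfork/clone wrappers: save the callee-saved registers and lr
 * around the C implementations (_sys_fork / _sys_clone) and return
 * through the common arch_syscall_exit path. The arch_*_exit labels are
 * exported, apparently so the child side of a fork/clone can be resumed
 * at the same exit sequence.
 */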
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
    push    {r4 - r12, lr}
    bl      _sys_fork
arch_fork_exit:
    pop     {r4 - r12, lr}
    b       arch_syscall_exit

.global sys_clone
.global arch_clone_exit
sys_clone:
    push    {r4 - r12, lr}
    bl      _sys_clone
arch_clone_exit:
    pop     {r4 - r12, lr}
    b       arch_syscall_exit

/*
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 */
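/*
 * Installs the given kernel stack, stages Mode_USR in SPSR with lr set
 * to the user entry point, and drops into user space through the common
 * arch_ret_to_user path.
 */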
.global lwp_exec_user
lwp_exec_user:
    cpsid   i
    mov     sp, r1
    mov     lr, r2
    mov     r2, #Mode_USR
    msr     spsr_cxsf, r2
    ldr     r3, =0x80000000
    b       arch_ret_to_user

/*
 * void SVC_Handler(void);
 */
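/*
 * Syscall entry. The syscall number arrives in r7. Two magic values are
 * reserved for the stack-borne trampolines defined below: 0xe000
 * re-enters via arch_signal_quit (signal return) and 0xf000 via
 * ret_from_user (debug return). Anything else indexes the syscall table
 * through lwp_get_sys_api; a missing handler is routed to
 * arch_syscall_exit.
 */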
.global vector_swi
.type vector_swi, %function
START_POINT(vector_swi)
    push    {lr}
    mrs     lr, spsr
    push    {r4, r5, lr}
    cpsie   i

    push    {r0 - r3, r12}
    bl      rt_thread_self
    bl      lwp_user_setting_save

    and     r0, r7, #0xf000
    cmp     r0, #0xe000
    beq     arch_signal_quit
    cmp     r0, #0xf000
    beq     ret_from_user

    and     r0, r7, #0xff
    bl      lwp_get_sys_api
    cmp     r0, #0  /* r0 = api */
    mov     lr, r0
    pop     {r0 - r3, r12}
    beq     arch_syscall_exit
    blx     lr
START_POINT_END(vector_swi)

.global arch_syscall_exit
arch_syscall_exit:
    cpsid   i
    pop     {r4, r5, lr}
    msr     spsr_cxsf, lr
    pop     {lr}
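
/*
 * Every return to user space funnels through here: check for a debugger
 * suspend (lwp_check_debug), an exit request (lwp_check_exit_request ->
 * sys_exit), and pending signals (lwp_signal_check -> user_do_signal),
 * offer a debugger attach via dbg_attach_req, then exception-return
 * with "movs pc, lr".
 */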
.global arch_ret_to_user
arch_ret_to_user:
    push    {r0 - r3, r12, lr}
    bl      lwp_check_debug
    bl      lwp_check_exit_request
    cmp     r0, #0
    beq     1f
    mov     r0, #0
    b       sys_exit
1:
    bl      lwp_signal_check
    cmp     r0, #0
    pop     {r0 - r3, r12, lr}
    bne     user_do_signal

    push    {r0}
    ldr     r0, =rt_dbg_ops
    ldr     r0, [r0]
    cmp     r0, #0
    pop     {r0}
    beq     2f
    push    {r0 - r3, r12, lr}
    mov     r0, lr
    bl      dbg_attach_req
    pop     {r0 - r3, r12, lr}
2:
    movs    pc, lr

#ifdef RT_USING_SMART
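/*
 * If a debugger is installed (rt_dbg_ops non-null) and dbg_check_suspend
 * asks for a stop, push the two-word lwp_debugreturn trampoline onto the
 * user stack (with the same clean/invalidate dance as above) and
 * exception-return into it; the trampoline traps straight back in with
 * r7 = 0xf000, and ret_from_user unwinds the temporary frame.
 */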
.global lwp_check_debug
lwp_check_debug:
    ldr     r0, =rt_dbg_ops
    ldr     r0, [r0]
    cmp     r0, #0
    bne     1f
    bx      lr
1:
    push    {lr}
    bl      dbg_check_suspend
    cmp     r0, #0
    beq     lwp_check_debug_quit

    cps     #Mode_SYS
    sub     sp, #8
    ldr     r0, =lwp_debugreturn
    ldr     r1, [r0]
    str     r1, [sp]
    ldr     r1, [r0, #4]
    str     r1, [sp, #4]

    mov     r1, sp
    mcr     p15, 0, r1, c7, c11, 1  ;//dc cmvau
    add     r1, #4
    mcr     p15, 0, r1, c7, c11, 1  ;//dc cmvau
    dsb
    isb
    mcr     p15, 0, r0, c7, c5, 0   ;//iciallu
    dsb
    isb

    mov     r0, sp  /* lwp_debugreturn */
    cps     #Mode_SVC

    mrs     r1, spsr
    push    {r1}
    mov     r1, #Mode_USR
    msr     spsr_cxsf, r1
    movs    pc, r0
ret_from_user:
    cps     #Mode_SYS
    add     sp, #8
    cps     #Mode_SVC
    /*
    pop {r0 - r3, r12}
    pop {r4 - r6, lr}
    */
    add     sp, #(4*9)
    pop     {r4}
    msr     spsr_cxsf, r4
lwp_check_debug_quit:
    pop     {pc}
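
/*
 * Entered from the 0xe000 syscall issued by the lwp_sigreturn
 * trampoline: drop the trap frame, let lwp_signal_restore hand back the
 * saved user context (sp, pc, cpsr), reload the user registers from the
 * user stack and leave through arch_ret_to_user.
 */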
arch_signal_quit:
    cpsid   i
    pop     {r0 - r3, r12}
    pop     {r4, r5, lr}
    pop     {lr}
    bl      lwp_signal_restore
    /* r0 is user_ctx : ori sp, pc, cpsr */
    ldr     r1, [r0]
    ldr     r2, [r0, #4]
    ldr     r3, [r0, #8]
    msr     spsr_cxsf, r3
    mov     lr, r2
    cps     #Mode_SYS
    mov     sp, r1
    pop     {r0 - r12, lr}
    cps     #Mode_SVC
    b       arch_ret_to_user
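
/*
 * Signal delivery: save the full user register set plus the two-word
 * lwp_sigreturn trampoline on the user stack, record the context via
 * lwp_signal_backup, fetch the handler with lwp_sighandler_get and jump
 * to it with r0 = signal number. The user lr points at the trampoline,
 * so the handler returns into the 0xe000 syscall; if no handler is
 * registered, control goes to the trampoline directly.
 */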
user_do_signal:
    mov     r0, r0
    cps     #Mode_SYS
    push    {r0 - r12, lr}
    sub     sp, #8
    ldr     r0, =lwp_sigreturn
    ldr     r1, [r0]
    str     r1, [sp]
    ldr     r1, [r0, #4]
    str     r1, [sp, #4]

    mov     r1, sp
    mcr     p15, 0, r1, c7, c11, 1  ;//dc cmvau
    add     r1, #4
    mcr     p15, 0, r1, c7, c11, 1  ;//dc cmvau
    dsb
    isb
    mcr     p15, 0, r0, c7, c5, 0   ;//iciallu
    dsb
    isb

    mov     r5, sp  ;//if func is 0
    mov     lr, sp
    add     r0, sp, #8  /* lwp_sigreturn */
    cps     #Mode_SVC

    mov     r1, lr
    mrs     r2, spsr
    bl      lwp_signal_backup
    /* r0 is signal */
    mov     r4, r0
    bl      lwp_sighandler_get
    mov     lr, r0
    cmp     lr, #0
    moveq   lr, r5
    mov     r0, r4
    movs    pc, lr
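
/*
 * User-mode trampolines. These stubs are copied onto the user stack and
 * executed there; each loads a reserved number into r7 and traps back
 * into vector_swi: 0xf000 resumes after a debugger stop, 0xe000 returns
 * from a signal handler, and lwp_thread_return issues syscall #1,
 * apparently the thread-exit call.
 */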
lwp_debugreturn:
    mov     r7, #0xf000
    svc     #0

lwp_sigreturn:
    mov     r7, #0xe000
    svc     #0

lwp_thread_return:
    mov     r0, #0
    mov     r7, #0x01
    svc     #0
#endif
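
/*
 * check_vfp returns FPEXC.EN (bit 30), i.e. 1 when the VFP/NEON unit is
 * enabled, and always 0 when the kernel is built without FPU support.
 */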
.global check_vfp
check_vfp:
#ifdef RT_USING_FPU
    vmrs    r0, fpexc
    ubfx    r0, r0, #30, #1
#else
    mov     r0, #0
#endif
    mov     pc, lr

.global get_vfp
get_vfp:
#ifdef RT_USING_FPU
    vstmia  r0!, {d0 - d15}
    vstmia  r0!, {d16 - d31}
    vmrs    r1, fpscr
    str     r1, [r0]
#endif
    mov     pc, lr
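
/*
 * c13, c0, 3 is TPIDRURO, the user read-only thread ID register: it is
 * writable from privileged code only, which makes it the usual place to
 * publish the TLS pointer to user space. arch_set_thread_area shares
 * the setter.
 */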
.globl arch_get_tidr
arch_get_tidr:
    mrc     p15, 0, r0, c13, c0, 3
    bx      lr

.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
    mcr     p15, 0, r0, c13, c0, 3
    bx      lr

/* kuser support */
.macro kuser_pad, sym, size
    .if     (. - \sym) & 3
    .rept   4 - (. - \sym) & 3
    .byte   0
    .endr
    .endif
    .rept   (\size - (. - \sym)) / 4
    .word   0xe7fddef1
    .endr
.endm
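
/*
 * Kuser helpers, modeled on Linux's: each routine sits at a fixed
 * offset (the "@ 0xffff0f60"-style comments) near the top of the
 * address space, so user code can call it without any library support.
 * kuser_pad fills the unused tail of each slot with 0xe7fddef1, an
 * architecturally undefined instruction, so a stray jump faults instead
 * of falling through.
 */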
.align 5
.globl __kuser_helper_start
__kuser_helper_start:

__kuser_cmpxchg64:          @ 0xffff0f60
    stmfd   sp!, {r4, r5, r6, lr}
    ldmia   r0, {r4, r5}    @ load old val
    ldmia   r1, {r6, lr}    @ load new val
1:  ldmia   r2, {r0, r1}    @ load current val
    eors    r3, r0, r4      @ compare with oldval (1)
    eorseq  r3, r1, r5      @ compare with oldval (2)
2:  stmiaeq r2, {r6, lr}    @ store newval if eq
    rsbs    r0, r3, #0      @ set return val and C flag
    ldmfd   sp!, {r4, r5, r6, pc}

kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:     @ 0xffff0fa0
    dmb
    mov     pc, lr

kuser_pad __kuser_memory_barrier, 32
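
/*
 * Single-word compare-and-swap. Per the Linux kuser ABI this mirrors,
 * the user-side signature is roughly:
 *
 *   int __kuser_cmpxchg(int oldval, int newval, volatile int *ptr);
 *
 * It returns zero (with the C flag set) when *ptr matched oldval and
 * the swap was performed, nonzero otherwise.
 */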
__kuser_cmpxchg:            @ 0xffff0fc0
1:  ldr     r3, [r2]        @ load current val
    subs    r3, r3, r0      @ compare with oldval
2:  streq   r1, [r2]        @ store newval if eq
    rsbs    r0, r3, #0      @ set return val and C flag
    mov     pc, lr

kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:            @ 0xffff0fe0
    mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
    mov     pc, lr
    ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
kuser_pad __kuser_get_tls, 16

.rep 3
    .word   0               @ 0xffff0ff0 software TLS value, then
.endr                       @ pad up to __kuser_helper_version

__kuser_helper_version:     @ 0xffff0ffc
    .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

.globl __kuser_helper_end
__kuser_helper_end: