contex_gcc_mw.S

/*
 * Copyright (c) 2018, Synopsys, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define __ASSEMBLY__
#include "include/arc/arc.h"
#include "include/arc/arc_asm_common.h"

.global rt_interrupt_enter
.global rt_interrupt_leave
.global rt_thread_switch_interrupt_flag
.global rt_interrupt_from_thread
.global rt_interrupt_to_thread
.global exc_nest_count
.global set_hw_stack_check

.text
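
/*
 * dispatcher: common context-switch core.
 * On entry r0 holds the address of the outgoing thread's saved-sp slot and
 * r1 the address of the incoming thread's saved-sp slot. The current sp is
 * stored through r0, the new sp is loaded through r1, the hardware stack-check
 * limits are re-programmed for the incoming thread (with checking temporarily
 * disabled around the update), and execution resumes at the address popped
 * from the new thread's stack.
 */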
.align 4
dispatcher:
    st      sp, [r0]
    ld      sp, [r1]
#if ARC_FEATURE_STACK_CHECK
#if ARC_FEATURE_SEC_PRESENT
    lr      r0, [AUX_SEC_STAT]
    bclr    r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag   r0
#else
    lr      r0, [AUX_STATUS32]
    bclr    r0, r0, AUX_STATUS_BIT_SC
    kflag   r0
#endif
    jl      set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
    lr      r0, [AUX_SEC_STAT]
    bset    r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag   r0
#else
    lr      r0, [AUX_STATUS32]
    bset    r0, r0, AUX_STATUS_BIT_SC
    kflag   r0
#endif
#endif
    pop     r0
    j       [r0]

/* return routine when task dispatch happened in task context */
dispatch_r:
    RESTORE_NONSCRATCH_REGS
    RESTORE_R0_TO_R12
    j       [blink]

/*
 * rt_base_t rt_hw_interrupt_disable();
 */
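/*
 * clri disables interrupts and returns the previous interrupt enable/priority
 * state in r0; the caller keeps this value as the "level" to hand back to
 * rt_hw_interrupt_enable().
 */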
.global rt_hw_interrupt_disable
.align 4
rt_hw_interrupt_disable:
    clri    r0
    j       [blink]

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
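/*
 * seti restores the interrupt enable/priority state previously returned by
 * rt_hw_interrupt_disable() and passed in here as "level" (r0).
 */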
.global rt_hw_interrupt_enable
.align 4
rt_hw_interrupt_enable:
    seti    r0
    j       [blink]
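
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Records a context-switch request made from interrupt/exception context:
 * the outgoing thread's sp slot is latched only for the first request, the
 * target is always updated, and rt_thread_switch_interrupt_flag is set so the
 * switch is performed on the way out of the interrupt (ret_int_2) or the
 * exception (ret_exc_2).
 */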
.global rt_hw_context_switch_interrupt
.align 4
rt_hw_context_switch_interrupt:
    ld      r2, [rt_thread_switch_interrupt_flag]
    breq    r2, 1, _reswitch    /* flag already set: keep the original "from" thread */
    mov     r2, 1
    st      r2, [rt_thread_switch_interrupt_flag]
    st      r0, [rt_interrupt_from_thread]
_reswitch:
    st      r1, [rt_interrupt_to_thread]
    j       [blink]

/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
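/*
 * Switch context in task (thread) context: the scratch and non-scratch
 * registers of the current thread are saved on its stack, dispatch_r is
 * pushed as the address at which the thread will later resume, and control
 * falls through to dispatcher with r0/r1 still holding the from/to sp slots.
 */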
.global rt_hw_context_switch
.align 4
rt_hw_context_switch:
    SAVE_R0_TO_R12
    SAVE_NONSCRATCH_REGS
    mov     r2, dispatch_r
    push    r2
    b       dispatcher

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
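/*
 * Switch to a thread without saving any outgoing context; typically used when
 * the scheduler starts the first thread. The new sp is loaded from the "to"
 * slot, the hardware stack-check limits are re-programmed if enabled, and
 * execution resumes at the address popped from the new thread's stack.
 */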
.global rt_hw_context_switch_to
.align 4
rt_hw_context_switch_to:
    ld      sp, [r0]
#if ARC_FEATURE_STACK_CHECK
    mov     r1, r0
#if ARC_FEATURE_SEC_PRESENT
    lr      r0, [AUX_SEC_STAT]
    bclr    r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag   r0
#else
    lr      r0, [AUX_STATUS32]
    bclr    r0, r0, AUX_STATUS_BIT_SC
    kflag   r0
#endif
    jl      set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
    lr      r0, [AUX_SEC_STAT]
    bset    r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag   r0
#else
    lr      r0, [AUX_STATUS32]
    bset    r0, r0, AUX_STATUS_BIT_SC
    kflag   r0
#endif
#endif
    pop     r0
    j       [r0]
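
/*
 * start_r: first-run stub for a newly created thread. It pops the frame laid
 * out by the C-side stack initialisation -- blink (return/exit handler),
 * r1 (thread entry), r2 (initial STATUS32), r0 (entry parameter) -- then
 * jumps to the entry point, applying the status word with kflag in the
 * delay slot.
 */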
.global start_r
.align 4
start_r:
    pop     blink
    pop     r1
    pop     r2
    pop     r0
    j_s.d   [r1]
    kflag   r2

/*
 * int __rt_ffs(int value);
 * r0 --> value
 */
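/*
 * Returns the 1-based index of the least-significant set bit of value,
 * or 0 when value is 0 (the ffs result is biased by one).
 */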
.global __rt_ffs
.align 4
__rt_ffs:
    breq    r0, 0, __rt_ffs_return
    ffs     r1, r0
    add     r0, r1, 1
__rt_ffs_return:
    j       [blink]

/****** exceptions and interrupts handling ******/

/****** entry for exception handling ******/
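/*
 * exc_entry_cpu: common CPU exception entry.
 * The exception frame is saved (EXCEPTION_PROLOGUE), exc_nest_count is
 * incremented, and the stack is switched to _e_stack when the exception was
 * raised in task context. The handler is looked up in exc_int_handler_table
 * using the vector number taken from AUX_ECR and called with a pointer to the
 * saved frame. On return, ret_exc either unwinds directly (nested case) or
 * performs the thread switch requested via rt_thread_switch_interrupt_flag.
 */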
.global exc_entry_cpu
.align 4
exc_entry_cpu:
    EXCEPTION_PROLOGUE
    mov     blink, sp
    mov     r3, sp              /* as the exception handler's parameter (p_excinfo) */
    ld      r0, [exc_nest_count]
    add     r1, r0, 1
    st      r1, [exc_nest_count]
    brne    r0, 0, exc_handler_1
    /* change to the exception stack if the exception was raised in task context */
    mov     sp, _e_stack
exc_handler_1:
    PUSH    blink
    lr      r0, [AUX_ECR]
    lsr     r0, r0, 16
    mov     r1, exc_int_handler_table
    ld.as   r2, [r1, r0]
    mov     r0, r3
    jl      [r2]
    /* interrupts are not allowed during exception handling */
ret_exc:
    POP     sp
    mov     r1, exc_nest_count
    ld      r0, [r1]
    sub     r0, r0, 1
    st      r0, [r1]
    brne    r0, 0, ret_exc_1    /* nested exception case */
    lr      r1, [AUX_IRQ_ACT]   /* nested interrupt case */
    brne    r1, 0, ret_exc_1
    ld      r0, [rt_thread_switch_interrupt_flag]
    brne    r0, 0, ret_exc_2
ret_exc_1:  /* return from non-task context: interrupts or exceptions are nested */
    EXCEPTION_EPILOGUE
    rtie
    /* there is a dispatch request */
ret_exc_2:
    /* clear the dispatch request */
    mov     r0, 0
    st      r0, [rt_thread_switch_interrupt_flag]
    SAVE_CALLEE_REGS            /* save callee-saved registers */
    /* clear the exception bit so the exception exit is done by software */
    lr      r0, [AUX_STATUS32]
    bclr    r0, r0, AUX_STATUS_BIT_AE
    kflag   r0
    mov     r1, ret_exc_r       /* save the return address */
    PUSH    r1
    ld      r0, [rt_interrupt_from_thread]
    ld      r1, [rt_interrupt_to_thread]
    b       dispatcher
ret_exc_r:
    /* recover the exception status */
    lr      r0, [AUX_STATUS32]
    bset    r0, r0, AUX_STATUS_BIT_AE
    kflag   r0
    RESTORE_CALLEE_REGS
    EXCEPTION_EPILOGUE
    rtie

/****** entry for normal interrupt handling ******/
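/*
 * exc_entry_int: normal (non-fast) interrupt entry.
 * When FIRQ support is enabled, priority-0 interrupts are diverted to
 * exc_entry_firq. Otherwise the interrupt frame is saved, exc_nest_count is
 * incremented and the stack is switched to _e_stack on the first entry from
 * task context. The handler from exc_int_handler_table is called between
 * rt_interrupt_enter()/rt_interrupt_leave(); a matching software-triggered
 * interrupt (AUX_IRQ_HINT) is cleared before the call. At ret_int the code
 * either returns directly or performs the thread switch requested via
 * rt_hw_context_switch_interrupt().
 */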
.global exc_entry_int           /* entry for interrupt handling */
.align 4
exc_entry_int:
#if ARC_FEATURE_FIRQ == 1
/* check whether it is a priority 0 (fast) interrupt */
#if ARC_FEATURE_RGF_NUM_BANKS > 1
    lr      r0, [AUX_IRQ_ACT]
    btst    r0, 0
    jnz     exc_entry_firq
#else
    PUSH    r10
    lr      r10, [AUX_IRQ_ACT]
    btst    r10, 0
    POP     r10
    jnz     exc_entry_firq
#endif
#endif
    INTERRUPT_PROLOGUE
    mov     blink, sp
    clri                        /* disable interrupts */
    ld      r3, [exc_nest_count]
    add     r2, r3, 1
    st      r2, [exc_nest_count]
    seti                        /* re-enable higher priority interrupts */
    brne    r3, 0, irq_handler_1
    /* change to the exception stack if the interrupt happened in task context */
    mov     sp, _e_stack
#if ARC_FEATURE_STACK_CHECK
#if ARC_FEATURE_SEC_PRESENT
    lr      r0, [AUX_SEC_STAT]
    bclr    r0, r0, AUX_SEC_STAT_BIT_SSC
    sflag   r0
#else
    lr      r0, [AUX_STATUS32]
    bclr    r0, r0, AUX_STATUS_BIT_SC
    kflag   r0
#endif
#endif
irq_handler_1:
    PUSH    blink
    jl      rt_interrupt_enter
    lr      r0, [AUX_IRQ_CAUSE]
    sr      r0, [AUX_IRQ_SELECT]
    mov     r1, exc_int_handler_table
    ld.as   r2, [r1, r0]        /* r2 = exc_int_handler_table + irqno * 4 */
    /* clear AUX_IRQ_HINT if this interrupt was software-triggered */
    lr      r3, [AUX_IRQ_HINT]
    cmp     r3, r0
    bne.d   irq_hint_handled
    xor     r3, r3, r3
    sr      r3, [AUX_IRQ_HINT]
irq_hint_handled:
    lr      r3, [AUX_IRQ_PRIORITY]
    PUSH    r3                  /* save the irq priority */
    jl      [r2]                /* jump to the interrupt handler */
    jl      rt_interrupt_leave
ret_int:
    clri                        /* disable interrupts */
    POP     r3                  /* irq priority */
    POP     sp
    mov     r1, exc_nest_count
    ld      r0, [r1]
    sub     r0, r0, 1
    st      r0, [r1]
    /* if bits other than the current priority are still set in AUX_IRQ_ACT,
     * we are still in a nested interrupt */
    lr      r0, [AUX_IRQ_CAUSE]
    sr      r0, [AUX_IRQ_SELECT]
    lr      r3, [AUX_IRQ_PRIORITY]
    lr      r1, [AUX_IRQ_ACT]
    bclr    r2, r1, r3
    brne    r2, 0, ret_int_1
    ld      r0, [rt_thread_switch_interrupt_flag]
    brne    r0, 0, ret_int_2
ret_int_1:  /* return from non-task context */
    INTERRUPT_EPILOGUE
    rtie
    /* there is a dispatch request */
ret_int_2:
    /* clear the dispatch request */
    mov     r0, 0
    st      r0, [rt_thread_switch_interrupt_flag]
    /* interrupt return handled by software */
    lr      r10, [AUX_IRQ_ACT]
    PUSH    r10
    bclr    r10, r10, r3        /* clear the current priority bit in AUX_IRQ_ACT */
    sr      r10, [AUX_IRQ_ACT]
    SAVE_CALLEE_REGS            /* save callee-saved registers */
    mov     r1, ret_int_r       /* save the return address */
    PUSH    r1
    ld      r0, [rt_interrupt_from_thread]
    ld      r1, [rt_interrupt_to_thread]
    b       dispatcher
ret_int_r:
    RESTORE_CALLEE_REGS
    /* restore AUX_IRQ_ACT to recover the interrupt status */
    POPAX   AUX_IRQ_ACT
    INTERRUPT_EPILOGUE
    rtie

/****** entry for fast irq handling ******/
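/*
 * exc_entry_firq: fast interrupt (priority 0) entry.
 * Only a minimal register frame is saved (SAVE_FIQ_EXC_REGS); a matching
 * software-triggered interrupt (AUX_IRQ_HINT) is cleared, and the handler
 * from exc_int_handler_table is called with sp as its argument. No thread
 * switch is performed on this path.
 */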
.global exc_entry_firq
.weak exc_entry_firq
.align 4
exc_entry_firq:
    SAVE_FIQ_EXC_REGS
    lr      r0, [AUX_IRQ_CAUSE]
    mov     r1, exc_int_handler_table
    /* r2 = exc_int_handler_table + irqno * 4 */
    ld.as   r2, [r1, r0]
    /* clear AUX_IRQ_HINT if this interrupt was software-triggered */
    lr      r3, [AUX_IRQ_HINT]
    cmp     r3, r0
    bne.d   firq_hint_handled
    xor     r3, r3, r3
    sr      r3, [AUX_IRQ_HINT]
firq_hint_handled:
    /* jump to the interrupt handler */
    mov     r0, sp
    jl      [r2]
firq_return:
    RESTORE_FIQ_EXC_REGS
    rtie