/* contex_gcc_mw.S — ARC context switch and interrupt/exception entry (GNU & MetaWare assembly) */
/*
 * Copyright (c) 2018, Synopsys, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define __ASSEMBLY__
#include "include/arc/arc.h"
#include "include/arc/arc_asm_common.h"

.global rt_interrupt_enter;
.global rt_interrupt_leave;
.global context_switch_reqflg;
.global rt_interrupt_from_thread;
.global rt_interrupt_to_thread;
.global exc_nest_count;
.global set_hw_stack_check;

.text
  17. .align 4
  18. dispatcher:
  19. st sp, [r0]
  20. ld sp, [r1]
  21. #if ARC_FEATURE_STACK_CHECK
  22. #if ARC_FEATURE_SEC_PRESENT
  23. lr r0, [AUX_SEC_STAT]
  24. bclr r0, r0, AUX_SEC_STAT_BIT_SSC
  25. sflag r0
  26. #else
  27. lr r0, [AUX_STATUS32]
  28. bclr r0, r0, AUX_STATUS_BIT_SC
  29. kflag r0
  30. #endif
  31. jl set_hw_stack_check
  32. #if ARC_FEATURE_SEC_PRESENT
  33. lr r0, [AUX_SEC_STAT]
  34. bset r0, r0, AUX_SEC_STAT_BIT_SSC
  35. sflag r0
  36. #else
  37. lr r0, [AUX_STATUS32]
  38. bset r0, r0, AUX_STATUS_BIT_SC
  39. kflag r0
  40. #endif
  41. #endif
  42. pop r0
  43. j [r0]
  44. /* return routine when task dispatch happened in task context */
  45. dispatch_r:
  46. RESTORE_NONSCRATCH_REGS
  47. j [blink]
  48. /*
  49. * rt_base_t rt_hw_interrupt_disable();
  50. */
  51. .global rt_hw_interrupt_disable
  52. .align 4
  53. rt_hw_interrupt_disable:
  54. clri r0
  55. j [blink]
  56. /*
  57. * void rt_hw_interrupt_enable(rt_base_t level);
  58. */
  59. .global rt_hw_interrupt_enable
  60. .align 4
  61. rt_hw_interrupt_enable:
  62. seti r0
  63. j [blink]
  64. .global rt_hw_context_switch_interrupt
  65. .align 4
  66. rt_hw_context_switch_interrupt:
  67. st r0, [rt_interrupt_from_thread]
  68. st r1, [rt_interrupt_to_thread]
  69. mov r0, 1
  70. st r0, [context_switch_reqflg]
  71. j [blink]
  72. /*
  73. * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
  74. * r0 --> from
  75. * r1 --> to
  76. */
  77. .global rt_hw_context_switch
  78. .align 4
  79. rt_hw_context_switch:
  80. SAVE_NONSCRATCH_REGS
  81. mov r2, dispatch_r
  82. push r2
  83. b dispatcher
  84. /*
  85. * void rt_hw_context_switch_to(rt_uint32 to);
  86. * r0 --> to
  87. */
  88. .global rt_hw_context_switch_to
  89. .align 4
  90. rt_hw_context_switch_to:
  91. ld sp, [r0]
  92. #if ARC_FEATURE_STACK_CHECK
  93. mov r1, r0
  94. #if ARC_FEATURE_SEC_PRESENT
  95. lr r0, [AUX_SEC_STAT]
  96. bclr r0, r0, AUX_SEC_STAT_BIT_SSC
  97. sflag r0
  98. #else
  99. lr r0, [AUX_STATUS32]
  100. bclr r0, r0, AUX_STATUS_BIT_SC
  101. kflag r0
  102. #endif
  103. jl set_hw_stack_check
  104. #if ARC_FEATURE_SEC_PRESENT
  105. lr r0, [AUX_SEC_STAT]
  106. bset r0, r0, AUX_SEC_STAT_BIT_SSC
  107. sflag r0
  108. #else
  109. lr r0, [AUX_STATUS32]
  110. bset r0, r0, AUX_STATUS_BIT_SC
  111. kflag r0
  112. #endif
  113. #endif
  114. pop r0
  115. j [r0]
  116. .global start_r
  117. .align 4
  118. start_r:
  119. pop blink;
  120. pop r1
  121. pop r2
  122. pop r0
  123. j_s.d [r1]
  124. kflag r2
  125. /*
  126. * int __rt_ffs(int value);
  127. * r0 --> value
  128. */
  129. .global __rt_ffs
  130. .align 4
  131. __rt_ffs:
  132. breq r0, 0, __rt_ffs_return
  133. ffs r1, r0
  134. add r0, r1, 1
  135. __rt_ffs_return:
  136. j [blink]
  137. /****** exceptions and interrupts handing ******/
  138. /****** entry for exception handling ******/
  139. .global exc_entry_cpu
  140. .align 4
  141. exc_entry_cpu:
  142. EXCEPTION_PROLOGUE
  143. mov blink, sp
  144. mov r3, sp /* as exception handler's para(p_excinfo) */
  145. ld r0, [exc_nest_count]
  146. add r1, r0, 1
  147. st r1, [exc_nest_count]
  148. brne r0, 0, exc_handler_1
  149. /* change to exception stack if interrupt happened in task context */
  150. mov sp, _e_stack
  151. exc_handler_1:
  152. PUSH blink
  153. lr r0, [AUX_ECR]
  154. lsr r0, r0, 16
  155. mov r1, exc_int_handler_table
  156. ld.as r2, [r1, r0]
  157. mov r0, r3
  158. jl [r2]
  159. /* interrupts are not allowed */
  160. ret_exc:
  161. POP sp
  162. mov r1, exc_nest_count
  163. ld r0, [r1]
  164. sub r0, r0, 1
  165. st r0, [r1]
  166. brne r0, 0, ret_exc_1 /* nest exception case */
  167. lr r1, [AUX_IRQ_ACT] /* nest interrupt case */
  168. brne r1, 0, ret_exc_1
  169. ld r0, [context_switch_reqflg]
  170. brne r0, 0, ret_exc_2
  171. ret_exc_1: /* return from non-task context, interrupts or exceptions are nested */
  172. EXCEPTION_EPILOGUE
  173. rtie
  174. /* there is a dispatch request */
  175. ret_exc_2:
  176. /* clear dispatch request */
  177. mov r0, 0
  178. st r0, [context_switch_reqflg]
  179. SAVE_CALLEE_REGS /* save callee save registers */
  180. /* clear exception bit to do exception exit by SW */
  181. lr r0, [AUX_STATUS32]
  182. bclr r0, r0, AUX_STATUS_BIT_AE
  183. kflag r0
  184. mov r1, ret_exc_r /* save return address */
  185. PUSH r1
  186. ld r0, [rt_interrupt_from_thread]
  187. ld r1, [rt_interrupt_to_thread]
  188. b dispatcher
  189. ret_exc_r:
  190. /* recover exception status */
  191. lr r0, [AUX_STATUS32]
  192. bset r0, r0, AUX_STATUS_BIT_AE
  193. kflag r0
  194. RESTORE_CALLEE_REGS
  195. EXCEPTION_EPILOGUE
  196. rtie
  197. /****** entry for normal interrupt exception handling ******/
  198. .global exc_entry_int /* entry for interrupt handling */
  199. .align 4
  200. exc_entry_int:
  201. #if ARC_FEATURE_FIRQ == 1
  202. /* check whether it is P0 interrupt */
  203. #if ARC_FEATURE_RGF_NUM_BANKS > 1
  204. lr r0, [AUX_IRQ_ACT]
  205. btst r0, 0
  206. jnz exc_entry_firq
  207. #else
  208. PUSH r10
  209. lr r10, [AUX_IRQ_ACT]
  210. btst r10, 0
  211. POP r10
  212. jnz exc_entry_firq
  213. #endif
  214. #endif
  215. INTERRUPT_PROLOGUE
  216. mov blink, sp
  217. clri /* disable interrupt */
  218. ld r3, [exc_nest_count]
  219. add r2, r3, 1
  220. st r2, [exc_nest_count]
  221. seti /* enable higher priority interrupt */
  222. brne r3, 0, irq_handler_1
  223. /* change to exception stack if interrupt happened in task context */
  224. mov sp, _e_stack
  225. #if ARC_FEATURE_STACK_CHECK
  226. #if ARC_FEATURE_SEC_PRESENT
  227. lr r0, [AUX_SEC_STAT]
  228. bclr r0, r0, AUX_SEC_STAT_BIT_SSC
  229. sflag r0
  230. #else
  231. lr r0, [AUX_STATUS32]
  232. bclr r0, r0, AUX_STATUS_BIT_SC
  233. kflag r0
  234. #endif
  235. #endif
  236. irq_handler_1:
  237. PUSH blink
  238. jl rt_interrupt_enter
  239. lr r0, [AUX_IRQ_CAUSE]
  240. sr r0, [AUX_IRQ_SELECT]
  241. mov r1, exc_int_handler_table
  242. ld.as r2, [r1, r0] /* r2 = exc_int_handler_table + irqno *4 */
  243. /* handle software triggered interrupt */
  244. lr r3, [AUX_IRQ_HINT]
  245. cmp r3, r0
  246. bne.d irq_hint_handled
  247. xor r3, r3, r3
  248. sr r3, [AUX_IRQ_HINT]
  249. irq_hint_handled:
  250. lr r3, [AUX_IRQ_PRIORITY]
  251. PUSH r3 /* save irq priority */
  252. jl [r2] /* jump to interrupt handler */
  253. jl rt_interrupt_leave
  254. ret_int:
  255. clri /* disable interrupt */
  256. POP r3 /* irq priority */
  257. POP sp
  258. mov r1, exc_nest_count
  259. ld r0, [r1]
  260. sub r0, r0, 1
  261. st r0, [r1]
  262. /* if there are multi-bits set in IRQ_ACT, it's still in nest interrupt */
  263. lr r0, [AUX_IRQ_CAUSE]
  264. sr r0, [AUX_IRQ_SELECT]
  265. lr r3, [AUX_IRQ_PRIORITY]
  266. lr r1, [AUX_IRQ_ACT]
  267. bclr r2, r1, r3
  268. brne r2, 0, ret_int_1
  269. ld r0, [context_switch_reqflg]
  270. brne r0, 0, ret_int_2
  271. ret_int_1: /* return from non-task context */
  272. INTERRUPT_EPILOGUE
  273. rtie
  274. /* there is a dispatch request */
  275. ret_int_2:
  276. /* clear dispatch request */
  277. mov r0, 0
  278. st r0, [context_switch_reqflg]
  279. /* interrupt return by SW */
  280. lr r10, [AUX_IRQ_ACT]
  281. PUSH r10
  282. bclr r10, r10, r3 /* clear related bits in IRQ_ACT */
  283. sr r10, [AUX_IRQ_ACT]
  284. SAVE_CALLEE_REGS /* save callee save registers */
  285. mov r1, ret_int_r /* save return address */
  286. PUSH r1
  287. ld r0, [rt_interrupt_from_thread]
  288. ld r1, [rt_interrupt_to_thread]
  289. b dispatcher
  290. ret_int_r:
  291. RESTORE_CALLEE_REGS
  292. /* recover AUX_IRQ_ACT to restore the interrup status */
  293. POPAX AUX_IRQ_ACT
  294. INTERRUPT_EPILOGUE
  295. rtie
  296. /****** entry for fast irq exception handling ******/
  297. .global exc_entry_firq
  298. .weak exc_entry_firq
  299. .align 4
  300. exc_entry_firq:
  301. SAVE_FIQ_EXC_REGS
  302. lr r0, [AUX_IRQ_CAUSE]
  303. mov r1, exc_int_handler_table
  304. /* r2 = _kernel_exc_tbl + irqno *4 */
  305. ld.as r2, [r1, r0]
  306. /* for the case of software triggered interrupt */
  307. lr r3, [AUX_IRQ_HINT]
  308. cmp r3, r0
  309. bne.d firq_hint_handled
  310. xor r3, r3, r3
  311. sr r3, [AUX_IRQ_HINT]
  312. firq_hint_handled:
  313. /* jump to interrupt handler */
  314. mov r0, sp
  315. jl [r2]
  316. firq_return:
  317. RESTORE_FIQ_EXC_REGS
  318. rtie