/* trap.c */
  1. /*
  2. * Copyright (c) 2021 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. */
  7. #include "hpm_common.h"
  8. #include "hpm_soc.h"
  9. /********************** MCAUSE exception types **************************************/
  10. #define MCAUSE_INSTR_ADDR_MISALIGNED (0U) /* !< Instruction Address misaligned */
  11. #define MCAUSE_INSTR_ACCESS_FAULT (1U) /* !< Instruction access fault */
  12. #define MCAUSE_ILLEGAL_INSTR (2U) /* !< Illegal instruction */
  13. #define MCAUSE_BREAKPOINT (3U) /* !< Breakpoint */
  14. #define MCAUSE_LOAD_ADDR_MISALIGNED (4U) /* !< Load address misaligned */
  15. #define MCAUSE_LOAD_ACCESS_FAULT (5U) /* !< Load access fault */
  16. #define MCAUSE_STORE_AMO_ADDR_MISALIGNED (6U) /* !< Store/AMO address misaligned */
  17. #define MCAUSE_STORE_AMO_ACCESS_FAULT (7U) /* !< Store/AMO access fault */
  18. #define MCAUSE_ECALL_FROM_USER_MODE (8U) /* !< Environment call from User mode */
  19. #define MCAUSE_ECALL_FROM_SUPERVISOR_MODE (9U) /* !< Environment call from Supervisor mode */
  20. #define MCAUSE_ECALL_FROM_MACHINE_MODE (11U) /* !< Environment call from machine mode */
  21. #define MCAUSE_INSTR_PAGE_FAULT (12U) /* !< Instruction page fault */
  22. #define MCAUSE_LOAD_PAGE_FAULT (13) /* !< Load page fault */
  23. #define MCAUSE_STORE_AMO_PAGE_FAULT (15U) /* !< Store/AMO page fault */
  24. #define IRQ_S_SOFT 1
  25. #define IRQ_H_SOFT 2
  26. #define IRQ_M_SOFT 3
  27. #define IRQ_S_TIMER 5
  28. #define IRQ_H_TIMER 6
  29. #define IRQ_M_TIMER 7
  30. #define IRQ_S_EXT 9
  31. #define IRQ_H_EXT 10
  32. #define IRQ_M_EXT 11
  33. #define IRQ_COP 12
  34. #define IRQ_HOST 13
/* Weak default machine timer ISR, called from irq_handler_trap on IRQ_M_TIMER.
 * Override this symbol to handle machine timer interrupts. */
__attribute__((weak)) void mchtmr_isr(void)
{
}
/* Weak default machine software-interrupt ISR, called from irq_handler_trap
 * on IRQ_M_SOFT. Override this symbol to handle machine SWI interrupts. */
__attribute__((weak)) void swi_isr(void)
{
}
  41. __attribute__((weak)) void syscall_handler(long n, long a0, long a1, long a2, long a3)
  42. {
  43. }
  44. __attribute__((weak)) long exception_handler(long cause, long epc)
  45. {
  46. switch (cause) {
  47. case MCAUSE_INSTR_ADDR_MISALIGNED:
  48. break;
  49. case MCAUSE_INSTR_ACCESS_FAULT:
  50. break;
  51. case MCAUSE_ILLEGAL_INSTR:
  52. break;
  53. case MCAUSE_BREAKPOINT:
  54. break;
  55. case MCAUSE_LOAD_ADDR_MISALIGNED:
  56. break;
  57. case MCAUSE_LOAD_ACCESS_FAULT:
  58. break;
  59. case MCAUSE_STORE_AMO_ADDR_MISALIGNED:
  60. break;
  61. case MCAUSE_STORE_AMO_ACCESS_FAULT:
  62. break;
  63. case MCAUSE_ECALL_FROM_USER_MODE:
  64. break;
  65. case MCAUSE_ECALL_FROM_SUPERVISOR_MODE:
  66. break;
  67. case MCAUSE_ECALL_FROM_MACHINE_MODE:
  68. break;
  69. case MCAUSE_INSTR_PAGE_FAULT:
  70. break;
  71. case MCAUSE_LOAD_PAGE_FAULT:
  72. break;
  73. case MCAUSE_STORE_AMO_PAGE_FAULT:
  74. break;
  75. default:
  76. break;
  77. }
  78. /* Unhandled Trap */
  79. return epc;
  80. }
  81. __attribute__((weak)) long exception_s_handler(long cause, long epc)
  82. {
  83. return epc;
  84. }
/* Weak default supervisor software-interrupt ISR, called from
 * irq_handler_s_trap on IRQ_S_SOFT. Override this symbol to handle it. */
__attribute__((weak)) void swi_s_isr(void)
{
}
/* Weak default supervisor timer ISR, called from irq_handler_s_trap on
 * IRQ_S_TIMER. Override this symbol to handle supervisor timer interrupts. */
__attribute__((weak)) void mchtmr_s_isr(void)
{
}
#if !defined(CONFIG_FREERTOS) && !defined(CONFIG_UCOS_III) && !defined(CONFIG_THREADX)
/* Bare-metal build: the machine-mode interrupt attribute makes the compiler
 * generate full context save/restore and the trap return. RTOS ports supply
 * their own trap wrapper, so only the section placement is kept there. */
void irq_handler_trap(void) __attribute__ ((section(".isr_vector"), interrupt("machine"), aligned(4)));
#else
void irq_handler_trap(void) __attribute__ ((section(".isr_vector")));
#endif

/**
 * @brief Machine-mode trap entry.
 *
 * Saves the CSRs that nested handling may clobber (MSTATUS, MEPC, and
 * optionally MXSTATUS / UCODE / FCSR), dispatches on MCAUSE (machine timer,
 * PLIC external interrupt in non-vector mode, machine SWI, machine-mode
 * ecall, or exception), then restores the saved CSRs before returning.
 */
void irq_handler_trap(void)
{
    long mcause = read_csr(CSR_MCAUSE);
    long mepc = read_csr(CSR_MEPC);
    long mstatus = read_csr(CSR_MSTATUS);
#if defined(SUPPORT_PFT_ARCH) && SUPPORT_PFT_ARCH
    long mxstatus = read_csr(CSR_MXSTATUS);
#endif
#ifdef __riscv_dsp
    int ucode = read_csr(CSR_UCODE);
#endif
#ifdef __riscv_flen
    int fcsr = read_fcsr();
#endif
    /* Clobbers list for ecall: tells the compiler the syscall argument
     * registers are live here so it does not reuse them before dispatch.
     * RV32E has no a7, so the syscall number travels in t0 instead. */
#ifdef __riscv_32e
    __asm volatile("" : : :"t0", "a0", "a1", "a2", "a3");
#else
    __asm volatile("" : : :"a7", "a0", "a1", "a2", "a3");
#endif
    /* Do your trap handling */
    if ((mcause & CSR_MCAUSE_INTERRUPT_MASK) && ((mcause & CSR_MCAUSE_EXCEPTION_CODE_MASK) == IRQ_M_TIMER)) {
        /* Machine timer interrupt */
        mchtmr_isr();
    }
#ifdef USE_NONVECTOR_MODE
    else if ((mcause & CSR_MCAUSE_INTERRUPT_MASK) && ((mcause & CSR_MCAUSE_EXCEPTION_CODE_MASK) == IRQ_M_EXT)) {
        typedef void(*isr_func_t)(void);
        /* Machine-level external interrupt: claim from the PLIC and call the
         * registered entry in the vector table. */
        uint32_t irq_index = __plic_claim_irq(HPM_PLIC_BASE, HPM_PLIC_TARGET_M_MODE);
        if (irq_index) {
            /* Workaround: irq number returned by __plic_claim_irq might be 0,
             * which is caused by plic. So skip invalid irq_index as a workaround. */
#if !defined(DISABLE_IRQ_PREEMPTIVE) || (DISABLE_IRQ_PREEMPTIVE == 0)
            /* Re-enable global interrupts so higher-priority IRQs can preempt
             * the handler about to run. */
            enable_global_irq(CSR_MSTATUS_MIE_MASK);
#endif
            ((isr_func_t)__vector_table[irq_index])();
            __plic_complete_irq(HPM_PLIC_BASE, HPM_PLIC_TARGET_M_MODE, irq_index);
        }
    }
#endif
    else if ((mcause & CSR_MCAUSE_INTERRUPT_MASK) && ((mcause & CSR_MCAUSE_EXCEPTION_CODE_MASK) == IRQ_M_SOFT)) {
        /* Machine software (SWI) interrupt */
        swi_isr();
        intc_m_complete_swi();
    } else if (!(mcause & CSR_MCAUSE_INTERRUPT_MASK) && ((mcause & CSR_MCAUSE_EXCEPTION_CODE_MASK) == MCAUSE_ECALL_FROM_MACHINE_MODE)) {
        /* Machine-mode syscall (ecall): shift the four arguments up into
         * a1-a4 and move the syscall number into a0 before jumping to
         * syscall_handler(n, a0, a1, a2, a3). */
        __asm volatile(
            "mv a4, a3\n"
            "mv a3, a2\n"
            "mv a2, a1\n"
            "mv a1, a0\n"
#ifdef __riscv_32e
            "mv a0, t0\n"
#else
            "mv a0, a7\n"
#endif
            "jalr %0\n"
            : :"r"(syscall_handler) : "a4"
        );
        /* Step past the 4-byte ecall instruction so it is not re-executed. */
        mepc += 4;
    } else {
        mepc = exception_handler(mcause, mepc);
    }
    /* Restore CSR */
    write_csr(CSR_MSTATUS, mstatus);
    write_csr(CSR_MEPC, mepc);
#if defined(SUPPORT_PFT_ARCH) && SUPPORT_PFT_ARCH
    write_csr(CSR_MXSTATUS, mxstatus);
#endif
#ifdef __riscv_dsp
    write_csr(CSR_UCODE, ucode);
#endif
#ifdef __riscv_flen
    write_fcsr(fcsr);
#endif
}
  172. #ifndef CONFIG_FREERTOS
  173. void irq_handler_s_trap(void) __attribute__ ((section(".isr_s_vector"), interrupt("supervisor"), aligned(4)));
  174. #else
  175. void irq_handler_s_trap(void) __attribute__ ((section(".isr_s_vector")));
  176. #endif
  177. void irq_handler_s_trap(void)
  178. {
  179. long scause = read_csr(CSR_SCAUSE);
  180. long sepc = read_csr(CSR_SEPC);
  181. long sstatus = read_csr(CSR_SSTATUS);
  182. /* clobbers list for ecall */
  183. #ifdef __riscv_32e
  184. __asm volatile("" : : :"t0", "a0", "a1", "a2", "a3");
  185. #else
  186. __asm volatile("" : : :"a7", "a0", "a1", "a2", "a3");
  187. #endif
  188. /* Do your trap handling */
  189. if ((scause & CSR_SCAUSE_INTERRUPT_MASK) && ((scause & CSR_SCAUSE_EXCEPTION_CODE_MASK) == IRQ_S_TIMER)) {
  190. /* Machine timer interrupt */
  191. mchtmr_s_isr();
  192. }
  193. #ifdef USE_NONVECTOR_MODE
  194. else if ((scause & CSR_SCAUSE_INTERRUPT_MASK) && ((scause & CSR_SCAUSE_EXCEPTION_CODE_MASK) == IRQ_S_EXT)) {
  195. typedef void(*isr_func_t)(void);
  196. /* Machine-level interrupt from PLIC */
  197. uint32_t irq_index = __plic_claim_irq(HPM_PLIC_BASE, HPM_PLIC_TARGET_S_MODE);
  198. #if !defined(DISABLE_IRQ_PREEMPTIVE) || (DISABLE_IRQ_PREEMPTIVE == 0)
  199. enable_s_global_irq(CSR_SSTATUS_SIE_MASK);
  200. #endif
  201. ((isr_func_t)__vector_s_table[irq_index])();
  202. __plic_complete_irq(HPM_PLIC_BASE, HPM_PLIC_TARGET_S_MODE, irq_index);
  203. }
  204. #endif
  205. else if ((scause & CSR_SCAUSE_INTERRUPT_MASK) && ((scause & CSR_SCAUSE_EXCEPTION_CODE_MASK) == IRQ_S_SOFT)) {
  206. /* Machine SWI interrupt */
  207. intc_m_claim_swi();
  208. swi_s_isr();
  209. intc_s_complete_swi();
  210. } else if (!(scause & CSR_SCAUSE_INTERRUPT_MASK) && ((scause & CSR_SCAUSE_EXCEPTION_CODE_MASK) == MCAUSE_ECALL_FROM_SUPERVISOR_MODE)) {
  211. /* Machine Syscal call */
  212. __asm volatile(
  213. "mv a4, a3\n"
  214. "mv a3, a2\n"
  215. "mv a2, a1\n"
  216. "mv a1, a0\n"
  217. #ifdef __riscv_32e
  218. "mv a0, t0\n"
  219. #else
  220. "mv a0, a7\n"
  221. #endif
  222. "jalr %0\n"
  223. : :"r"(syscall_handler) : "a4"
  224. );
  225. sepc += 4;
  226. } else {
  227. sepc = exception_s_handler(scause, sepc);
  228. }
  229. /* Restore CSR */
  230. write_csr(CSR_SSTATUS, sstatus);
  231. write_csr(CSR_SEPC, sepc);
  232. }