trap.c 7.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2013-07-20 Bernard first version
  9. */
  10. #include <rtthread.h>
  11. #include <rthw.h>
  12. #include <board.h>
  13. #include <armv8.h>
  14. #include "interrupt.h"
  15. #include "mm_fault.h"
  16. #include <backtrace.h>
/* Stub: stack unwinding is not implemented on this port; backtrace() is
 * used instead for diagnostics. Kept so common code can link against it. */
void rt_unwind(struct rt_hw_exp_stack *regs, int pc_adj)
{
}
  20. #ifdef RT_USING_FINSH
  21. extern long list_thread(void);
  22. #endif
  23. #ifdef RT_USING_LWP
  24. #include <lwp.h>
  25. #include <lwp_arch.h>
  26. #ifdef LWP_USING_CORE_DUMP
  27. #include <lwp_core_dump.h>
  28. #endif
  29. void sys_exit(int value);
  30. void check_user_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
  31. {
  32. uint32_t mode = regs->cpsr;
  33. if ((mode & 0x1f) == 0x00)
  34. {
  35. rt_kprintf("%s! pc = 0x%08x\n", info, regs->pc - pc_adj);
  36. #ifdef LWP_USING_CORE_DUMP
  37. lwp_core_dump(regs, pc_adj);
  38. #endif
  39. backtrace((unsigned long)regs->pc, (unsigned long)regs->x30, (unsigned long)regs->x29);
  40. sys_exit(-1);
  41. }
  42. }
  43. int _get_type(unsigned long esr)
  44. {
  45. int ret;
  46. int fsc = esr & 0x3f;
  47. switch (fsc)
  48. {
  49. case 0x4:
  50. case 0x5:
  51. case 0x6:
  52. case 0x7:
  53. ret = MM_FAULT_TYPE_PAGE_FAULT;
  54. break;
  55. case 0x9:
  56. case 0xa:
  57. case 0xb:
  58. ret = MM_FAULT_TYPE_ACCESS_FAULT;
  59. break;
  60. default:
  61. ret = MM_FAULT_TYPE_GENERIC;
  62. }
  63. return ret;
  64. }
  65. int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
  66. {
  67. unsigned char ec;
  68. void *dfar;
  69. int ret = 0;
  70. ec = (unsigned char)((esr >> 26) & 0x3fU);
  71. enum rt_mm_fault_op fault_op;
  72. enum rt_mm_fault_type fault_type;
  73. switch (ec)
  74. {
  75. case 0x20:
  76. fault_op = MM_FAULT_OP_EXECUTE;
  77. fault_type = _get_type(esr);
  78. break;
  79. case 0x21:
  80. case 0x24:
  81. case 0x25:
  82. fault_op = MM_FAULT_OP_WRITE;
  83. fault_type = _get_type(esr);
  84. break;
  85. default:
  86. fault_op = 0;
  87. break;
  88. }
  89. if (fault_op)
  90. {
  91. asm volatile("mrs %0, far_el1":"=r"(dfar));
  92. struct rt_aspace_fault_msg msg = {
  93. .fault_op = fault_op,
  94. .fault_type = fault_type,
  95. .fault_vaddr = dfar,
  96. };
  97. if (rt_aspace_fault_try_fix(&msg))
  98. {
  99. ret = 1;
  100. }
  101. }
  102. return ret;
  103. }
  104. #endif
/**
 * This function shows the CPU registers of the trapped context.
 *
 * @param regs pointer to the saved exception register frame
 */
  110. void rt_hw_show_register(struct rt_hw_exp_stack *regs)
  111. {
  112. rt_kprintf("Execption:\n");
  113. rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
  114. rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
  115. rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
  116. rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
  117. rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
  118. rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
  119. rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
  120. rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
  121. rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
  122. rt_kprintf("SPSR :0x%16.16p\n", (void *)regs->cpsr);
  123. rt_kprintf("EPC :0x%16.16p\n", (void *)regs->pc);
  124. }
/**
 * IRQ trap entry: identify the pending interrupt, dispatch its registered
 * service routine from isr_table[], and acknowledge it.
 *
 * Two hardware paths: BCM283x (Raspberry Pi legacy interrupt controller,
 * no GIC) and the generic GIC path.
 */
void rt_hw_trap_irq(void)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;

    /* basic pending register, low 10 bits are the pending sources */
    value = IRQ_PEND_BASIC & 0x3ff;

    /* Per-core timer interrupt, routed via the core's IRQ source register. */
    if (core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            /* bit 3: timer source pending — presumably CNTV; verify against
             * the BCM2836 local-peripherals datasheet */
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }
    /* local interrupt*/
    if (value)
    {
        if (value & (1 << 8))
        {
            /* one of GPU IRQs 0..31 pending; IRQ number from first set bit */
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            /* one of GPU IRQs 32..63 pending */
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            /* basic pending bits 0..3 map to IRQ numbers 64.. */
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }
        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();
    if (ir == 1023)
    {
        /* Spurious interrupt (GIC INTID 1023): nothing to service. */
        return;
    }
    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;
    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }
    /* end of interrupt: ack with the original value (includes cpuid bits) */
    rt_hw_interrupt_ack(ir);
#endif
}
  215. void rt_hw_trap_fiq(void)
  216. {
  217. void *param;
  218. int ir, ir_self;
  219. rt_isr_handler_t isr_func;
  220. extern struct rt_irq_desc isr_table[];
  221. ir = rt_hw_interrupt_get_irq();
  222. /* bit 10~12 is cpuid, bit 0~9 is interrup id */
  223. ir_self = ir & 0x3ffUL;
  224. /* get interrupt service routine */
  225. isr_func = isr_table[ir_self].handler;
  226. param = isr_table[ir_self].param;
  227. /* turn to interrupt service routine */
  228. isr_func(ir_self, param);
  229. /* end of interrupt */
  230. rt_hw_interrupt_ack(ir);
  231. }
void process_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);

/**
 * Synchronous exception entry (from the EL1 vector table).
 *
 * Handling order: debugger events first, then 64-bit syscalls (SVC),
 * then fixable memory faults; anything left is fatal — dump state,
 * kill the faulting user process if applicable, and shut down.
 *
 * @param regs saved register frame at the point of the exception
 */
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;

    /* ESR_EL1 describes the cause of the synchronous exception. */
    asm volatile("mrs %0, esr_el1":"=r"(esr));
    /* exception class, ESR bits [31:26] */
    ec = (unsigned char)((esr >> 26) & 0x3fU);
#ifdef RT_USING_LWP
    /* Give the debugger first chance (breakpoints, single-step, …). */
    if (dbg_check_event(regs, esr))
    {
        return;
    }
    else
#endif
    if (ec == 0x15) /* is 64bit syscall ? */
    {
        SVC_Handler(regs);
        /* never return here */
    }
#ifdef RT_USING_LWP
    /* Try to fix the fault (demand paging etc.); resume on success. */
    if (check_user_stack(esr, regs))
    {
        return;
    }
#endif
    /* Unrecoverable: report the raw ESR/PC and dump the register frame. */
    process_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_LWP
    /* If the exception came from user mode, terminate only that process
     * (check_user_fault does not return in that case). */
    check_user_fault(regs, 0, "user fault");
#endif
#ifdef RT_USING_FINSH
    list_thread();
#endif
    backtrace((unsigned long)regs->pc, (unsigned long)regs->x30, (unsigned long)regs->x29);
    rt_hw_cpu_shutdown();
}
/**
 * SError (asynchronous external abort) entry: treated as unrecoverable —
 * dump the register frame and thread list, then shut the system down.
 *
 * @param regs saved register frame at the point the SError was taken
 */
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}