/* trap.c — AArch64 exception / interrupt trap handling */
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 */
  10. #include <rtthread.h>
  11. #include <rthw.h>
  12. #include <board.h>
  13. #include <armv8.h>
  14. #include "interrupt.h"
  15. #include "mm_aspace.h"
  16. #define DBG_TAG "libcpu.trap"
  17. #define DBG_LVL DBG_LOG
  18. #include <rtdbg.h>
  19. #ifdef RT_USING_FINSH
  20. extern long list_thread(void);
  21. #endif
  22. #ifdef RT_USING_LWP
  23. #include <lwp.h>
  24. #include <lwp_arch.h>
  25. #ifdef LWP_USING_CORE_DUMP
  26. #include <lwp_core_dump.h>
  27. #endif
  28. static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
  29. {
  30. uint32_t is_user_fault;
  31. rt_thread_t th;
  32. is_user_fault = !(regs->cpsr & 0x1f);
  33. if (is_user_fault)
  34. {
  35. rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);
  36. }
  37. /* user stack backtrace */
  38. th = rt_thread_self();
  39. if (th && th->lwp)
  40. {
  41. arch_backtrace_uthread(th);
  42. }
  43. if (is_user_fault)
  44. {
  45. #ifdef LWP_USING_CORE_DUMP
  46. lwp_core_dump(regs, pc_adj);
  47. #endif
  48. sys_exit_group(-1);
  49. }
  50. }
  51. rt_inline int _get_type(unsigned long esr)
  52. {
  53. int ret;
  54. int fsc = ARM64_ESR_EXTRACT_FSC(esr);
  55. switch (fsc)
  56. {
  57. case ARM64_FSC_TRANSLATION_FAULT_LEVEL_0:
  58. case ARM64_FSC_TRANSLATION_FAULT_LEVEL_1:
  59. case ARM64_FSC_TRANSLATION_FAULT_LEVEL_2:
  60. case ARM64_FSC_TRANSLATION_FAULT_LEVEL_3:
  61. ret = MM_FAULT_TYPE_PAGE_FAULT;
  62. break;
  63. case ARM64_FSC_PERMISSION_FAULT_LEVEL_0:
  64. case ARM64_FSC_PERMISSION_FAULT_LEVEL_1:
  65. case ARM64_FSC_PERMISSION_FAULT_LEVEL_2:
  66. case ARM64_FSC_PERMISSION_FAULT_LEVEL_3:
  67. ret = MM_FAULT_TYPE_RWX_PERM;
  68. break;
  69. case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0:
  70. case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1:
  71. case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2:
  72. case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3:
  73. /* access flag fault, not handle currently */
  74. default:
  75. ret = MM_FAULT_TYPE_GENERIC;
  76. }
  77. return ret;
  78. }
  79. rt_inline long _irq_is_disable(long cpsr)
  80. {
  81. return !!(cpsr & 0x80);
  82. }
  83. static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
  84. {
  85. rt_ubase_t level;
  86. enum rt_mm_fault_op fault_op;
  87. enum rt_mm_fault_type fault_type;
  88. struct rt_lwp *lwp;
  89. void *dfar;
  90. int ret = 0;
  91. unsigned char ec = ARM64_ESR_EXTRACT_EC(esr);
  92. rt_bool_t is_write = ARM64_ABORT_WNR(esr);
  93. switch (ec)
  94. {
  95. case ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION:
  96. fault_op = MM_FAULT_OP_EXECUTE;
  97. fault_type = _get_type(esr);
  98. break;
  99. case ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE:
  100. case ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION:
  101. case ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE:
  102. fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
  103. fault_type = _get_type(esr);
  104. break;
  105. default:
  106. /* non-fixable */
  107. fault_op = 0;
  108. break;
  109. }
  110. /* page fault exception only allow from user space */
  111. lwp = lwp_self();
  112. if (lwp && fault_op)
  113. {
  114. __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
  115. struct rt_aspace_fault_msg msg = {
  116. .fault_op = fault_op,
  117. .fault_type = fault_type,
  118. .fault_vaddr = dfar,
  119. };
  120. lwp_user_setting_save(rt_thread_self());
  121. __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
  122. if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
  123. {
  124. ret = 1;
  125. }
  126. __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
  127. }
  128. return ret;
  129. }
  130. #endif
  131. /**
  132. * this function will show registers of CPU
  133. *
  134. * @param regs the registers point
  135. */
  136. void rt_hw_show_register(struct rt_hw_exp_stack *regs)
  137. {
  138. rt_kprintf("Execption:\n");
  139. rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
  140. rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
  141. rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
  142. rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
  143. rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
  144. rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
  145. rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
  146. rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
  147. rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
  148. rt_kprintf("SPSR :0x%16.16p\n", (void *)regs->cpsr);
  149. rt_kprintf("EPC :0x%16.16p\n", (void *)regs->pc);
  150. }
  151. #ifndef RT_USING_PIC
  152. static void _rt_hw_trap_irq(rt_interrupt_context_t irq_context)
  153. {
  154. #ifdef SOC_BCM283x
  155. extern rt_uint8_t core_timer_flag;
  156. void *param;
  157. uint32_t irq;
  158. rt_isr_handler_t isr_func;
  159. extern struct rt_irq_desc isr_table[];
  160. uint32_t value = 0;
  161. value = IRQ_PEND_BASIC & 0x3ff;
  162. if(core_timer_flag != 0)
  163. {
  164. uint32_t cpu_id = rt_hw_cpu_id();
  165. uint32_t int_source = CORE_IRQSOURCE(cpu_id);
  166. if (int_source & 0x0f)
  167. {
  168. if (int_source & 0x08)
  169. {
  170. isr_func = isr_table[IRQ_ARM_TIMER].handler;
  171. #ifdef RT_USING_INTERRUPT_INFO
  172. isr_table[IRQ_ARM_TIMER].counter++;
  173. #endif
  174. if (isr_func)
  175. {
  176. param = isr_table[IRQ_ARM_TIMER].param;
  177. isr_func(IRQ_ARM_TIMER, param);
  178. }
  179. }
  180. }
  181. }
  182. /* local interrupt*/
  183. if (value)
  184. {
  185. if (value & (1 << 8))
  186. {
  187. value = IRQ_PEND1;
  188. irq = __rt_ffs(value) - 1;
  189. }
  190. else if (value & (1 << 9))
  191. {
  192. value = IRQ_PEND2;
  193. irq = __rt_ffs(value) + 31;
  194. }
  195. else
  196. {
  197. value &= 0x0f;
  198. irq = __rt_ffs(value) + 63;
  199. }
  200. /* get interrupt service routine */
  201. isr_func = isr_table[irq].handler;
  202. #ifdef RT_USING_INTERRUPT_INFO
  203. isr_table[irq].counter++;
  204. #endif
  205. if (isr_func)
  206. {
  207. /* Interrupt for myself. */
  208. param = isr_table[irq].param;
  209. /* turn to interrupt service routine */
  210. isr_func(irq, param);
  211. }
  212. }
  213. #else
  214. void *param;
  215. int ir, ir_self;
  216. rt_isr_handler_t isr_func;
  217. extern struct rt_irq_desc isr_table[];
  218. ir = rt_hw_interrupt_get_irq();
  219. if (ir == 1023)
  220. {
  221. /* Spurious interrupt */
  222. return;
  223. }
  224. /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
  225. ir_self = ir & 0x3ffUL;
  226. /* get interrupt service routine */
  227. isr_func = isr_table[ir_self].handler;
  228. #ifdef RT_USING_INTERRUPT_INFO
  229. isr_table[ir_self].counter++;
  230. #ifdef RT_USING_SMP
  231. isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
  232. #endif
  233. #endif
  234. if (isr_func)
  235. {
  236. /* Interrupt for myself. */
  237. param = isr_table[ir_self].param;
  238. /* turn to interrupt service routine */
  239. isr_func(ir_self, param);
  240. }
  241. /* end of interrupt */
  242. rt_hw_interrupt_ack(ir);
  243. #endif
  244. }
  245. #else
  246. static void _rt_hw_trap_irq(struct rt_interrupt_context *this_ctx)
  247. {
  248. rt_pic_do_traps();
  249. }
  250. #endif
  251. void rt_hw_trap_irq(struct rt_hw_exp_stack *regs)
  252. {
  253. struct rt_interrupt_context this_ctx = {
  254. .context = regs,
  255. .node = RT_SLIST_OBJECT_INIT(this_ctx.node),
  256. };
  257. rt_interrupt_context_push(&this_ctx);
  258. _rt_hw_trap_irq(&this_ctx);
  259. rt_interrupt_context_pop();
  260. }
  261. #ifdef RT_USING_SMART
  262. #define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
  263. #else
  264. #define DBG_CHECK_EVENT(regs, esr) (0)
  265. #endif
  266. #ifndef RT_USING_PIC
  267. void rt_hw_trap_fiq(void)
  268. {
  269. void *param;
  270. int ir, ir_self;
  271. rt_isr_handler_t isr_func;
  272. extern struct rt_irq_desc isr_table[];
  273. ir = rt_hw_interrupt_get_irq();
  274. /* bit 10~12 is cpuid, bit 0~9 is interrup id */
  275. ir_self = ir & 0x3ffUL;
  276. /* get interrupt service routine */
  277. isr_func = isr_table[ir_self].handler;
  278. param = isr_table[ir_self].param;
  279. /* turn to interrupt service routine */
  280. isr_func(ir_self, param);
  281. /* end of interrupt */
  282. rt_hw_interrupt_ack(ir);
  283. }
  284. #else
  285. void rt_hw_trap_fiq(void)
  286. {
  287. rt_pic_do_traps();
  288. }
  289. #endif
  290. void print_exception(unsigned long esr, unsigned long epc);
  291. void SVC_Handler(struct rt_hw_exp_stack *regs);
  292. void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
  293. {
  294. unsigned long esr;
  295. unsigned char ec;
  296. asm volatile("mrs %0, esr_el1":"=r"(esr));
  297. ec = (unsigned char)((esr >> 26) & 0x3fU);
  298. if (DBG_CHECK_EVENT(regs, esr))
  299. {
  300. return;
  301. }
  302. else if (ec == 0x15) /* is 64bit syscall ? */
  303. {
  304. SVC_Handler(regs);
  305. /* never return here */
  306. }
  307. #ifdef RT_USING_SMART
  308. /**
  309. * Note: check_user_stack will take lock and it will possibly be a dead-lock
  310. * if exception comes from kernel.
  311. */
  312. if ((regs->cpsr & 0x1f) == 0)
  313. {
  314. if (user_fault_fixable(esr, regs))
  315. return;
  316. }
  317. else
  318. {
  319. if (_irq_is_disable(regs->cpsr))
  320. {
  321. LOG_E("Kernel fault from interrupt/critical section");
  322. }
  323. if (rt_critical_level() != 0)
  324. {
  325. LOG_E("scheduler is not available");
  326. }
  327. else if (user_fault_fixable(esr, regs))
  328. return;
  329. }
  330. #endif
  331. print_exception(esr, regs->pc);
  332. rt_hw_show_register(regs);
  333. LOG_E("current thread: %s\n", rt_thread_self()->parent.name);
  334. #ifdef RT_USING_FINSH
  335. list_thread();
  336. #endif
  337. #ifdef RT_USING_LWP
  338. /* restore normal execution environment */
  339. __asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
  340. _check_fault(regs, 0, "user fault");
  341. #endif
  342. struct rt_hw_backtrace_frame frame = {.fp = regs->x29, .pc = regs->pc};
  343. rt_backtrace_frame(rt_thread_self(), &frame);
  344. rt_hw_cpu_shutdown();
  345. }
  346. void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
  347. {
  348. rt_kprintf("SError\n");
  349. rt_hw_show_register(regs);
  350. rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
  351. #ifdef RT_USING_FINSH
  352. list_thread();
  353. #endif
  354. rt_hw_cpu_shutdown();
  355. }