/* cpuport.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-10-21 Bernard the first version.
  9. * 2011-10-27 aozima update for cortex-M4 FPU.
  10. * 2011-12-31 aozima fixed stack align issues.
  11. * 2012-01-01 aozima support context switch load/store FPU register.
  12. * 2012-12-11 lgnq fixed the coding style.
  13. * 2012-12-23 aozima stack addr align to 8byte.
  14. * 2012-12-29 Bernard Add exception hook.
  15. * 2013-06-23 aozima support lazy stack optimized.
  16. * 2018-07-24 aozima enhancement hard fault exception handler.
  17. * 2019-07-03 yangjie add __rt_ffs() for armclang.
  18. */
#include <rtthread.h>
#ifdef RT_USING_HW_STACK_GUARD
#include <mprotect.h>
#endif

/* USE_FPU is 1 when the toolchain targets hardware VFP and the soft-float
 * ABI is not in force; each supported compiler exposes this through a
 * different set of predefined macros. */
#if /* ARMCC */ ( (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP )) \
    /* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
    /* IAR */   || (defined ( __ICCARM__ ) && defined ( __ARMVFP__ )) \
    /* GNU */   || (defined ( __GNUC__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) )
#define USE_FPU 1
#else
#define USE_FPU 0
#endif
/* exception and interrupt handler table */
/* NOTE(review): these three globals are shared with the assembly
 * context-switch / PendSV code, which is not part of this file — their exact
 * protocol (who sets, who clears) is assumed, verify against context_*.S. */
rt_uint32_t rt_interrupt_from_thread;
rt_uint32_t rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;

/* exception hook: when installed (rt_hw_exception_install), it is given the
 * exception stack frame by rt_hw_hard_fault_exception before the default
 * register dump; returning RT_EOK suppresses the dump. */
static rt_err_t (*rt_exception_hook)(void *context) = RT_NULL;
/* Registers stacked by the Cortex-M core itself on exception entry, in
 * hardware stacking order (lowest address first). Do not reorder fields. */
struct exception_stack_frame
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;     /* link register (r14) */
    rt_uint32_t pc;     /* program counter (r15) */
    rt_uint32_t psr;    /* program status register */
};
/* Full thread context as laid out on a thread's stack: the software-saved
 * callee registers (r4-r11, plus `flag` when USE_FPU) followed by the
 * hardware-stacked exception frame. Layout must match the context-switch
 * assembly; do not reorder fields. */
struct stack_frame
{
#if USE_FPU
    /* FPU-context marker; set to 0 by rt_hw_stack_init. NOTE(review): its
     * runtime meaning is defined by the assembly switch code — assumed to
     * flag an extended (FPU) frame; verify against context_*.S. */
    rt_uint32_t flag;
#endif /* USE_FPU */
    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;
    struct exception_stack_frame exception_stack_frame;
};
/* Hardware-stacked exception frame in its extended form: the basic frame
 * (r0-r3, r12, lr, pc, psr) followed, when the FPU is enabled, by the
 * caller-saved FPU registers s0-s15 and FPSCR. */
struct exception_stack_frame_fpu
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;
    rt_uint32_t pc;
    rt_uint32_t psr;
#if USE_FPU
    /* FPU register */
    rt_uint32_t S0;
    rt_uint32_t S1;
    rt_uint32_t S2;
    rt_uint32_t S3;
    rt_uint32_t S4;
    rt_uint32_t S5;
    rt_uint32_t S6;
    rt_uint32_t S7;
    rt_uint32_t S8;
    rt_uint32_t S9;
    rt_uint32_t S10;
    rt_uint32_t S11;
    rt_uint32_t S12;
    rt_uint32_t S13;
    rt_uint32_t S14;
    rt_uint32_t S15;
    rt_uint32_t FPSCR;
    rt_uint32_t NO_NAME;    /* reserved/padding word of the extended frame */
#endif
};
/* Full thread context including the software-saved FPU registers s16-s31;
 * counterpart of struct stack_frame for threads carrying FPU state.
 * Layout must match the context-switch assembly; do not reorder fields. */
struct stack_frame_fpu
{
    rt_uint32_t flag;   /* FPU-context marker (see struct stack_frame) */
    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;
#if USE_FPU
    /* FPU register s16 ~ s31 (callee-saved, stored by software) */
    rt_uint32_t s16;
    rt_uint32_t s17;
    rt_uint32_t s18;
    rt_uint32_t s19;
    rt_uint32_t s20;
    rt_uint32_t s21;
    rt_uint32_t s22;
    rt_uint32_t s23;
    rt_uint32_t s24;
    rt_uint32_t s25;
    rt_uint32_t s26;
    rt_uint32_t s27;
    rt_uint32_t s28;
    rt_uint32_t s29;
    rt_uint32_t s30;
    rt_uint32_t s31;
#endif
    struct exception_stack_frame_fpu exception_stack_frame;
};
  129. rt_uint8_t *rt_hw_stack_init(void *tentry,
  130. void *parameter,
  131. rt_uint8_t *stack_addr,
  132. void *texit)
  133. {
  134. struct stack_frame *stack_frame;
  135. rt_uint8_t *stk;
  136. unsigned long i;
  137. stk = stack_addr + sizeof(rt_uint32_t);
  138. stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
  139. stk -= sizeof(struct stack_frame);
  140. stack_frame = (struct stack_frame *)stk;
  141. /* init all register */
  142. for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
  143. {
  144. ((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
  145. }
  146. stack_frame->exception_stack_frame.r0 = (unsigned long)parameter; /* r0 : argument */
  147. stack_frame->exception_stack_frame.r1 = 0; /* r1 */
  148. stack_frame->exception_stack_frame.r2 = 0; /* r2 */
  149. stack_frame->exception_stack_frame.r3 = 0; /* r3 */
  150. stack_frame->exception_stack_frame.r12 = 0; /* r12 */
  151. stack_frame->exception_stack_frame.lr = (unsigned long)texit; /* lr */
  152. stack_frame->exception_stack_frame.pc = (unsigned long)tentry; /* entry point, pc */
  153. stack_frame->exception_stack_frame.psr = 0x01000000L; /* PSR */
  154. #if USE_FPU
  155. stack_frame->flag = 0;
  156. #endif /* USE_FPU */
  157. /* return task's current stack address */
  158. return stk;
  159. }
  160. #ifdef RT_USING_HW_STACK_GUARD
  161. void rt_hw_stack_guard_init(rt_thread_t thread)
  162. {
  163. rt_mem_region_t stack_top_region, stack_bottom_region;
  164. rt_ubase_t stack_bottom = (rt_ubase_t)thread->stack_addr;
  165. rt_ubase_t stack_top = (rt_ubase_t)((rt_uint8_t *)thread->stack_addr + thread->stack_size);
  166. rt_ubase_t stack_bottom_region_start = RT_ALIGN(stack_bottom, MPU_MIN_REGION_SIZE);
  167. rt_ubase_t stack_top_region_start = RT_ALIGN_DOWN(stack_top - MPU_MIN_REGION_SIZE, MPU_MIN_REGION_SIZE);
  168. stack_top_region.start = (void *)stack_top_region_start;
  169. stack_top_region.size = MPU_MIN_REGION_SIZE;
  170. stack_top_region.attr = RT_MEM_REGION_P_NA_U_NA;
  171. stack_bottom_region.start = (void *)stack_bottom_region_start;
  172. stack_bottom_region.size = MPU_MIN_REGION_SIZE;
  173. stack_bottom_region.attr = RT_MEM_REGION_P_NA_U_NA;
  174. rt_mprotect_add_region(thread, &stack_top_region);
  175. rt_mprotect_add_region(thread, &stack_bottom_region);
  176. thread->stack_buf = thread->stack_addr;
  177. thread->stack_addr = (void *)(stack_bottom_region_start + MPU_MIN_REGION_SIZE);
  178. thread->stack_size = (rt_uint32_t)(stack_top_region_start - stack_bottom_region_start - MPU_MIN_REGION_SIZE);
  179. }
  180. #endif
/**
 * This function set the hook, which is invoked on fault exception handling.
 *
 * @param exception_handle the exception handling hook function. It receives
 *                         the exception stack frame; returning RT_EOK makes
 *                         rt_hw_hard_fault_exception return without dumping
 *                         registers or halting.
 */
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
{
    rt_exception_hook = exception_handle;
}
/* System Control Block fault status / control registers (memory-mapped). */
#define SCB_CFSR        (*(volatile const unsigned *)0xE000ED28)       /* Configurable Fault Status Register */
#define SCB_HFSR        (*(volatile const unsigned *)0xE000ED2C)       /* HardFault Status Register */
#define SCB_MMAR        (*(volatile const unsigned *)0xE000ED34)       /* MemManage Fault Address register */
#define SCB_BFAR        (*(volatile const unsigned *)0xE000ED38)       /* Bus Fault Address Register */
#define SCB_AIRCR       (*(volatile unsigned long *)0xE000ED0C)        /* Application Interrupt and Reset Control Register */
#define SCB_RESET_VALUE 0x05FA0004                                     /* VECTKEY | SYSRESETREQ: writing this to SCB_AIRCR resets the cpu */
/* byte / halfword views of the CFSR sub-registers */
#define SCB_CFSR_MFSR   (*(volatile const unsigned char*)0xE000ED28)   /* Memory-management Fault Status Register */
#define SCB_CFSR_BFSR   (*(volatile const unsigned char*)0xE000ED29)   /* Bus Fault Status Register */
#define SCB_CFSR_UFSR   (*(volatile const unsigned short*)0xE000ED2A)  /* Usage Fault Status Register */
  199. #ifdef RT_USING_FINSH
  200. static void usage_fault_track(void)
  201. {
  202. rt_kprintf("usage fault:\n");
  203. rt_kprintf("SCB_CFSR_UFSR:0x%02X ", SCB_CFSR_UFSR);
  204. if(SCB_CFSR_UFSR & (1<<0))
  205. {
  206. /* [0]:UNDEFINSTR */
  207. rt_kprintf("UNDEFINSTR ");
  208. }
  209. if(SCB_CFSR_UFSR & (1<<1))
  210. {
  211. /* [1]:INVSTATE */
  212. rt_kprintf("INVSTATE ");
  213. }
  214. if(SCB_CFSR_UFSR & (1<<2))
  215. {
  216. /* [2]:INVPC */
  217. rt_kprintf("INVPC ");
  218. }
  219. if(SCB_CFSR_UFSR & (1<<3))
  220. {
  221. /* [3]:NOCP */
  222. rt_kprintf("NOCP ");
  223. }
  224. if(SCB_CFSR_UFSR & (1<<8))
  225. {
  226. /* [8]:UNALIGNED */
  227. rt_kprintf("UNALIGNED ");
  228. }
  229. if(SCB_CFSR_UFSR & (1<<9))
  230. {
  231. /* [9]:DIVBYZERO */
  232. rt_kprintf("DIVBYZERO ");
  233. }
  234. rt_kprintf("\n");
  235. }
  236. static void bus_fault_track(void)
  237. {
  238. rt_kprintf("bus fault:\n");
  239. rt_kprintf("SCB_CFSR_BFSR:0x%02X ", SCB_CFSR_BFSR);
  240. if(SCB_CFSR_BFSR & (1<<0))
  241. {
  242. /* [0]:IBUSERR */
  243. rt_kprintf("IBUSERR ");
  244. }
  245. if(SCB_CFSR_BFSR & (1<<1))
  246. {
  247. /* [1]:PRECISERR */
  248. rt_kprintf("PRECISERR ");
  249. }
  250. if(SCB_CFSR_BFSR & (1<<2))
  251. {
  252. /* [2]:IMPRECISERR */
  253. rt_kprintf("IMPRECISERR ");
  254. }
  255. if(SCB_CFSR_BFSR & (1<<3))
  256. {
  257. /* [3]:UNSTKERR */
  258. rt_kprintf("UNSTKERR ");
  259. }
  260. if(SCB_CFSR_BFSR & (1<<4))
  261. {
  262. /* [4]:STKERR */
  263. rt_kprintf("STKERR ");
  264. }
  265. if(SCB_CFSR_BFSR & (1<<7))
  266. {
  267. rt_kprintf("SCB->BFAR:%08X\n", SCB_BFAR);
  268. }
  269. else
  270. {
  271. rt_kprintf("\n");
  272. }
  273. }
  274. static void mem_manage_fault_track(void)
  275. {
  276. rt_kprintf("mem manage fault:\n");
  277. rt_kprintf("SCB_CFSR_MFSR:0x%02X ", SCB_CFSR_MFSR);
  278. if(SCB_CFSR_MFSR & (1<<0))
  279. {
  280. /* [0]:IACCVIOL */
  281. rt_kprintf("IACCVIOL ");
  282. }
  283. if(SCB_CFSR_MFSR & (1<<1))
  284. {
  285. /* [1]:DACCVIOL */
  286. rt_kprintf("DACCVIOL ");
  287. }
  288. if(SCB_CFSR_MFSR & (1<<3))
  289. {
  290. /* [3]:MUNSTKERR */
  291. rt_kprintf("MUNSTKERR ");
  292. }
  293. if(SCB_CFSR_MFSR & (1<<4))
  294. {
  295. /* [4]:MSTKERR */
  296. rt_kprintf("MSTKERR ");
  297. }
  298. if(SCB_CFSR_MFSR & (1<<7))
  299. {
  300. /* [7]:MMARVALID */
  301. rt_kprintf("SCB->MMAR:%08X\n", SCB_MMAR);
  302. }
  303. else
  304. {
  305. rt_kprintf("\n");
  306. }
  307. }
/* Top-level hard fault decoder: reports vector-fetch failures, escalated
 * ("forced") configurable faults and debug events by delegating to the
 * per-fault trackers above. */
static void hard_fault_track(void)
{
    if(SCB_HFSR & (1UL<<1))
    {
        /* [1]:VECTBL, Indicates hard fault is caused by failed vector fetch. */
        rt_kprintf("failed vector fetch\n");
    }
    if(SCB_HFSR & (1UL<<30))
    {
        /* [30]:FORCED, Indicates hard fault is taken because of bus fault,
           memory management fault, or usage fault. */
        if(SCB_CFSR_BFSR)
        {
            bus_fault_track();
        }
        if(SCB_CFSR_MFSR)
        {
            mem_manage_fault_track();
        }
        if(SCB_CFSR_UFSR)
        {
            usage_fault_track();
        }
    }
    if(SCB_HFSR & (1UL<<31))
    {
        /* [31]:DEBUGEVT, Indicates hard fault is triggered by debug event. */
        rt_kprintf("debug event\n");
    }
}
#endif /* RT_USING_FINSH */
/* Argument layout for rt_hw_hard_fault_exception: the EXC_RETURN value of
 * the faulting context followed by the saved register frame.
 * NOTE(review): assembled by the HardFault entry stub (assembly, not in this
 * file) — layout assumed to match that stub; verify against context_*.S. */
struct exception_info
{
    rt_uint32_t exc_return;         /* EXC_RETURN value at fault time */
    struct stack_frame stack_frame; /* software- plus hardware-saved registers */
};
  344. void rt_hw_hard_fault_exception(struct exception_info *exception_info)
  345. {
  346. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  347. extern long list_thread(void);
  348. #endif
  349. struct exception_stack_frame *exception_stack = &exception_info->stack_frame.exception_stack_frame;
  350. struct stack_frame *context = &exception_info->stack_frame;
  351. if (rt_exception_hook != RT_NULL)
  352. {
  353. rt_err_t result;
  354. result = rt_exception_hook(exception_stack);
  355. if (result == RT_EOK) return;
  356. }
  357. rt_kprintf("psr: 0x%08x\n", context->exception_stack_frame.psr);
  358. rt_kprintf("r00: 0x%08x\n", context->exception_stack_frame.r0);
  359. rt_kprintf("r01: 0x%08x\n", context->exception_stack_frame.r1);
  360. rt_kprintf("r02: 0x%08x\n", context->exception_stack_frame.r2);
  361. rt_kprintf("r03: 0x%08x\n", context->exception_stack_frame.r3);
  362. rt_kprintf("r04: 0x%08x\n", context->r4);
  363. rt_kprintf("r05: 0x%08x\n", context->r5);
  364. rt_kprintf("r06: 0x%08x\n", context->r6);
  365. rt_kprintf("r07: 0x%08x\n", context->r7);
  366. rt_kprintf("r08: 0x%08x\n", context->r8);
  367. rt_kprintf("r09: 0x%08x\n", context->r9);
  368. rt_kprintf("r10: 0x%08x\n", context->r10);
  369. rt_kprintf("r11: 0x%08x\n", context->r11);
  370. rt_kprintf("r12: 0x%08x\n", context->exception_stack_frame.r12);
  371. rt_kprintf(" lr: 0x%08x\n", context->exception_stack_frame.lr);
  372. rt_kprintf(" pc: 0x%08x\n", context->exception_stack_frame.pc);
  373. if (exception_info->exc_return & (1 << 2))
  374. {
  375. rt_kprintf("hard fault on thread: %s\r\n\r\n", rt_thread_self()->parent.name);
  376. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  377. list_thread();
  378. #endif
  379. }
  380. else
  381. {
  382. rt_kprintf("hard fault on handler\r\n\r\n");
  383. }
  384. if ( (exception_info->exc_return & 0x10) == 0)
  385. {
  386. rt_kprintf("FPU active!\r\n");
  387. }
  388. #ifdef RT_USING_FINSH
  389. hard_fault_track();
  390. #endif /* RT_USING_FINSH */
  391. while (1);
  392. }
/**
 * reset CPU
 *
 * Requests a system reset by writing VECTKEY (0x05FA) together with
 * SYSRESETREQ to SCB->AIRCR; the reset is asynchronous, so the core may
 * execute a few more instructions before it takes effect.
 */
void rt_hw_cpu_reset(void)
{
    SCB_AIRCR = SCB_RESET_VALUE;
}
  400. #ifdef RT_USING_CPU_FFS
  401. /**
  402. * This function finds the first bit set (beginning with the least significant bit)
  403. * in value and return the index of that bit.
  404. *
  405. * Bits are numbered starting at 1 (the least significant bit). A return value of
  406. * zero from any of these functions means that the argument was zero.
  407. *
  408. * @return return the index of the first bit set. If value is 0, then this function
  409. * shall return 0.
  410. */
  411. #if defined(__CC_ARM)
  412. __asm int __rt_ffs(int value)
  413. {
  414. CMP r0, #0x00
  415. BEQ exit
  416. RBIT r0, r0
  417. CLZ r0, r0
  418. ADDS r0, r0, #0x01
  419. exit
  420. BX lr
  421. }
  422. #elif defined(__clang__)
  423. int __rt_ffs(int value)
  424. {
  425. __asm volatile(
  426. "CMP %1, #0x00 \n"
  427. "BEQ 1f \n"
  428. "RBIT %1, %1 \n"
  429. "CLZ %0, %1 \n"
  430. "ADDS %0, %0, #0x01 \n"
  431. "1: \n"
  432. : "=r"(value)
  433. : "r"(value)
  434. );
  435. return value;
  436. }
  437. #elif defined(__IAR_SYSTEMS_ICC__)
  438. int __rt_ffs(int value)
  439. {
  440. if (value == 0) return value;
  441. asm("RBIT %0, %1" : "=r"(value) : "r"(value));
  442. asm("CLZ %0, %1" : "=r"(value) : "r"(value));
  443. asm("ADDS %0, %1, #0x01" : "=r"(value) : "r"(value));
  444. return value;
  445. }
  446. #elif defined(__GNUC__)
  447. int __rt_ffs(int value)
  448. {
  449. return __builtin_ffs(value);
  450. }
  451. #endif
  452. #endif