/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-10-21     Bernard      the first version.
 * 2011-10-27     aozima       update for cortex-M4 FPU.
 * 2011-12-31     aozima       fixed stack align issues.
 * 2012-01-01     aozima       support context switch load/store FPU register.
 * 2012-12-11     lgnq         fixed the coding style.
 * 2012-12-23     aozima       stack addr align to 8byte.
 * 2012-12-29     Bernard      add exception hook.
 * 2013-06-23     aozima       support lazy stack optimized.
 * 2018-07-24     aozima       enhancement hard fault exception handler.
 * 2019-07-03     yangjie      add __rt_ffs() for armclang.
 */

#include <rtthread.h>

#ifdef RT_USING_HW_STACK_GUARD
#include <mprotect.h>
#endif

#if               /* ARMCC */ (  (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP )) \
                  /* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
                  /* IAR   */ || (defined ( __ICCARM__ ) && defined ( __ARMVFP__ )) \
                  /* GNU   */ || (defined ( __GNUC__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) )
#define USE_FPU   1
#else
#define USE_FPU   0
#endif

/* exception and interrupt handler table */
rt_uint32_t rt_interrupt_from_thread;
rt_uint32_t rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;

/* exception hook */
static rt_err_t (*rt_exception_hook)(void *context) = RT_NULL;

struct exception_stack_frame
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;
    rt_uint32_t pc;
    rt_uint32_t psr;
};

struct stack_frame
{
    rt_uint32_t tz;         /* trustzone thread context */
    rt_uint32_t lr;         /* EXC_RETURN value used when resuming the thread */
    rt_uint32_t psplim;     /* PSPLIM (process stack pointer limit) */
    rt_uint32_t control;    /* CONTROL register */

    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;

    struct exception_stack_frame exception_stack_frame;
};

struct exception_stack_frame_fpu
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;
    rt_uint32_t pc;
    rt_uint32_t psr;

#if USE_FPU
    /* FPU register */
    rt_uint32_t S0;
    rt_uint32_t S1;
    rt_uint32_t S2;
    rt_uint32_t S3;
    rt_uint32_t S4;
    rt_uint32_t S5;
    rt_uint32_t S6;
    rt_uint32_t S7;
    rt_uint32_t S8;
    rt_uint32_t S9;
    rt_uint32_t S10;
    rt_uint32_t S11;
    rt_uint32_t S12;
    rt_uint32_t S13;
    rt_uint32_t S14;
    rt_uint32_t S15;
    rt_uint32_t FPSCR;
    rt_uint32_t NO_NAME;
#endif
};

struct stack_frame_fpu
{
    rt_uint32_t flag;

    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;

#if USE_FPU
    /* FPU register s16 ~ s31 */
    rt_uint32_t s16;
    rt_uint32_t s17;
    rt_uint32_t s18;
    rt_uint32_t s19;
    rt_uint32_t s20;
    rt_uint32_t s21;
    rt_uint32_t s22;
    rt_uint32_t s23;
    rt_uint32_t s24;
    rt_uint32_t s25;
    rt_uint32_t s26;
    rt_uint32_t s27;
    rt_uint32_t s28;
    rt_uint32_t s29;
    rt_uint32_t s30;
    rt_uint32_t s31;
#endif

    struct exception_stack_frame_fpu exception_stack_frame;
};

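/*
 * Note (descriptive, added for clarity): rt_hw_stack_init() below fabricates a
 * struct stack_frame at the top of a new thread's stack so that the first
 * context switch can "return" into the thread entry. The tz/lr/psplim/control
 * words and r4 ~ r11 are the software-saved part of the context, while
 * exception_stack_frame mirrors the frame that the hardware stacks
 * automatically on exception entry (r0-r3, r12, lr, pc, psr).
 */
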
rt_uint8_t *rt_hw_stack_init(void       *tentry,
                             void       *parameter,
                             rt_uint8_t *stack_addr,
                             void       *texit)
{
    struct stack_frame *stack_frame;
    rt_uint8_t         *stk;
    unsigned long       i;

    stk  = stack_addr + sizeof(rt_uint32_t);
    stk  = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
    stk -= sizeof(struct stack_frame);

    stack_frame = (struct stack_frame *)stk;

    /* initialize all registers */
    for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
    {
        ((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
    }

    stack_frame->exception_stack_frame.r0  = (unsigned long)parameter; /* r0 : argument */
    stack_frame->exception_stack_frame.r1  = 0;                        /* r1 */
    stack_frame->exception_stack_frame.r2  = 0;                        /* r2 */
    stack_frame->exception_stack_frame.r3  = 0;                        /* r3 */
    stack_frame->exception_stack_frame.r12 = 0;                        /* r12 */
    stack_frame->exception_stack_frame.lr  = (unsigned long)texit;     /* lr */
    stack_frame->exception_stack_frame.pc  = (unsigned long)tentry;    /* entry point, pc */
    stack_frame->exception_stack_frame.psr = 0x01000000L;              /* PSR : Thumb bit (xPSR[24]) set */

    stack_frame->tz = 0x00; /* trustzone thread context */

    /*
     * Exception return (EXC_RETURN) behavior
     * +--------+---+---+------+-------+------+-------+---+----+
     * | PREFIX | - | S | DCRS | FType | Mode | SPSEL | - | ES |
     * +--------+---+---+------+-------+------+-------+---+----+
     * PREFIX [31:24] - Indicates that this is an EXC_RETURN value.
     *                  This field reads as 0b11111111.
     * S      [6]     - Indicates whether registers have been pushed to a Secure or Non-secure stack.
     *                  0: Non-secure stack used.
     *                  1: Secure stack used.
     * DCRS   [5]     - Indicates whether the default stacking rules apply, or whether the callee registers are already on the stack.
     *                  0: Stacking of the callee saved registers is skipped.
     *                  1: Default rules for stacking the callee registers are followed.
     * FType  [4]     - In a PE with the Main and Floating-point Extensions:
     *                  0: The PE allocated space on the stack for FP context.
     *                  1: The PE did not allocate space on the stack for FP context.
     *                  In a PE without the Floating-point Extension, this bit is Reserved, RES1.
     * Mode   [3]     - Indicates the mode that was stacked from.
     *                  0: Handler mode.
     *                  1: Thread mode.
     * SPSEL  [2]     - Indicates which stack contains the exception stack frame.
     *                  0: Main stack pointer.
     *                  1: Process stack pointer.
     * ES     [0]     - Indicates the Security state the exception was taken to.
     *                  0: Non-secure.
     *                  1: Secure.
     */
#ifdef ARCH_ARM_CORTEX_SECURE
    stack_frame->lr = 0xfffffffdL;
#else
    stack_frame->lr = 0xffffffbcL;
#endif
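    /*
     * Decoding the two values above against the field table (added note):
     *   0xFFFFFFFD - S=1, DCRS=1, FType=1 (no FP context), Mode=1 (Thread), SPSEL=1 (PSP), ES=1 (Secure).
     *   0xFFFFFFBC - S=0, DCRS=1, FType=1 (no FP context), Mode=1 (Thread), SPSEL=1 (PSP), ES=0 (Non-secure).
     */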
    stack_frame->psplim = 0x00;

    /*
     * CONTROL register bit assignments
     * +---+------+------+-------+-------+
     * | - | SFPA | FPCA | SPSEL | nPRIV |
     * +---+------+------+-------+-------+
     * SFPA  [3] - Indicates that the floating-point registers contain active state that belongs to the Secure state:
     *             0: The floating-point registers do not contain state that belongs to the Secure state.
     *             1: The floating-point registers contain state that belongs to the Secure state.
     *             This bit is not banked between Security states and is RAZ/WI from the Non-secure state.
     * FPCA  [2] - Indicates whether floating-point context is active:
     *             0: No floating-point context active.
     *             1: Floating-point context active.
     *             This bit is used to determine whether to preserve floating-point state when processing an exception.
     *             This bit is not banked between Security states.
     * SPSEL [1] - Defines the currently active stack pointer:
     *             0: MSP is the current stack pointer.
     *             1: PSP is the current stack pointer.
     *             In Handler mode, this bit reads as zero and ignores writes. The Cortex-M33 core updates this bit automatically on exception return.
     *             This bit is banked between Security states.
     * nPRIV [0] - Defines the Thread mode privilege level:
     *             0: Privileged.
     *             1: Unprivileged.
     *             This bit is banked between Security states.
     */
    stack_frame->control = 0x00000000L;
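    /*
     * Added note: with all CONTROL bits cleared, the thread starts out
     * privileged (nPRIV = 0) and with no active floating-point context
     * (FPCA = 0, SFPA = 0); see the field descriptions above.
     */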
    /* return task's current stack address */
    return stk;
}

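/*
 * Usage sketch (illustrative assumption, not part of this port): the kernel's
 * thread-creation code is expected to call rt_hw_stack_init() with the highest
 * usable address of the thread stack and keep the returned pointer as the
 * thread's initial stack pointer, e.g.
 *
 *     thread->sp = (void *)rt_hw_stack_init(entry, parameter,
 *                      (rt_uint8_t *)thread->stack_addr + thread->stack_size - sizeof(rt_ubase_t),
 *                      (void *)exit_routine);
 *
 * Here "exit_routine" stands in for the kernel's thread-exit handler and is
 * named for illustration only.
 */
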
#ifdef RT_USING_HW_STACK_GUARD
/*
 * Reserve an MPU-protected guard region at each end of the thread stack, then
 * shrink the usable stack range so that an overflow or underflow hits a
 * read-only region and faults instead of silently corrupting memory.
 */
void rt_hw_stack_guard_init(rt_thread_t thread)
{
    rt_mem_region_t stack_top_region, stack_bottom_region;
    rt_ubase_t stack_bottom = (rt_ubase_t)thread->stack_addr;
    rt_ubase_t stack_top = (rt_ubase_t)((rt_uint8_t *)thread->stack_addr + thread->stack_size);
    rt_ubase_t stack_bottom_region_start = RT_ALIGN(stack_bottom, MPU_MIN_REGION_SIZE);
    rt_ubase_t stack_top_region_start = RT_ALIGN_DOWN(stack_top - MPU_MIN_REGION_SIZE, MPU_MIN_REGION_SIZE);

    stack_top_region.start = (void *)stack_top_region_start;
    stack_top_region.size = MPU_MIN_REGION_SIZE;
    stack_top_region.attr = RT_MEM_REGION_P_RO_U_NA;

    stack_bottom_region.start = (void *)stack_bottom_region_start;
    stack_bottom_region.size = MPU_MIN_REGION_SIZE;
    stack_bottom_region.attr = RT_MEM_REGION_P_RO_U_NA;

    rt_mprotect_add_region(thread, &stack_top_region);
    rt_mprotect_add_region(thread, &stack_bottom_region);

    thread->stack_buf = thread->stack_addr;
    thread->stack_addr = (void *)(stack_bottom_region_start + MPU_MIN_REGION_SIZE);
    thread->stack_size = (rt_uint32_t)(stack_top_region_start - (rt_ubase_t)thread->stack_addr);
}
#endif

/**
 * This function sets the hook which is invoked on fault exception handling.
 *
 * @param exception_handle the exception handling hook function.
 */
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
{
    rt_exception_hook = exception_handle;
}

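/*
 * Example (illustrative sketch, not part of this file): an application can
 * install a hook that inspects the hardware-stacked frame and still lets the
 * default register dump run by returning anything other than RT_EOK:
 *
 *     static rt_err_t app_fault_hook(void *context)
 *     {
 *         struct exception_stack_frame *esf = (struct exception_stack_frame *)context;
 *
 *         rt_kprintf("fault at pc=0x%08x, lr=0x%08x\n", esf->pc, esf->lr);
 *         return -RT_ERROR; /* returning RT_EOK would suppress the default dump */
 *     }
 *
 *     rt_hw_exception_install(app_fault_hook);
 *
 * The hook receives a pointer to the hardware-stacked exception_stack_frame,
 * as passed by rt_hw_hard_fault_exception() below.
 */
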
#define SCB_CFSR        (*(volatile const unsigned *)0xE000ED28)       /* Configurable Fault Status Register */
#define SCB_HFSR        (*(volatile const unsigned *)0xE000ED2C)       /* HardFault Status Register */
#define SCB_MMAR        (*(volatile const unsigned *)0xE000ED34)       /* MemManage Fault Address Register */
#define SCB_BFAR        (*(volatile const unsigned *)0xE000ED38)       /* Bus Fault Address Register */
#define SCB_AIRCR       (*(volatile unsigned long *)0xE000ED0C)        /* Application Interrupt and Reset Control Register */
#define SCB_RESET_VALUE 0x05FA0004                                     /* VECTKEY (0x05FA) | SYSRESETREQ; writing this to SCB_AIRCR resets the CPU */

#define SCB_CFSR_MFSR   (*(volatile const unsigned char *)0xE000ED28)  /* Memory-management Fault Status Register */
#define SCB_CFSR_BFSR   (*(volatile const unsigned char *)0xE000ED29)  /* Bus Fault Status Register */
#define SCB_CFSR_UFSR   (*(volatile const unsigned short *)0xE000ED2A) /* Usage Fault Status Register */

#ifdef RT_USING_FINSH
static void usage_fault_track(void)
{
    rt_kprintf("usage fault:\n");
    rt_kprintf("SCB_CFSR_UFSR:0x%02X ", SCB_CFSR_UFSR);

    if (SCB_CFSR_UFSR & (1 << 0))
    {
        /* [0]:UNDEFINSTR */
        rt_kprintf("UNDEFINSTR ");
    }

    if (SCB_CFSR_UFSR & (1 << 1))
    {
        /* [1]:INVSTATE */
        rt_kprintf("INVSTATE ");
    }

    if (SCB_CFSR_UFSR & (1 << 2))
    {
        /* [2]:INVPC */
        rt_kprintf("INVPC ");
    }

    if (SCB_CFSR_UFSR & (1 << 3))
    {
        /* [3]:NOCP */
        rt_kprintf("NOCP ");
    }

    if (SCB_CFSR_UFSR & (1 << 8))
    {
        /* [8]:UNALIGNED */
        rt_kprintf("UNALIGNED ");
    }

    if (SCB_CFSR_UFSR & (1 << 9))
    {
        /* [9]:DIVBYZERO */
        rt_kprintf("DIVBYZERO ");
    }

    rt_kprintf("\n");
}

static void bus_fault_track(void)
{
    rt_kprintf("bus fault:\n");
    rt_kprintf("SCB_CFSR_BFSR:0x%02X ", SCB_CFSR_BFSR);

    if (SCB_CFSR_BFSR & (1 << 0))
    {
        /* [0]:IBUSERR */
        rt_kprintf("IBUSERR ");
    }

    if (SCB_CFSR_BFSR & (1 << 1))
    {
        /* [1]:PRECISERR */
        rt_kprintf("PRECISERR ");
    }

    if (SCB_CFSR_BFSR & (1 << 2))
    {
        /* [2]:IMPRECISERR */
        rt_kprintf("IMPRECISERR ");
    }

    if (SCB_CFSR_BFSR & (1 << 3))
    {
        /* [3]:UNSTKERR */
        rt_kprintf("UNSTKERR ");
    }

    if (SCB_CFSR_BFSR & (1 << 4))
    {
        /* [4]:STKERR */
        rt_kprintf("STKERR ");
    }

    if (SCB_CFSR_BFSR & (1 << 7))
    {
        /* [7]:BFARVALID */
        rt_kprintf("SCB->BFAR:%08X\n", SCB_BFAR);
    }
    else
    {
        rt_kprintf("\n");
    }
}

static void mem_manage_fault_track(void)
{
    rt_kprintf("mem manage fault:\n");
    rt_kprintf("SCB_CFSR_MFSR:0x%02X ", SCB_CFSR_MFSR);

    if (SCB_CFSR_MFSR & (1 << 0))
    {
        /* [0]:IACCVIOL */
        rt_kprintf("IACCVIOL ");
    }

    if (SCB_CFSR_MFSR & (1 << 1))
    {
        /* [1]:DACCVIOL */
        rt_kprintf("DACCVIOL ");
    }

    if (SCB_CFSR_MFSR & (1 << 3))
    {
        /* [3]:MUNSTKERR */
        rt_kprintf("MUNSTKERR ");
    }

    if (SCB_CFSR_MFSR & (1 << 4))
    {
        /* [4]:MSTKERR */
        rt_kprintf("MSTKERR ");
    }

    if (SCB_CFSR_MFSR & (1 << 7))
    {
        /* [7]:MMARVALID */
        rt_kprintf("SCB->MMAR:%08X\n", SCB_MMAR);
    }
    else
    {
        rt_kprintf("\n");
    }
}

static void hard_fault_track(void)
{
    if (SCB_HFSR & (1UL << 1))
    {
        /* [1]:VECTTBL, Indicates hard fault is caused by failed vector fetch. */
        rt_kprintf("failed vector fetch\n");
    }

    if (SCB_HFSR & (1UL << 30))
    {
        /* [30]:FORCED, Indicates hard fault is taken because of bus fault,
                        memory management fault, or usage fault. */
        if (SCB_CFSR_BFSR)
        {
            bus_fault_track();
        }

        if (SCB_CFSR_MFSR)
        {
            mem_manage_fault_track();
        }

        if (SCB_CFSR_UFSR)
        {
            usage_fault_track();
        }
    }

    if (SCB_HFSR & (1UL << 31))
    {
        /* [31]:DEBUGEVT, Indicates hard fault is triggered by debug event. */
        rt_kprintf("debug event\n");
    }
}
#endif /* RT_USING_FINSH */

struct exception_info
{
    rt_uint32_t exc_return;
    struct stack_frame stack_frame;
};

void rt_hw_hard_fault_exception(struct exception_info *exception_info)
{
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
    extern long list_thread(void);
#endif
    struct exception_stack_frame *exception_stack = &exception_info->stack_frame.exception_stack_frame;
    struct stack_frame *context = &exception_info->stack_frame;

    if (rt_exception_hook != RT_NULL)
    {
        rt_err_t result;

        result = rt_exception_hook(exception_stack);
        if (result == RT_EOK) return;
    }

    rt_kprintf("psr: 0x%08x\n", context->exception_stack_frame.psr);

    rt_kprintf("r00: 0x%08x\n", context->exception_stack_frame.r0);
    rt_kprintf("r01: 0x%08x\n", context->exception_stack_frame.r1);
    rt_kprintf("r02: 0x%08x\n", context->exception_stack_frame.r2);
    rt_kprintf("r03: 0x%08x\n", context->exception_stack_frame.r3);
    rt_kprintf("r04: 0x%08x\n", context->r4);
    rt_kprintf("r05: 0x%08x\n", context->r5);
    rt_kprintf("r06: 0x%08x\n", context->r6);
    rt_kprintf("r07: 0x%08x\n", context->r7);
    rt_kprintf("r08: 0x%08x\n", context->r8);
    rt_kprintf("r09: 0x%08x\n", context->r9);
    rt_kprintf("r10: 0x%08x\n", context->r10);
    rt_kprintf("r11: 0x%08x\n", context->r11);
    rt_kprintf("r12: 0x%08x\n", context->exception_stack_frame.r12);
    rt_kprintf(" lr: 0x%08x\n", context->exception_stack_frame.lr);
    rt_kprintf(" pc: 0x%08x\n", context->exception_stack_frame.pc);

    /* EXC_RETURN SPSEL bit set: the exception frame is on the process stack, i.e. the fault came from a thread */
    if (exception_info->exc_return & (1 << 2))
    {
        rt_kprintf("hard fault on thread: %s\r\n\r\n", rt_thread_self()->parent.name);

#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
        list_thread();
#endif
    }
    else
    {
        rt_kprintf("hard fault on handler\r\n\r\n");
    }

    /* EXC_RETURN FType bit clear: a floating-point context was stacked */
    if ((exception_info->exc_return & 0x10) == 0)
    {
        rt_kprintf("FPU active!\r\n");
    }

#ifdef RT_USING_FINSH
    hard_fault_track();
#endif /* RT_USING_FINSH */

    while (1);
}

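/*
 * Note (assumption about the companion assembly file, for orientation only):
 * rt_hw_hard_fault_exception() is not called from C code in this file; it is
 * expected to be reached from the port's assembly HardFault handler, which
 * saves the remaining register context on top of the hardware-stacked frame
 * and passes a pointer to the resulting struct exception_info (EXC_RETURN
 * first, followed by the stack_frame layout above).
 */
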
/**
 * reset CPU
 */
void rt_hw_cpu_reset(void)
{
    SCB_AIRCR = SCB_RESET_VALUE;
}

#ifdef RT_USING_CPU_FFS
/**
 * This function finds the first bit set (beginning with the least significant bit)
 * in value and returns the index of that bit.
 *
 * Bits are numbered starting at 1 (the least significant bit). A return value of
 * zero from any of these functions means that the argument was zero.
 *
 * @return the index of the first bit set. If value is 0, then this function
 * shall return 0.
 */
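/* Examples, following the numbering convention above: __rt_ffs(0x0) == 0, __rt_ffs(0x1) == 1, __rt_ffs(0x8) == 4. */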
#if defined(__CC_ARM)
__asm int __rt_ffs(int value)
{
    CMP     r0, #0x00
    BEQ     exit

    RBIT    r0, r0
    CLZ     r0, r0
    ADDS    r0, r0, #0x01

exit
    BX      lr
}
#elif defined(__clang__)
int __rt_ffs(int value)
{
    if (value == 0) return value;

    /* use template operands so the compiler's register allocation is respected */
    __asm volatile(
        "RBIT %0, %1\n"
        "CLZ  %0, %0\n"
        "ADDS %0, %0, #0x01\n"
        : "=r"(value)
        : "r"(value)
    );

    return value;
}
#elif defined(__IAR_SYSTEMS_ICC__)
int __rt_ffs(int value)
{
    if (value == 0) return value;

    asm("RBIT %0, %1" : "=r"(value) : "r"(value));
    asm("CLZ  %0, %1" : "=r"(value) : "r"(value));
    asm("ADDS %0, %1, #0x01" : "=r"(value) : "r"(value));

    return value;
}
#elif defined(__GNUC__)
int __rt_ffs(int value)
{
    return __builtin_ffs(value);
}
#endif /* defined(__CC_ARM) */

#endif /* RT_USING_CPU_FFS */