/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-25     tangzz98     the first version
 */

#include <rtdef.h>
#include <mprotect.h>

#define DBG_ENABLE
#define DBG_SECTION_NAME "MEMORY PROTECTION"
#define DBG_LEVEL DBG_ERROR
#include <rtdbg.h>
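
/*
 * Translate a region's slot in a thread's mem_regions array into the hardware
 * MPU region number it occupies. Thread regions are programmed after the
 * NUM_STATIC_REGIONS slots reserved for system-wide static regions.
 */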
#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)

extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);

static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
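
/*
 * Pick a default RASR memory attribute for a region from its place in the
 * ARMv7-M memory map: addresses at 0xE0000000 and above are treated as
 * device or strongly-ordered space; everything below is looked up by the
 * top three address bits, one table entry per 512 MB slice.
 */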
rt_weak rt_uint32_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
{
    static rt_uint32_t default_mem_attr[] =
    {
        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
        DEVICE_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
        DEVICE_SHAREABLE,
        DEVICE_NON_SHAREABLE
    };
    rt_uint32_t attr = 0U;
    if ((rt_uint32_t)region->start >= 0xE0000000U)
    {
        attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? STRONGLY_ORDERED_SHAREABLE : DEVICE_SHAREABLE;
    }
    else
    {
        attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
    }
    return attr;
}
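
/*
 * Build the RASR value for a region: take the caller's access permissions
 * and XN bit (falling back to the default attributes above when the RESERVED
 * pattern is set), encode the region size, and set the enable bit.
 */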
static rt_uint32_t _mpu_rasr(rt_mem_region_t *region)
{
    rt_uint32_t rasr = 0U;
    if ((region->attr.rasr & RESERVED) == RESERVED)
    {
        rasr |= rt_hw_mpu_region_default_attr(region);
        rasr |= region->attr.rasr & (MPU_RASR_XN_Msk | MPU_RASR_AP_Msk);
    }
    else
    {
        rasr |= region->attr.rasr & MPU_RASR_ATTRS_Msk;
    }
    /* The ARMv7-M MPU encodes a region of 2^n bytes as SIZE = n - 1. */
    rasr |= ((32U - __builtin_clz(region->size - 1U) - 2U + 1U) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk;
    rasr |= MPU_RASR_ENABLE_Msk;
    return rasr;
}
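
/*
 * An ARMv7-M MPU region must be at least MPU_MIN_REGION_SIZE bytes, a power
 * of two in size, and naturally aligned to its size.
 */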
rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
{
    if (region->size < MPU_MIN_REGION_SIZE)
    {
        LOG_E("Region size is too small");
        return RT_FALSE;
    }
    if ((region->size & (region->size - 1U)) != 0U)
    {
        LOG_E("Region size is not a power of 2");
        return RT_FALSE;
    }
    if (((rt_uint32_t)region->start & (region->size - 1U)) != 0U)
    {
        LOG_E("Region is not naturally aligned");
        return RT_FALSE;
    }
    return RT_TRUE;
}
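
/*
 * Check that the hardware MPU matches the configured region budget, program
 * the static regions, and enable the MPU with the privileged default memory
 * map as the background region.
 */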
rt_err_t rt_hw_mpu_init(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    rt_uint8_t num_mpu_regions;
    rt_uint8_t num_dynamic_regions;
    rt_uint8_t index;
    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
    if (num_mpu_regions == 0U)
    {
        LOG_E("Hardware does not support MPU");
        return RT_ERROR;
    }
    if (num_mpu_regions != NUM_MEM_REGIONS)
    {
        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
        LOG_E("NUM_MEM_REGIONS = %d, hardware supports %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
        return RT_ERROR;
    }
    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
    {
        LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
#ifdef RT_USING_HW_STACK_GUARD
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 2);
#else
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
#endif
        return RT_ERROR;
    }
    ARM_MPU_Disable();
    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
    {
        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
        {
            return RT_ERROR;
        }
        static_regions[index].attr.rasr = _mpu_rasr(&(static_regions[index]));
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)static_regions[index].start), static_regions[index].attr.rasr);
    }
    /* Enable background region. */
    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
    return RT_EOK;
}
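
/*
 * Attach a region to a thread's protection table. With thread == RT_NULL the
 * region is only validated and its attributes computed. If the target thread
 * is currently running, the new region is programmed into the MPU right away.
 */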
rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    rt_mem_region_t *free_region;
    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
    {
        return RT_ERROR;
    }
    region->attr.rasr = _mpu_rasr(region);
    if (thread == RT_NULL)
    {
        return RT_EOK;
    }
    rt_enter_critical();
    free_region = rt_mprotect_find_free_region(thread);
    if (free_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Insufficient regions");
        return RT_ERROR;
    }
    rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
    }
    rt_exit_critical();
    return RT_EOK;
}
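
/*
 * Remove a region from a thread's protection table, clearing the hardware
 * region as well when the thread is currently running.
 */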
rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    rt_enter_critical();
    rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
    if (found_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Region not found");
        return RT_ERROR;
    }
    rt_memset(found_region, 0, sizeof(rt_mem_region_t));
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
        ARM_MPU_ClrRegion(index);
    }
    rt_exit_critical();
    return RT_EOK;
}
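
/*
 * Replace the attributes of an existing region in a thread's protection
 * table, reprogramming the hardware region if the thread is running.
 */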
rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
    {
        return RT_ERROR;
    }
    region->attr.rasr = _mpu_rasr(region);
    rt_enter_critical();
    rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
    if (old_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Region not found");
        return RT_ERROR;
    }
    rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
    }
    rt_exit_critical();
    return RT_EOK;
}
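
/*
 * Register a callback for memory management faults. The hook receives the
 * decoded rt_mem_exception_info_t assembled in MemManage_Handler() below.
 */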
rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
{
    mem_manage_hook = hook;
    return RT_EOK;
}
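
/*
 * Called on context switch: program the incoming thread's dynamic regions,
 * then every exclusive region owned by some other thread, into the MPU slots
 * after the static regions, and clear whatever slots remain.
 */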
void rt_hw_mpu_table_switch(rt_thread_t thread)
{
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_uint8_t i;
    rt_uint8_t index = NUM_STATIC_REGIONS;
    if (thread->mem_regions != RT_NULL)
    {
        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
        {
            if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
            {
                ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(((rt_mem_region_t *)thread->mem_regions)[i].start)), ((rt_mem_region_t *)thread->mem_regions)[i].attr.rasr);
                index += 1U;
            }
        }
    }
    for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
    {
        if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
        {
            ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(exclusive_regions[i].region.start)), exclusive_regions[i].region.attr.rasr);
            index += 1U;
        }
    }
    for ( ; index < NUM_MEM_REGIONS; index++)
    {
        ARM_MPU_ClrRegion(index);
    }
}
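
/*
 * MemManage fault handler: when MMFAR holds a valid fault address, locate the
 * region containing it (exclusive regions first, then the faulting thread's
 * dynamic regions, then the static regions), record the MMFSR status bits,
 * hand the assembled info to the registered hook, and halt.
 */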
void MemManage_Handler(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_mem_exception_info_t info;
    rt_int8_t i;
    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
    info.thread = rt_thread_self();
    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
    {
        info.addr = (void *)(SCB->MMFAR);
        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
        {
            if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != rt_thread_self()) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
            {
                rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
                break;
            }
        }
        if (info.region.size == 0U)
        {
            if (info.thread->mem_regions != RT_NULL)
            {
                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
                {
                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
                    {
                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
            if (info.region.size == 0U)
            {
                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
                {
                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
                    {
                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
        }
    }
    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
    if (mem_manage_hook != RT_NULL)
    {
        mem_manage_hook(&info);
    }
    while (1);
}