/* mpu.c - ARMv8-M MPU backend for RT-Thread memory protection */
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date         Author     Notes
 * 2023-09-25   tangzz98   the first version
 */
#include <rtdef.h>
#include <mprotect.h>
#define DBG_ENABLE
#define DBG_SECTION_NAME "MEMORY PROTECTION"
#define DBG_LEVEL DBG_ERROR
#include <rtdbg.h>
/* Hardware MPU slot of a dynamic region: its array index inside
 * thread->mem_regions, offset past the static regions that occupy the
 * first NUM_STATIC_REGIONS slots. */
#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)
extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);
/* Optional user callback invoked from MemManage_Handler on a memory fault. */
static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
/* Software cache of the 8 MAIR attribute slots;
 * RT_ARM_DEFAULT_MAIR_ATTR marks a slot as unused. */
static rt_uint8_t mpu_mair[8U];
  21. rt_weak rt_uint8_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
  22. {
  23. static rt_uint8_t default_mem_attr[] =
  24. {
  25. ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
  26. ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
  27. ARM_MPU_ATTR_DEVICE_nGnRE,
  28. ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
  29. ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
  30. ARM_MPU_ATTR_DEVICE_nGnRE,
  31. ARM_MPU_ATTR_DEVICE_nGnRE
  32. };
  33. rt_uint8_t attr = 0U;
  34. if ((rt_uint32_t)region->start >= 0xE0000000U)
  35. {
  36. attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? ARM_MPU_ATTR_DEVICE_nGnRE : ARM_MPU_ATTR_DEVICE_nGnRnE;
  37. }
  38. else
  39. {
  40. attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
  41. }
  42. return attr;
  43. }
  44. static rt_err_t _mpu_rbar_rlar(rt_mem_region_t *region)
  45. {
  46. rt_uint32_t rlar = 0U;
  47. rt_uint8_t mair_attr;
  48. rt_uint8_t index;
  49. rt_uint8_t attr_indx = 0xFFU;
  50. region->attr.rbar = (rt_uint32_t)region->start | (region->attr.rbar & (~MPU_RBAR_BASE_Msk));
  51. rlar |= ((rt_uint32_t)region->start + region->size - 1U) & MPU_RLAR_LIMIT_Msk;
  52. if (region->attr.mair_attr == RT_ARM_DEFAULT_MAIR_ATTR)
  53. {
  54. mair_attr = rt_hw_mpu_region_default_attr(region);
  55. }
  56. else
  57. {
  58. mair_attr = (rt_uint8_t)region->attr.mair_attr;
  59. }
  60. for (index = 0U; index < 8U; index++)
  61. {
  62. if (mpu_mair[index] == RT_ARM_DEFAULT_MAIR_ATTR)
  63. {
  64. break;
  65. }
  66. else if (mpu_mair[index] == mair_attr)
  67. {
  68. attr_indx = index;
  69. break;
  70. }
  71. }
  72. /*
  73. * Current region's mair_attr does not match any existing region.
  74. * All entries in MPU_MAIR are configured.
  75. */
  76. if (index == 8U)
  77. {
  78. return RT_ERROR;
  79. }
  80. /* An existing region has the same mair_attr. */
  81. if (attr_indx != 0xFFU)
  82. {
  83. rlar |= attr_indx & MPU_RLAR_AttrIndx_Msk;
  84. }
  85. /* Current region's mair_attr does not match any existing region. */
  86. else
  87. {
  88. ARM_MPU_SetMemAttr(index, mair_attr);
  89. rlar |= index & MPU_RLAR_AttrIndx_Msk;
  90. }
  91. rlar |= MPU_RLAR_EN_Msk;
  92. region->attr.rlar = rlar;
  93. return RT_EOK;
  94. }
  95. rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
  96. {
  97. if (region->size < MPU_MIN_REGION_SIZE)
  98. {
  99. LOG_E("Region size is too small");
  100. return RT_FALSE;
  101. }
  102. if (region->size & (~(MPU_MIN_REGION_SIZE - 1U)) != region->size)
  103. {
  104. LOG_E("Region size is not a multiple of 32 bytes");
  105. return RT_FALSE;
  106. }
  107. if ((rt_uint32_t)region->start & (MPU_MIN_REGION_SIZE - 1U) != 0U)
  108. {
  109. LOG_E("Region is not aligned by 32 bytes");
  110. return RT_FALSE;
  111. }
  112. return RT_TRUE;
  113. }
/*
 * Initialize the MPU: verify that the hardware region count matches the
 * build configuration, program the static regions, and enable the MPU with
 * the privileged default (background) memory map.
 * Returns RT_EOK on success, RT_ERROR on any configuration mismatch.
 */
rt_err_t rt_hw_mpu_init(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    rt_uint8_t num_mpu_regions;
    rt_uint8_t num_dynamic_regions;
    rt_uint8_t index;
    /* MPU_TYPE.DREGION reads 0 when no MPU is implemented. */
    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
    if (num_mpu_regions == 0U)
    {
        LOG_E("Hardware does not support MPU");
        return RT_ERROR;
    }
    if (num_mpu_regions != NUM_MEM_REGIONS)
    {
        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
        LOG_E("NUM_MEM_REGIONS = %d, hardware support %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
        return RT_ERROR;
    }
    /* Per-thread regions plus system-wide exclusive regions share the
     * hardware slots left over after the static regions. */
    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
    {
        LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
#ifdef RT_USING_HW_STACK_GUARD
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 1);
#else
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
#endif
        return RT_ERROR;
    }
    /* Mark all 8 MAIR attribute slots as free. */
    for (index = 0U; index < 8U; index++)
    {
        mpu_mair[index] = RT_ARM_DEFAULT_MAIR_ATTR;
    }
    /* MPU must be disabled while regions are (re)programmed. */
    ARM_MPU_Disable();
    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
    {
        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
        {
            return RT_ERROR;
        }
        if (_mpu_rbar_rlar(&(static_regions[index])) == RT_ERROR)
        {
            LOG_E("Number of different mair_attr configurations exceeds 8");
            return RT_ERROR;
        }
        ARM_MPU_SetRegion(index, static_regions[index].attr.rbar, static_regions[index].attr.rlar);
    }
    /* Enable background region. */
    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
    return RT_EOK;
}
  165. rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
  166. {
  167. rt_uint8_t index;
  168. rt_mem_region_t *free_region;
  169. if (rt_hw_mpu_region_valid(region) == RT_FALSE)
  170. {
  171. return RT_ERROR;
  172. }
  173. rt_enter_critical();
  174. if (_mpu_rbar_rlar(region) == RT_ERROR)
  175. {
  176. rt_exit_critical();
  177. LOG_E("Number of different mair_attr configurations exceeds 8");
  178. return RT_ERROR;
  179. }
  180. if (thread == RT_NULL)
  181. {
  182. rt_exit_critical();
  183. return RT_EOK;
  184. }
  185. free_region = rt_mprotect_find_free_region(thread);
  186. if (free_region == RT_NULL)
  187. {
  188. rt_exit_critical();
  189. LOG_E("Insufficient regions");
  190. return RT_ERROR;
  191. }
  192. rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
  193. if (thread == rt_thread_self())
  194. {
  195. index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
  196. ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
  197. }
  198. rt_exit_critical();
  199. return RT_EOK;
  200. }
  201. rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
  202. {
  203. rt_uint8_t index;
  204. rt_enter_critical();
  205. rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
  206. if (found_region == RT_NULL)
  207. {
  208. rt_exit_critical();
  209. LOG_E("Region not found");
  210. return RT_ERROR;
  211. }
  212. rt_memset(found_region, 0, sizeof(rt_mem_region_t));
  213. if (thread == rt_thread_self())
  214. {
  215. index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
  216. ARM_MPU_ClrRegion(index);
  217. }
  218. rt_exit_critical();
  219. return RT_EOK;
  220. }
  221. rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
  222. {
  223. rt_uint8_t index;
  224. if (rt_hw_mpu_region_valid(region) == RT_FALSE)
  225. {
  226. return RT_ERROR;
  227. }
  228. rt_enter_critical();
  229. if (_mpu_rbar_rlar(region) == RT_ERROR)
  230. {
  231. rt_exit_critical();
  232. LOG_E("Number of different mair_attr configurations exceeds 8");
  233. return RT_ERROR;
  234. }
  235. rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
  236. if (old_region == RT_NULL)
  237. {
  238. rt_exit_critical();
  239. LOG_E("Region not found");
  240. return RT_ERROR;
  241. }
  242. rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
  243. if (thread == rt_thread_self())
  244. {
  245. index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
  246. ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
  247. }
  248. rt_exit_critical();
  249. return RT_EOK;
  250. }
  251. rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
  252. {
  253. mem_manage_hook = hook;
  254. return RT_EOK;
  255. }
  256. void rt_hw_mpu_table_switch(rt_thread_t thread)
  257. {
  258. extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
  259. rt_uint8_t i;
  260. rt_uint8_t index = NUM_STATIC_REGIONS;
  261. if (thread->mem_regions != RT_NULL)
  262. {
  263. for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
  264. {
  265. if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
  266. {
  267. ARM_MPU_SetRegion(index, ((rt_mem_region_t *)thread->mem_regions)[i].attr.rbar, ((rt_mem_region_t *)thread->mem_regions)[i].attr.rlar);
  268. index += 1U;
  269. }
  270. }
  271. }
  272. for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
  273. {
  274. if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
  275. {
  276. ARM_MPU_SetRegion(index, exclusive_regions[i].region.attr.rbar, exclusive_regions[i].region.attr.rlar);
  277. index += 1U;
  278. }
  279. }
  280. for ( ; index < NUM_MEM_REGIONS; index++)
  281. {
  282. ARM_MPU_ClrRegion(index);
  283. }
  284. }
/*
 * MemManage fault handler.  Decodes the fault address (when MMARVALID is
 * set), identifies the region it falls in — searching exclusive regions
 * first, then the faulting thread's dynamic regions, then the static
 * regions — fills an rt_mem_exception_info_t, and hands it to the user
 * hook if one is installed.  Never returns: spins after the hook runs.
 */
void MemManage_Handler(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_mem_exception_info_t info;
    rt_int8_t i;
    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
    info.thread = rt_thread_self();
    /* MMFAR holds a valid fault address only when MMARVALID is set. */
    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
    {
        info.addr = (void *)(SCB->MMFAR);
        /* Exclusive regions owned by another thread take priority. */
        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
        {
            /* NOTE(review): casting &exclusive_regions[i] (the whole
             * rt_mem_exclusive_region_t) to rt_mem_region_t * assumes the
             * region member sits at offset 0 — confirm against the struct
             * layout; otherwise &exclusive_regions[i].region is intended. */
            if ((exclusive_regions[i].owner != RT_NULL) && ((exclusive_regions[i].owner != rt_thread_self())) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
            {
                rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
                break;
            }
        }
        /* info.region.size stays 0 when no exclusive region matched. */
        if (info.region.size == 0U)
        {
            if (info.thread->mem_regions != RT_NULL)
            {
                /* Search the faulting thread's dynamic regions. */
                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
                {
                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
                    {
                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
            /* Fall back to the static regions. */
            if (info.region.size == 0U)
            {
                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
                {
                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
                    {
                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
        }
    }
    /* Raw MemManage fault status bits for the hook to inspect. */
    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
    if (mem_manage_hook != RT_NULL)
    {
        mem_manage_hook(&info);
    }
    /* Do not resume the faulting context. */
    while (1);
}