/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-03-25     quanzhao     the first version
 * 2023-10-10     Shell        Add permission control API
 */
#ifndef __MMU_H_
#define __MMU_H_

#include <rtthread.h>
#include <mm_aspace.h>

/* ARMv7 short-descriptor section (1 MiB) attribute bits */
#define DESC_SEC    (0x2)               /* descriptor type: section */
#define MEMWBWA     ((1<<12)|(3<<2))    /* write back, write allocate */
#define MEMWB       (3<<2)              /* write back, no write allocate */
#define MEMWT       (2<<2)              /* write through, no write allocate */
#define SHAREDEVICE (1<<2)              /* shared device */
#define STRONGORDER (0<<2)              /* strong ordered */
#define XN          (1<<4)              /* eXecute Never */
#ifdef RT_USING_SMART
#define AP_RW       (1<<10)             /* supervisor=RW, user=No */
#define AP_RO       ((1<<10) |(1 << 15)) /* supervisor=RO, user=No (APX set) */
#else
#define AP_RW       (3<<10)             /* supervisor=RW, user=RW */
#define AP_RO       (2<<10)             /* supervisor=RW, user=RO */
#endif
#define SHARED      (1<<16)             /* shareable */

/* domain access control values and the two domains used here */
#define DOMAIN_FAULT  (0x0)             /* generate a domain fault on access */
#define DOMAIN_CHK    (0x1)             /* client: check permission bits */
#define DOMAIN_NOTCHK (0x3)             /* manager: no permission checks */
#define DOMAIN0       (0x0<<5)
#define DOMAIN1       (0x1<<5)
#define DOMAIN0_ATTR  (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR  (DOMAIN_FAULT<<2)

/* device mapping type */
#define DEVICE_MEM (SHARED|AP_RW|DOMAIN0|SHAREDEVICE|DESC_SEC|XN)
/* normal memory mapping type */
#define NORMAL_MEM (SHARED|AP_RW|DOMAIN0|MEMWBWA|DESC_SEC)
/* strongly-ordered, read-only, no-execute mapping (TEX/C/B all zero) */
#define STRONG_ORDER_MEM (SHARED|AP_RO|XN|DESC_SEC)
/* One static memory region for the boot-time MMU table (see
 * rt_hw_init_mmu_table / rt_hw_mmu_setup). */
struct mem_desc
{
    rt_uint32_t vaddr_start;    /* start virtual address of the region */
    rt_uint32_t vaddr_end;      /* end virtual address of the region */
    rt_uint32_t paddr_start;    /* physical address the region maps to */
    rt_uint32_t attr;           /* section attributes, e.g. NORMAL_MEM/DEVICE_MEM */
    struct rt_varea varea;      /* varea backing this region in the aspace */
};
/* second-level (small page) descriptor bit fields */
#define MMU_MAP_MTBL_XN      (1<<0)     /* eXecute Never */
#define MMU_MAP_MTBL_A       (1<<1)     /* small-page descriptor type bit */
#define MMU_MAP_MTBL_B       (1<<2)     /* bufferable */
#define MMU_MAP_MTBL_C       (1<<3)     /* cacheable */
#define MMU_MAP_MTBL_AP01(x) (x<<4)     /* AP[1:0] access-permission field */
#define MMU_MAP_MTBL_TEX(x)  (x<<6)     /* TEX[2:0] memory-type field */
#define MMU_MAP_MTBL_AP2(x)  (x<<9)     /* AP[2] (APX): read-only qualifier */
#define MMU_MAP_MTBL_SHARE   (1<<10)    /* shareable */
#define MMU_MAP_MTBL_NG(x)   (x<<11)    /* not-global (per-ASID) bit */

/* Pre-combined page attributes.
 * K_* = kernel-only (global), U_* = user-accessible (not-global).
 * RO/RW = read-only/read-write, trailing CB = cacheable + bufferable. */
#define MMU_MAP_K_ROCB ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RO ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RWCB ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RW ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_DEVICE ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_ROCB ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RO ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RWCB ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RW ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_DEVICE ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_TRACE(attr) (attr)      /* identity hook for mapping trace */

/* section / page / second-level-table geometry */
#define ARCH_SECTION_SHIFT  20
#define ARCH_SECTION_SIZE   (1 << ARCH_SECTION_SHIFT)   /* 1 MiB section */
#define ARCH_SECTION_MASK   (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT     12
#define ARCH_PAGE_SIZE      (1 << ARCH_PAGE_SHIFT)      /* 4 KiB page */
#define ARCH_PAGE_MASK      (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 10
#define ARCH_PAGE_TBL_SIZE  (1 << ARCH_PAGE_TBL_SHIFT)  /* 1 KiB L2 table */
#define ARCH_PAGE_TBL_MASK  (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_MMU_USED_MASK      3           /* low descriptor-type bits */
#define ARCH_TYPE_SUPERSECTION  (1 << 18)   /* 16 MiB supersection flag */

#define ARCH_ADDRESS_WIDTH_BITS 32
#define ARCH_VADDR_WIDTH        32

/**
 * *info it's possible to map (-1ul & ~ARCH_PAGE_MASK) but a not aligned -1 is
 * never returned on a successful mapping
 */
#define ARCH_MAP_FAILED ((void *)-1)

/* protection flags accepted by the rt_hw_mmu_attr_* helpers below */
#define RT_HW_MMU_PROT_READ    1
#define RT_HW_MMU_PROT_WRITE   2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL  8
#define RT_HW_MMU_PROT_USER    16
#define RT_HW_MMU_PROT_CACHE   32
/* reserve the kernel I/O remap window inside the aspace */
int rt_hw_mmu_ioremap_init(struct rt_aspace *aspace, void *v_address, size_t size);
/* fill the boot translation table from a static mem_desc array */
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size);
/* register desc_nr regions of mdesc with the aspace and enable them */
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc, int desc_nr);
void rt_hw_mmu_init(void);
/* bind aspace to vtable and prepare the mapping range [v_address, +size) */
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off);
/* map [p_addr, +size) at v_addr with attr; ARCH_MAP_FAILED on error */
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr, size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
/* switch the active translation table to that of aspace */
void rt_hw_aspace_switch(struct rt_aspace *aspace);
void rt_hw_mmu_switch(void *tbl);
/* translate a virtual address to physical via the aspace page table */
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, size_t vaddr_start, size_t size);
/* return the currently installed translation table base */
void *rt_hw_mmu_tbl_get(void);
/* apply cmd (cache/permission control) to [vaddr, +size) */
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size, enum rt_mmu_cntl cmd);
/* allocate / release a page table for a new aspace */
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
/* combined AP[2]:AP[1:0] permission field of a small-page descriptor */
#define AP_APX_MASK    (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x3))
#define AP_APX_URW_KRW (MMU_MAP_MTBL_AP2(0x0) | MMU_MAP_MTBL_AP01(0x3)) /* user RW, kernel RW */
#define AP_APX_URO_KRO (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x2)) /* user RO, kernel RO */
  112. /**
  113. * @brief Remove permission from attribution
  114. *
  115. * @param attr architecture specified mmu attribution
  116. * @param prot protect that will be removed
  117. * @return size_t returned attribution
  118. */
  119. rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
  120. {
  121. switch (prot)
  122. {
  123. /* remove write permission for user */
  124. case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
  125. if ((attr & AP_APX_MASK) == AP_APX_URW_KRW)
  126. attr &= ~MMU_MAP_MTBL_AP01(0x1);
  127. break;
  128. case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
  129. switch (attr & AP_APX_MASK)
  130. {
  131. case MMU_MAP_MTBL_AP01(0):
  132. break;
  133. case MMU_MAP_MTBL_AP01(3):
  134. attr = (attr & AP_APX_MASK) | AP_APX_URO_KRO;
  135. default:
  136. attr |= MMU_MAP_MTBL_AP2(0x1);
  137. break;
  138. }
  139. break;
  140. default:
  141. RT_ASSERT(0);
  142. }
  143. return attr;
  144. }
  145. /**
  146. * @brief Add permission from attribution
  147. *
  148. * @param attr architecture specified mmu attribution
  149. * @param prot protect that will be added
  150. * @return size_t returned attribution
  151. */
  152. rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
  153. {
  154. switch (prot)
  155. {
  156. /* add write permission for user */
  157. case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
  158. case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
  159. attr |= MMU_MAP_MTBL_AP01(0x3);
  160. attr &= ~MMU_MAP_MTBL_AP2(0x1);
  161. break;
  162. default:
  163. RT_ASSERT(0);
  164. }
  165. return attr;
  166. }
  167. /**
  168. * @brief Test permission from attribution
  169. *
  170. * @param attr architecture specified mmu attribution
  171. * @param prot protect that will be test
  172. * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
  173. */
  174. rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
  175. {
  176. rt_bool_t rc = 0;
  177. switch (prot)
  178. {
  179. /* test write permission for user */
  180. case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
  181. rc = (AP_APX_MASK & attr) == (AP_APX_URW_KRW);
  182. break;
  183. default:
  184. RT_ASSERT(0);
  185. }
  186. return rc;
  187. }
  188. #endif