/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-12     RT-Thread    the first version
 * 2023-08-15     Shell        Support more mapping attribution
 */

#ifndef __MMU_H_
#define __MMU_H_

#ifndef __ASSEMBLY__

#include <rtthread.h>
#include <mm_aspace.h>

/* normal memory (write-back cacheable) mapping type */
#define NORMAL_MEM          0
/* normal non-cacheable memory mapping type */
#define NORMAL_NOCACHE_MEM  1
/* device memory mapping type */
#define DEVICE_MEM          2

struct mem_desc
{
    unsigned long vaddr_start;
    unsigned long vaddr_end;
    unsigned long paddr_start;
    unsigned long attr;
    struct rt_varea varea;
};
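
/*
 * Usage sketch (illustrative only, not part of this header): a board support
 * package typically provides a static mem_desc table describing the kernel's
 * initial mappings and passes it to rt_hw_mmu_setup().  The address ranges
 * and table name below are hypothetical; rt_kernel_space is the kernel
 * address space object from the mm component.
 *
 *     static struct mem_desc platform_mem_desc[] = {
 *         // vaddr_start, vaddr_end,      paddr_start, attr
 *         {0x40000000,    0x80000000 - 1, 0x40000000,  NORMAL_MEM},
 *         {0x09000000,    0x09200000 - 1, 0x09000000,  DEVICE_MEM},
 *     };
 *
 *     rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
 *                     sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */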

#endif /* !__ASSEMBLY__ */

#define RT_HW_MMU_PROT_READ    1
#define RT_HW_MMU_PROT_WRITE   2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL  8
#define RT_HW_MMU_PROT_USER    16
#define RT_HW_MMU_PROT_CACHE   32

#define MMU_AF_SHIFT     10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT     6
#define MMU_MA_SHIFT     2
#define MMU_AP_MASK      (0x3 << MMU_AP_SHIFT)

#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */

#define MMU_ATTR_AF  (1ul << MMU_AF_SHIFT) /* the access flag */
#define MMU_ATTR_DBM (1ul << 51)           /* the dirty bit modifier */

#define MMU_MAP_CUSTOM(ap, mtype)                                 \
    ((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) |      \
     ((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT))

#define MMU_MAP_K_ROCB   MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_RO     MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)
#define MMU_MAP_K_RW     MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
#define MMU_MAP_U_ROCB   MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM)
#define MMU_MAP_U_RO     MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
#define MMU_MAP_U_RW     MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM)

#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))
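
/*
 * How the mapping attributes compose (informative sketch): MMU_MAP_CUSTOM()
 * builds the lower attribute bits of an AArch64 block/page descriptor, i.e.
 * the access flag (bit 10), the shareability field (bits 9:8, set to 0x2
 * here), the AP access-permission field (bits 7:6) and the MAIR attribute
 * index (bits 4:2).  For example:
 *
 *     MMU_MAP_K_DEVICE == MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
 *                      == (0x1UL << 10) | (0x2UL << 8) | (0UL << 6) | (2UL << 2)
 *                      == 0x608
 *
 *     MMU_MAP_U_RWCB   == MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
 *                      == (0x1UL << 10) | (0x2UL << 8) | (1UL << 6) | (0UL << 2)
 *                      == 0x640
 */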

#define ARCH_SECTION_SHIFT  21
#define ARCH_SECTION_SIZE   (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK   (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT     12
#define ARCH_PAGE_SIZE      (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK      (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE  (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK  (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_VADDR_WIDTH        48
#define ARCH_ADDRESS_WIDTH_BITS 64

#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE     -3
#define MMU_MAP_ERROR_CONFLICT   -4

#define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)

#ifndef __ASSEMBLY__

struct rt_aspace;

void rt_hw_mmu_ktbl_set(unsigned long tbl);
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
                     int desc_nr);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off);
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
void rt_hw_aspace_switch(struct rt_aspace *aspace);
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
                               rt_size_t size);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
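
/*
 * Usage sketch (illustrative, not part of this header): mapping a device
 * register window into an address space, translating it back to a physical
 * address, and unmapping it again.  The addresses below are hypothetical,
 * rt_kernel_space is the kernel address space object from the mm component,
 * and a non-NULL return from rt_hw_mmu_map() is assumed to indicate success.
 *
 *     void *va = (void *)0xffffd00000000000;
 *     void *pa = (void *)0x09000000;
 *
 *     void *mapped = rt_hw_mmu_map(&rt_kernel_space, va, pa, ARCH_PAGE_SIZE,
 *                                  MMU_MAP_K_DEVICE);
 *     if (mapped)
 *     {
 *         RT_ASSERT(rt_hw_mmu_v2p(&rt_kernel_space, mapped) == pa);
 *         rt_hw_mmu_unmap(&rt_kernel_space, mapped, ARCH_PAGE_SIZE);
 *     }
 */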

/* Fetch the current TTBR0_EL1 root, mask off the CnP bit and the ASID, and
 * convert the physical table base to a kernel virtual address. */
rt_inline void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
}

/* Translate a kernel virtual address to a physical address using the
 * hardware address-translation instruction (AT S1E1W, stage-1 EL1 write);
 * the result is reported in PAR_EL1, whose bit 0 flags a failed translation. */
static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
{
    rt_ubase_t par;
    void *paddr;

    __asm__ volatile("at s1e1w, %0" ::"r"(v_addr) : "memory");
    __asm__ volatile("mrs %0, par_el1" : "=r"(par)::"memory");

    if (par & 0x1)
    {
        /* PAR_EL1.F is set: the translation aborted */
        paddr = ARCH_MAP_FAILED;
    }
    else
    {
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
        par &= MMU_ADDRESS_MASK;
        par |= (rt_ubase_t)v_addr & ARCH_PAGE_MASK;
        paddr = (void *)par;
    }

    return paddr;
}
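
/*
 * Usage sketch (illustrative): unlike rt_hw_mmu_v2p(), which takes an
 * explicit aspace, rt_hw_mmu_kernel_v2p() consults only the tables that are
 * currently installed, so it is a cheap way to probe whether a kernel
 * virtual address is mapped.  probe_buf below is hypothetical.
 *
 *     static char probe_buf[ARCH_PAGE_SIZE];
 *
 *     void *pa = rt_hw_mmu_kernel_v2p(probe_buf);
 *     if (pa == ARCH_MAP_FAILED)
 *     {
 *         // probe_buf is not mapped in the active stage-1 tables
 *     }
 */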

/**
 * @brief Add permission to an attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the protection to be added
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* add write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            attr = (attr & ~MMU_AP_MASK) | (MMU_AP_KAUA << MMU_AP_SHIFT);
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}
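
/*
 * Usage sketch (illustrative): starting from a user read-only mapping
 * attribute and granting user write access.  Only the
 * RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER combination is handled by this
 * helper; any other request trips the RT_ASSERT above.
 *
 *     size_t attr = MMU_MAP_U_ROCB;
 *     attr = rt_hw_mmu_attr_add_perm(attr,
 *                                    RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
 *     // attr now carries MMU_AP_KAUA (kernel r/w, user r/w) in its AP field
 */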

/**
 * @brief Remove permission from an attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the protection to be removed
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* remove write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            /* if AP[1] (0x40) marks the page user-accessible, set AP[2]
             * (0x80) to force it read-only */
            if (attr & 0x40)
                attr |= 0x80;
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}

/**
 * @brief Test permission from an attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the protection to be tested
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t rc;
    switch (prot)
    {
        /* test write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            if ((attr & MMU_AP_MASK) == (MMU_AP_KAUA << MMU_AP_SHIFT))
                rc = RT_TRUE;
            else
                rc = RT_FALSE;
            break;
        default:
            RT_ASSERT(0);
    }
    return rc;
}
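
/*
 * Usage sketch (illustrative): revoking user write access and checking the
 * result with the test helper.
 *
 *     size_t attr = MMU_MAP_U_RWCB;
 *
 *     RT_ASSERT(rt_hw_mmu_attr_test_perm(
 *         attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) == RT_TRUE);
 *
 *     attr = rt_hw_mmu_attr_rm_perm(attr,
 *                                   RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
 *
 *     RT_ASSERT(rt_hw_mmu_attr_test_perm(
 *         attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) == RT_FALSE);
 */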

int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd);

#endif /* !__ASSEMBLY__ */

#endif /* __MMU_H_ */