mmu.h

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-12     RT-Thread    the first version
 */
#ifndef __MMU_H_
#define __MMU_H_

#include <rtthread.h>
#include <mm_aspace.h>

/* normal memory wra mapping type */
#define NORMAL_MEM         0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* device mapping type */
#define DEVICE_MEM         2

struct mem_desc
{
    unsigned long vaddr_start;
    unsigned long vaddr_end;
    unsigned long paddr_start;
    unsigned long attr;
    struct rt_varea varea;
};
#define MMU_AF_SHIFT     10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT     6
#define MMU_MA_SHIFT     2

#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */

#define MMU_MAP_CUSTOM(ap, mtype)                                \
    ((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) |     \
     ((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT))
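/*
 * Worked example (illustration only): MMU_MAP_CUSTOM() assembles the lower
 * attribute bits of an AArch64 stage-1 block/page descriptor, on the usual
 * layout of AF at bit 10, SH[1:0] at bits 9:8 (0x2 = outer shareable),
 * AP[2:1] at bits 7:6 and AttrIndx at bits 4:2:
 *
 *   MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)          = 0x400 | 0x200                       = 0x600
 *   MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)          = 0x400 | 0x200 | (2 << 2)            = 0x608
 *   MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)  = 0x400 | 0x200 | (1 << 6) | (1 << 2) = 0x644
 */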
#define MMU_MAP_K_RO     MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)
#define MMU_MAP_K_RW     MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
#define MMU_MAP_U_RO     MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
#define MMU_MAP_U_RW     MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM)
#define ARCH_SECTION_SHIFT  21
#define ARCH_SECTION_SIZE   (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK   (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT     12
#define ARCH_PAGE_SIZE      (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK      (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE  (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK  (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_VADDR_WIDTH        48
#define ARCH_ADDRESS_WIDTH_BITS 64

#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE     -3
#define MMU_MAP_ERROR_CONFLICT   -4

#define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)
struct rt_aspace;

void rt_hw_mmu_ktbl_set(unsigned long tbl);
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
                     int desc_nr);
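/*
 * Usage sketch (illustrative, not part of this header): a board typically
 * describes its fixed mappings with an array of struct mem_desc and hands it
 * to rt_hw_mmu_setup() once during early init. The names and addresses below
 * are hypothetical, and the sketch assumes the attr field carries one of the
 * mapping types above (NORMAL_MEM / NORMAL_NOCACHE_MEM / DEVICE_MEM).
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       { 0xffff000040000000UL, 0xffff00007fffffffUL, 0x40000000UL, NORMAL_MEM },
 *       { 0xffff000009000000UL, 0xffff000009000fffUL, 0x09000000UL, DEVICE_MEM },
 *   };
 *
 *   rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */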
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address,
                       rt_size_t size, rt_size_t *vtable, rt_size_t pv_off);
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
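/*
 * Usage sketch (illustrative): mapping one page of device MMIO into the kernel
 * space and tearing it down again. The addresses are hypothetical, both calls
 * expect page-aligned addresses and sizes, and the sketch assumes
 * rt_hw_mmu_map() returns the mapped virtual address on success and RT_NULL
 * on failure.
 *
 *   void *va = rt_hw_mmu_map(&rt_kernel_space, (void *)0xffff00000a000000UL,
 *                            (void *)0x09000000UL, ARCH_PAGE_SIZE,
 *                            MMU_MAP_K_DEVICE);
 *   if (va != RT_NULL)
 *   {
 *       ... access the device registers through va ...
 *       rt_hw_mmu_unmap(&rt_kernel_space, va, ARCH_PAGE_SIZE);
 *   }
 */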
void rt_hw_aspace_switch(struct rt_aspace *aspace);
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
                               rt_size_t size);
/* read the active translation table base from TTBR0_EL1; the mask clears
 * bit 0 (CnP) and the ASID/upper bits above the 48-bit base address */
static inline void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return (void *)(tbl & ((1ul << 48) - 2));
}
/* translate a kernel virtual address without taking the page-table lock;
 * the caller must already hold it */
static inline void *_rt_kmem_v2p(void *vaddr)
{
    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
}

/* translate a kernel virtual address to a physical address, serialized by
 * the kernel space page-table lock */
static inline void *rt_kmem_v2p(void *vaddr)
{
    MM_PGTBL_LOCK(&rt_kernel_space);
    void *paddr = _rt_kmem_v2p(vaddr);
    MM_PGTBL_UNLOCK(&rt_kernel_space);
    return paddr;
}
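/*
 * Usage sketch (illustrative): translating a kernel virtual address to its
 * physical address under the page-table lock. The comparison assumes
 * ARCH_MAP_FAILED is the sentinel returned for an unmapped address.
 *
 *   void *pa = rt_kmem_v2p(buffer);
 *   if (pa != ARCH_MAP_FAILED)
 *   {
 *       ... hand pa to a DMA engine, etc. ...
 *   }
 */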
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd);
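/*
 * Usage sketch (illustrative): changing the attributes of an existing mapping.
 * The command name below is assumed to come from enum rt_mmu_cntl in
 * mm_aspace.h; MMU_CNTL_NONCACHE, for instance, would remap the range as
 * non-cacheable, e.g. for a DMA buffer.
 *
 *   rt_hw_mmu_control(&rt_kernel_space, dma_buf, ARCH_PAGE_SIZE,
 *                     MMU_CNTL_NONCACHE);
 */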
#endif /* __MMU_H_ */