mmu.h

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-12     RT-Thread    the first version
 */
#ifndef __MMU_H_
#define __MMU_H_

#include <rtthread.h>

/* normal (write-back cacheable) memory mapping type */
#define NORMAL_MEM         0
/* normal non-cacheable memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* device memory mapping type */
#define DEVICE_MEM         2
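
/*
 * Note (assumption about the port's MAIR setup): these values are
 * memory-attribute indices; shifted by MMU_MA_SHIFT they form the AttrIndx
 * field (bits [4:2]) of an AArch64 stage-1 block/page descriptor, selecting
 * the corresponding attribute entry programmed into MAIR_EL1 by the MMU
 * setup code.
 */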
struct mem_desc
{
    unsigned long vaddr_start;  /* start of the virtual address range */
    unsigned long vaddr_end;    /* end of the virtual address range */
    unsigned long paddr_start;  /* physical address the range maps to */
    unsigned long attr;         /* mapping attributes for the range */
};
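
/*
 * Usage sketch (hypothetical addresses; the attribute encoding expected by
 * rt_hw_mmu_setup() depends on the port, MMU_MAP_K_* values are shown here
 * as one plausible choice, and end addresses are assumed inclusive):
 *
 *   static struct mem_desc platform_mem_desc[] =
 *   {
 *       {0x40000000, 0x40000000 + 0x10000000 - 1, 0x40000000, MMU_MAP_K_RWCB},   -- RAM
 *       {0x09000000, 0x09000000 + 0x00001000 - 1, 0x09000000, MMU_MAP_K_DEVICE}, -- UART
 *   };
 *
 *   rt_hw_mmu_setup(platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */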
#define MMU_AF_SHIFT     10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT     6
#define MMU_MA_SHIFT     2

#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
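
/*
 * The MMU_AP_* encodings above match the AArch64 stage-1 AP[2:1] field;
 * shifted by MMU_AP_SHIFT they occupy descriptor bits [7:6].
 */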
/*
 * Ready-made attribute words: K_/U_ selects kernel-only or user-accessible
 * permissions, RWCB is read/write cached+buffered (write-back normal memory),
 * RW is read/write non-cacheable, RO is read-only, and DEVICE is device
 * memory. All of them set the access flag and the shareability field
 * (0x2 << MMU_SHARED_SHIFT, outer shareable).
 */
#define MMU_MAP_K_RO (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KRUN << MMU_AP_SHIFT) |\
        (NORMAL_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_K_RWCB (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUN << MMU_AP_SHIFT) |\
        (NORMAL_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_K_RW (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUN << MMU_AP_SHIFT) |\
        (NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_K_DEVICE (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUN << MMU_AP_SHIFT) |\
        (DEVICE_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_U_RO (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KRUR << MMU_AP_SHIFT) |\
        (NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_U_RWCB (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUA << MMU_AP_SHIFT) |\
        (NORMAL_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_U_RW (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUA << MMU_AP_SHIFT) |\
        (NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_U_DEVICE (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        (MMU_AP_KAUA << MMU_AP_SHIFT) |\
        (DEVICE_MEM << MMU_MA_SHIFT)\
        )
#define MMU_MAP_CUSTOM(ap, mtype) (\
        (0x1UL << MMU_AF_SHIFT) |\
        (0x2UL << MMU_SHARED_SHIFT) |\
        ((ap) << MMU_AP_SHIFT) |\
        ((mtype) << MMU_MA_SHIFT)\
        )
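
/*
 * Worked example (values follow from the shift definitions above):
 *
 *   MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM)
 *     = (0x1 << 10) | (0x2 << 8) | (3 << 6) | (0 << 2)
 *     = 0x400 | 0x200 | 0xC0 | 0x0
 *     = 0x6C0
 *
 * i.e. the attribute word MMU_MAP_U_RO would produce if it used NORMAL_MEM
 * instead of NORMAL_NOCACHE_MEM.
 */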
#define ARCH_SECTION_SHIFT  21
#define ARCH_SECTION_SIZE   (1 << ARCH_SECTION_SHIFT)  /* 2 MB sections */
#define ARCH_SECTION_MASK   (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT     12
#define ARCH_PAGE_SIZE      (1 << ARCH_PAGE_SHIFT)     /* 4 KB pages */
#define ARCH_PAGE_MASK      (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE  (1 << ARCH_PAGE_TBL_SHIFT) /* 4 KB page tables */
#define ARCH_PAGE_TBL_MASK  (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_ADDRESS_WIDTH_BITS 64

#define MMU_MAP_ERROR_VANOTALIGN -1 /* virtual address not aligned */
#define MMU_MAP_ERROR_PANOTALIGN -2 /* physical address not aligned */
#define MMU_MAP_ERROR_NOPAGE     -3 /* no page-table page available */
#define MMU_MAP_ERROR_CONFLICT   -4 /* conflicts with an existing mapping */
typedef struct
{
    size_t *vtable; /* top-level page table */
    size_t  vstart; /* start of the managed virtual address range */
    size_t  vend;   /* end of the managed virtual address range */
    size_t  pv_off; /* offset between physical and virtual addresses */
} rt_mmu_info;
void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr);

int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off);
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void *v_address, size_t size);

#ifdef RT_USING_USERSPACE
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr);
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
#else
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr);
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size);
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
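
/*
 * Usage sketch (hypothetical addresses, RT_USING_USERSPACE variant assumed):
 * map a 4 KB device register window into the kernel MMU context, translate
 * the address back, then remove the mapping.
 *
 *   void *va = rt_hw_mmu_map(&mmu_info, (void *)0xF0000000, (void *)0x09000000,
 *                            ARCH_PAGE_SIZE, MMU_MAP_K_DEVICE);
 *   void *pa = rt_hw_mmu_v2p(&mmu_info, va);
 *   ...
 *   rt_hw_mmu_unmap(&mmu_info, va, ARCH_PAGE_SIZE);
 */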
void rt_hw_mmu_ktbl_set(unsigned long tbl);
void *rt_hw_mmu_tbl_get(void);
void rt_hw_mmu_switch(void *mmu_table);

extern rt_mmu_info mmu_info;

#endif /* __MMU_H_ */