/* mmu.h — RT-Thread ARM MMU page-table definitions */
/*
 * File      : mmu.h
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2006, RT-Thread Development Team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Change Logs:
 * Date           Author       Notes
 */
#ifndef __MMU_H__
#define __MMU_H__

#include <rtthread.h>

/* Cache line size in bytes.
 * NOTE(review): 32 bytes matches ARM9-family cores; confirm for the target CPU. */
#define CACHE_LINE_SIZE    32
/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PGD)
 *   - common
 */
#define PGD_TYPE_MASK       (3 << 0)    /* descriptor type field, bits [1:0] */
#define PGD_TYPE_FAULT      (0 << 0)    /* invalid entry: any access faults */
#define PGD_TYPE_TABLE      (1 << 0)    /* entry points to a level-2 page table */
#define PGD_TYPE_SECT       (2 << 0)    /* entry maps a 1 MB section directly */
#define PGD_BIT4            (1 << 4)    /* "implementation defined"/SBO bit; set on ARMv4/v5 descriptors */
#define PGD_DOMAIN(x)       ((x) << 5)  /* domain number, bits [8:5] */
#define PGD_PROTECTION      (1 << 9)    /* ARMv5 */
/*
 *   - section
 */
#define PGD_SECT_BUFFERABLE (1 << 2)    /* B bit: write-buffer enable */
#define PGD_SECT_CACHEABLE  (1 << 3)    /* C bit: cache enable */
#define PGD_SECT_XN         (1 << 4)    /* ARMv6 execute-never.
                                         * NOTE(review): same bit as PGD_BIT4 above — the two
                                         * encodings are mutually exclusive per architecture rev. */
#define PGD_SECT_AP0        (1 << 10)   /* access permission bit 0 */
#define PGD_SECT_AP1        (1 << 11)   /* access permission bit 1 */
#define PGD_SECT_TEX(x)     ((x) << 12) /* ARMv5: TEX memory-type field, bits [14:12] */
#define PGD_SECT_APX        (1 << 15)   /* ARMv6 */
#define PGD_SECT_S          (1 << 16)   /* ARMv6: shareable */
#define PGD_SECT_nG         (1 << 17)   /* ARMv6: not-global */
#define PGD_SECT_SUPER      (1 << 18)   /* ARMv6: 16 MB supersection */
/* Composed memory-type encodings (C/B/TEX combinations) */
#define PGD_SECT_UNCACHED      (0)
#define PGD_SECT_BUFFERED      (PGD_SECT_BUFFERABLE)
#define PGD_SECT_WT            (PGD_SECT_CACHEABLE)                        /* write-through */
#define PGD_SECT_WB            (PGD_SECT_CACHEABLE | PGD_SECT_BUFFERABLE)  /* write-back */
#define PGD_SECT_MINICACHE     (PGD_SECT_TEX(1) | PGD_SECT_CACHEABLE)
#define PGD_SECT_WBWA          (PGD_SECT_TEX(1) | PGD_SECT_CACHEABLE | PGD_SECT_BUFFERABLE) /* write-back, write-allocate */
#define PGD_SECT_NONSHARED_DEV (PGD_SECT_TEX(2))                           /* non-shared device memory */
/*
 * + Level 2 descriptor (PTE)
 *   - common
 */
#define PTE_TYPE_MASK       (3 << 0)    /* descriptor type field, bits [1:0] */
#define PTE_TYPE_FAULT      (0 << 0)    /* invalid entry: any access faults */
#define PTE_TYPE_LARGE      (1 << 0)    /* 64 KB large page */
#define PTE_TYPE_SMALL      (2 << 0)    /* 4 KB small page */
#define PTE_TYPE_EXT        (3 << 0)    /* ARMv5: extended small page */
#define PTE_BUFFERABLE      (1 << 2)    /* B bit: write-buffer enable */
#define PTE_CACHEABLE       (1 << 3)    /* C bit: cache enable */
/*
 *   - extended small page / tiny page
 */
#define PTE_EXT_XN          (1 << 0)    /* ARMv6: execute-never */
#define PTE_EXT_AP_MASK     (3 << 4)    /* access-permission field */
#define PTE_EXT_AP0         (1 << 4)
#define PTE_EXT_AP1         (2 << 4)
#define PTE_EXT_AP_UNO_SRO  (0 << 4)                 /* user: none,       supervisor: read-only (with S=1,R=0) */
#define PTE_EXT_AP_UNO_SRW  (PTE_EXT_AP0)            /* user: none,       supervisor: read/write */
#define PTE_EXT_AP_URO_SRW  (PTE_EXT_AP1)            /* user: read-only,  supervisor: read/write */
#define PTE_EXT_AP_URW_SRW  (PTE_EXT_AP1|PTE_EXT_AP0)/* user: read/write, supervisor: read/write */
#define PTE_EXT_TEX(x)      ((x) << 6)  /* ARMv5: TEX memory-type field */
#define PTE_EXT_APX         (1 << 9)    /* ARMv6 */
#define PTE_EXT_SHARED      (1 << 10)   /* ARMv6 */
#define PTE_EXT_NG          (1 << 11)   /* ARMv6: not-global */
/*
 *   - small page
 * Small pages carry four AP fields (ap0..ap3), one per 1 KB subpage;
 * these masks replicate the same permission into all four subpages.
 */
#define PTE_SMALL_AP_MASK     (0xff << 4)
#define PTE_SMALL_AP_UNO_SRO  (0x00 << 4)   /* user: none,       supervisor: read-only (with S=1,R=0) */
#define PTE_SMALL_AP_UNO_SRW  (0x55 << 4)   /* user: none,       supervisor: read/write */
#define PTE_SMALL_AP_URO_SRW  (0xaa << 4)   /* user: read-only,  supervisor: read/write */
#define PTE_SMALL_AP_URW_SRW  (0xff << 4)   /* user: read/write, supervisor: read/write */
/*
 * Section table properties: ready-made level-1 section descriptors
 * (permissions | domain | memory type | descriptor type).
 */
#define SECT_CB       (PGD_SECT_CACHEABLE|PGD_SECT_BUFFERABLE) /* cache on, write-back */
#define SECT_CNB      (PGD_SECT_CACHEABLE)                     /* cache on, write-through */
#define SECT_NCB      (PGD_SECT_BUFFERABLE)                    /* cache off, write buffer on */
#define SECT_NCNB     (0 << 2)                                 /* cache off, write buffer off */
#define SECT_AP_RW    (PGD_SECT_AP0|PGD_SECT_AP1)  /* AP=11: supervisor=RW, user=RW */
#define SECT_AP_RO    ((0 << 10)|(0 << 11))        /* AP=00: supervisor=RO, user=no access —
                                                    * NOTE(review): only with CP15 S=1,R=0; verify
                                                    * control-register setup matches. */
#define SECT_RW_CB    (SECT_AP_RW|PGD_DOMAIN(0)|PGD_SECT_WB|PGD_TYPE_SECT|PGD_BIT4) /* read/write, cached, write-back */
#define SECT_RW_CNB   (SECT_AP_RW|PGD_DOMAIN(0)|PGD_SECT_WT|PGD_TYPE_SECT|PGD_BIT4) /* read/write, cached, write-through */
#define SECT_RW_NCNB  (SECT_AP_RW|PGD_DOMAIN(0)|PGD_TYPE_SECT|PGD_BIT4)             /* read/write, uncached, unbuffered */
#define SECT_RW_FAULT (SECT_AP_RW|PGD_DOMAIN(1)|PGD_TYPE_SECT|PGD_BIT4)             /* read/write, uncached, unbuffered; domain 1 */
#define SECT_RO_CB    (SECT_AP_RO|PGD_DOMAIN(0)|PGD_SECT_WB|PGD_TYPE_SECT|PGD_BIT4) /* read-only, cached, write-back */
#define SECT_RO_CNB   (SECT_AP_RO|PGD_DOMAIN(0)|PGD_SECT_WT|PGD_TYPE_SECT|PGD_BIT4) /* read-only, cached, write-through */
#define SECT_RO_NCNB  (SECT_AP_RO|PGD_DOMAIN(0)|PGD_TYPE_SECT|PGD_BIT4)             /* read-only, uncached, unbuffered */
#define SECT_RO_FAULT (SECT_AP_RO|PGD_DOMAIN(1)|PGD_TYPE_SECT|PGD_BIT4)             /* read-only, uncached, unbuffered; domain 1 */
#define SECT_TO_PAGE  (PGD_DOMAIN(0)|PGD_TYPE_TABLE|PGD_BIT4) /* level-1 entry pointing to a level-2 (PTE) table */
/*
 * Page table properties: ready-made level-2 small-page descriptors
 * (permissions | memory type | descriptor type).
 */
#define PAGE_CB       (PTE_BUFFERABLE|PTE_CACHEABLE) /* cache on, write-back */
#define PAGE_CNB      (PTE_CACHEABLE)                /* cache on, write-through */
#define PAGE_NCB      (PTE_BUFFERABLE)               /* cache off, write buffer on */
#define PAGE_NCNB     (0 << 2)                       /* cache off, write buffer off */
#define PAGE_AP_RW    PTE_SMALL_AP_URW_SRW           /* supervisor=RW, user=RW */
#define PAGE_AP_RO    PTE_SMALL_AP_UNO_SRO           /* supervisor=RO, user=no access (requires S=1,R=0) */
#define PAGE_RW_CB    (PAGE_AP_RW|PAGE_CB|PTE_TYPE_SMALL)  /* read/write, cached, write-back */
#define PAGE_RW_CNB   (PAGE_AP_RW|PAGE_CNB|PTE_TYPE_SMALL) /* read/write, cached, write-through */
#define PAGE_RW_NCNB  (PAGE_AP_RW|PTE_TYPE_SMALL)          /* read/write, uncached, unbuffered */
#define PAGE_RW_FAULT (PAGE_AP_RW|PTE_TYPE_SMALL)          /* read/write, uncached, unbuffered
                                                            * NOTE(review): identical to PAGE_RW_NCNB;
                                                            * no domain distinction at level 2. */
#define PAGE_RO_CB    (PAGE_AP_RO|PAGE_CB|PTE_TYPE_SMALL)  /* read-only, cached, write-back */
#define PAGE_RO_CNB   (PAGE_AP_RO|PAGE_CNB|PTE_TYPE_SMALL) /* read-only, cached, write-through */
#define PAGE_RO_NCNB  (PAGE_AP_RO|PTE_TYPE_SMALL)          /* read-only, uncached, unbuffered */
#define PAGE_RO_FAULT (PAGE_AP_RO|PTE_TYPE_SMALL)          /* read-only, uncached, unbuffered */
/* One entry of the memory-map table consumed by rt_hw_mmu_init().
 * Describes a virtual-address range, the physical range it maps to,
 * and the attributes used to build the translation-table entries. */
struct mem_desc {
    rt_uint32_t vaddr_start;  /* first virtual address of the region */
    rt_uint32_t vaddr_end;    /* last virtual address of the region */
    rt_uint32_t paddr_start;  /* physical address the region maps to */
    rt_uint32_t sect_attr;    /* level-1 SECT_* attributes; used when mapped_mode == SECT_MAPPED
                               * (original comments on these two fields were swapped) */
    rt_uint32_t page_attr;    /* level-2 PAGE_* attributes; only valid when mapped_mode == PAGE_MAPPED */
    rt_uint32_t mapped_mode;  /* SECT_MAPPED or PAGE_MAPPED */
#define SECT_MAPPED 0         /* map the region with 1 MB sections */
#define PAGE_MAPPED 1         /* map the region via level-2 page tables */
};
/* Build the MMU translation tables from the descriptor array and enable the MMU.
 * mdesc : array of memory-region descriptors
 * size  : number of entries in mdesc — NOTE(review): presumed a count, not bytes;
 *         confirm against the implementation/callers. */
void rt_hw_mmu_init(struct mem_desc *mdesc, rt_uint32_t size);

#endif /* __MMU_H__ */