riscv_mmu.h

/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 * 2021-05-03     lizhirui     porting to c906
 * 2023-10-12     Shell        Add permission control API
 */

#ifndef __RISCV_MMU_H__
#define __RISCV_MMU_H__

#include <rtthread.h>
#include <rthw.h>

#include "riscv.h"

#undef PAGE_SIZE

/* C-SKY extend */
#define PTE_SEC   (1UL << 59) /* Security */
#define PTE_SHARE (1UL << 60) /* Shareable */
#define PTE_BUF   (1UL << 61) /* Bufferable */
#define PTE_CACHE (1UL << 62) /* Cacheable */
#define PTE_SO    (1UL << 63) /* Strong Order */
#define PAGE_OFFSET_SHIFT 0
#define PAGE_OFFSET_BIT   12
#define PAGE_SIZE         __SIZE(PAGE_OFFSET_BIT)
#define PAGE_OFFSET_MASK  __MASK(PAGE_OFFSET_BIT)

#define VPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define VPN0_BIT   9
#define VPN1_SHIFT (VPN0_SHIFT + VPN0_BIT)
#define VPN1_BIT   9
#define VPN2_SHIFT (VPN1_SHIFT + VPN1_BIT)
#define VPN2_BIT   9

#define PPN0_SHIFT (PAGE_OFFSET_SHIFT + PAGE_OFFSET_BIT)
#define PPN0_BIT   9
#define PPN1_SHIFT (PPN0_SHIFT + PPN0_BIT)
#define PPN1_BIT   9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT   26

#define PPN_BITS (PPN0_BIT + PPN1_BIT + PPN2_BIT)

#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
#define L3_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT)
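
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream header):
 * extracting the three Sv39 translation indices from a virtual address using
 * the field widths defined above.
 */
rt_inline rt_ubase_t _sv39_vpn_example(rt_ubase_t vaddr, int level)
{
    switch (level)
    {
        case 2:  return (vaddr >> VPN2_SHIFT) & __MASK(VPN2_BIT);
        case 1:  return (vaddr >> VPN1_SHIFT) & __MASK(VPN1_BIT);
        default: return (vaddr >> VPN0_SHIFT) & __MASK(VPN0_BIT);
    }
}
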
#define ARCH_ADDRESS_WIDTH_BITS     64
#define PHYSICAL_ADDRESS_WIDTH_BITS 56

#define PAGE_ATTR_NEXT_LEVEL  (0)
#define PAGE_ATTR_RWX         (PTE_X | PTE_W | PTE_R)
#define PAGE_ATTR_READONLY    (PTE_R)
#define PAGE_ATTR_XN          (PTE_W | PTE_R)
#define PAGE_ATTR_READEXECUTE (PTE_X | PTE_R)

#define PAGE_ATTR_USER   (PTE_U)
#define PAGE_ATTR_SYSTEM (0)

#define PAGE_ATTR_CB  (PTE_BUF | PTE_CACHE)
#define PAGE_ATTR_DEV (PTE_SO)

#define PAGE_DEFAULT_ATTR_LEAF                                         \
    (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_U | \
     PAGE_ATTR_RWX | PTE_V)

#define PAGE_DEFAULT_ATTR_NEXT \
    (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_V)

#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)

#define PTE_USED(pte) __MASKVALUE(pte, PTE_V)

#define PTE_WRAP(attr) (attr | PTE_A | PTE_D)
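
/*
 * Illustrative sketch (hypothetical helper): a valid PTE with any of R/W/X
 * set is a leaf mapping; a valid PTE with none of them set points to the
 * next-level table.
 */
rt_inline rt_bool_t _pte_is_leaf_example(size_t pte)
{
    return (PTE_USED(pte) && PAGE_IS_LEAF(pte)) ? RT_TRUE : RT_FALSE;
}
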
/**
 * encoding of SATP (Supervisor Address Translation and Protection register)
 */
#define SATP_MODE_OFFSET 60
#define SATP_MODE_BARE   0
#define SATP_MODE_SV39   8
#define SATP_MODE_SV48   9
#define SATP_MODE_SV57   10
#define SATP_MODE_SV64   11

#define ARCH_VADDR_WIDTH 39
#define SATP_MODE        SATP_MODE_SV39
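
/*
 * Illustrative sketch (hypothetical helper): composing a SATP value for an
 * Sv39 root page table at physical address pgtbl_paddr. The ASID field is
 * left at zero; its position (bits 59:44 on RV64) comes from the RISC-V
 * privileged spec, not from this header.
 */
rt_inline rt_ubase_t _satp_make_example(rt_ubase_t pgtbl_paddr)
{
    return ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET) |
           (pgtbl_paddr >> PAGE_OFFSET_BIT);
}
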
#define MMU_MAP_K_DEVICE  PTE_WRAP(PAGE_ATTR_DEV | PTE_G | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_K_RWCB    PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_K_RW      PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_RWCB    PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_ROCB \
    PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_READONLY | PTE_V)
#define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_U_RW      PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_EARLY \
    PTE_WRAP(PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE | PTE_SHARE | PTE_BUF)

#define MMU_MAP_TRACE(attr) (attr)
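
/*
 * Illustrative sketch (hypothetical helper): picking a kernel mapping
 * attribute. Device MMIO regions want the strong-ordered, non-executable
 * attribute; normal kernel RAM the cacheable/bufferable one.
 */
rt_inline size_t _kernel_map_attr_example(rt_bool_t is_device)
{
    return is_device ? MMU_MAP_K_DEVICE : MMU_MAP_K_RWCB;
}
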
#define PTE_XWR_MASK 0xe

#define ARCH_PAGE_SIZE   PAGE_SIZE
#define ARCH_PAGE_MASK   (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_SHIFT  PAGE_OFFSET_BIT
#define ARCH_INDEX_WIDTH 9
#define ARCH_INDEX_SIZE  (1ul << ARCH_INDEX_WIDTH)
#define ARCH_INDEX_MASK  (ARCH_INDEX_SIZE - 1)

#define ARCH_MAP_FAILED ((void *)0x8000000000000000)

void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access(void);
void mmu_disable_user_page_access(void);

#define RT_HW_MMU_PROT_READ    1
#define RT_HW_MMU_PROT_WRITE   2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL  8
#define RT_HW_MMU_PROT_USER    16
#define RT_HW_MMU_PROT_CACHE   32

void rt_hw_asid_init(void);

struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
 * @brief Remove a permission from an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection flag(s) to be removed
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* remove write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            attr &= ~PTE_W;
            break;

        /* remove write permission for kernel */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
            attr &= ~PTE_W;
            break;

        default:
            RT_ASSERT(0);
    }

    return attr;
}
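
/*
 * Illustrative usage (hypothetical helper): dropping user write access from a
 * user mapping, e.g. when setting up copy-on-write. Any prot combination not
 * handled above trips RT_ASSERT(0).
 */
rt_inline size_t _attr_make_cow_example(size_t attr)
{
    return rt_hw_mmu_attr_rm_perm(attr,
                                  RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
}
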
/**
 * @brief Add a permission to an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection flag(s) to be added
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* add write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            attr |= (PTE_R | PTE_W | PTE_U);
            break;

        default:
            RT_ASSERT(0);
    }

    return attr;
}
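
/*
 * Illustrative usage (hypothetical helper): restoring user write access, the
 * inverse of the copy-on-write example above; PTE_R, PTE_W and PTE_U all end
 * up set in the returned attribute.
 */
rt_inline size_t _attr_make_user_writable_example(size_t attr)
{
    return rt_hw_mmu_attr_add_perm(attr,
                                   RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
}
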
/**
 * @brief Test whether a permission is granted by an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection flag(s) to be tested
 * @return rt_bool_t RT_TRUE if prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t rc = 0;

    switch (prot & ~RT_HW_MMU_PROT_USER)
    {
        /* test write permission */
        case RT_HW_MMU_PROT_WRITE:
            rc = ((attr & PTE_W) && (attr & PTE_R));
            break;
        case RT_HW_MMU_PROT_READ:
            rc = !!(attr & PTE_R);
            break;
        case RT_HW_MMU_PROT_EXECUTE:
            rc = !!(attr & PTE_X);
            break;
        default:
            RT_ASSERT(0);
    }

    if (rc && (prot & RT_HW_MMU_PROT_USER))
    {
        rc = !!(attr & PTE_U);
    }

    return rc;
}
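
/*
 * Illustrative usage (hypothetical helper): deciding whether a write fault on
 * a user mapping can be satisfied by the existing PTE attribute.
 */
rt_inline rt_bool_t _attr_user_writable_example(size_t attr)
{
    return rt_hw_mmu_attr_test_perm(attr,
                                    RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
}
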
#endif /* __RISCV_MMU_H__ */