mm_aspace.h
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 */
  10. #ifndef __MM_ASPACE_H__
  11. #define __MM_ASPACE_H__
  12. #include <rthw.h>
  13. #include <rtthread.h>
  14. #include <stddef.h>
  15. #include "avl_adpt.h"
  16. #include "mm_fault.h"
  17. #include "mm_flag.h"
  18. #define MM_PAGE_SHIFT 12
  19. #define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
  20. #ifndef RT_USING_SMP
  21. typedef rt_spinlock_t mm_spinlock;
  22. #define MM_PGTBL_LOCK_INIT(aspace)
  23. #define MM_PGTBL_LOCK(aspace) (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
  24. #define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
  25. #else
  26. typedef rt_hw_spinlock_t mm_spinlock;
  27. #define MM_PGTBL_LOCK_INIT(aspace) (rt_hw_spin_lock_init(&((aspace)->pgtbl_lock)))
  28. #define MM_PGTBL_LOCK(aspace) (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
  29. #define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
  30. #endif /* RT_USING_SMP */
  31. struct rt_aspace;
  32. struct rt_varea;
  33. struct rt_mem_obj;
  34. extern struct rt_aspace rt_kernel_space;
  35. typedef struct rt_aspace
  36. {
  37. void *start;
  38. rt_size_t size;
  39. void *page_table;
  40. mm_spinlock pgtbl_lock;
  41. struct _aspace_tree tree;
  42. struct rt_mutex bst_lock;
  43. rt_uint64_t asid;
  44. } *rt_aspace_t;
  45. typedef struct rt_varea
  46. {
  47. void *start;
  48. rt_size_t size;
  49. rt_size_t offset;
  50. rt_size_t attr;
  51. rt_size_t flag;
  52. struct rt_aspace *aspace;
  53. struct rt_mem_obj *mem_obj;
  54. struct _aspace_node node;
  55. struct rt_page *frames;
  56. void *data;
  57. } *rt_varea_t;
  58. typedef struct rt_mm_va_hint
  59. {
  60. void *limit_start;
  61. rt_size_t limit_range_size;
  62. void *prefer;
  63. const rt_size_t map_size;
  64. mm_flag_t flags;
  65. } *rt_mm_va_hint_t;
  66. typedef struct rt_mem_obj
  67. {
  68. void (*hint_free)(rt_mm_va_hint_t hint);
  69. void (*on_page_fault)(struct rt_varea *varea, struct rt_mm_fault_msg *msg);
  70. /* do pre open bushiness like inc a ref */
  71. void (*on_varea_open)(struct rt_varea *varea);
  72. /* do post close bushiness like def a ref */
  73. void (*on_varea_close)(struct rt_varea *varea);
  74. void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);
  75. const char *(*get_name)(rt_varea_t varea);
  76. } *rt_mem_obj_t;
  77. extern struct rt_mem_obj rt_mm_dummy_mapper;
  78. enum rt_mmu_cntl
  79. {
  80. MMU_CNTL_NONCACHE,
  81. MMU_CNTL_CACHE,
  82. MMU_CNTL_DUMMY_END,
  83. };
  84. /**
  85. * @brief Lock to access page table of address space
  86. */
  87. #define WR_LOCK(aspace) \
  88. rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER) \
  89. : 0
  90. #define WR_UNLOCK(aspace) \
  91. rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0
  92. #define RD_LOCK(aspace) WR_LOCK(aspace)
  93. #define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
  94. rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
  95. rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
  96. void *pgtbl);
  97. void rt_aspace_delete(rt_aspace_t aspace);
  98. void rt_aspace_detach(rt_aspace_t aspace);
  99. /**
  100. * @brief Memory Map on Virtual Address Space to Mappable Object
  101. * *INFO There is no restriction to use NULL address(physical/virtual).
  102. * Vaddr passing in addr must be page aligned. If vaddr is MM_MAP_FAILED,
  103. * a suitable address will be chose automatically.
  104. *
  105. * @param aspace target virtual address space
  106. * @param addr virtual address of the mapping
  107. * @param length length of mapping region
  108. * @param attr MMU attribution
  109. * @param flags desired memory protection and behaviour of the mapping
  110. * @param mem_obj memory map backing store object
  111. * @param offset offset of mapping in 4KB page for mem_obj
  112. * @return int E_OK on success, with addr set to vaddr of mapping
  113. * E_INVAL
  114. */
  115. int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
  116. mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
  117. /** no malloc routines call */
  118. int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
  119. rt_size_t length, rt_size_t attr, mm_flag_t flags,
  120. rt_mem_obj_t mem_obj, rt_size_t offset);
  121. /**
  122. * @brief Memory Map on Virtual Address Space to Physical Memory
  123. *
  124. * @param aspace target virtual address space
  125. * @param hint hint of mapping va
  126. * @param attr MMU attribution
  127. * @param pa_off (physical address >> 12)
  128. * @param ret_va pointer to the location to store va
  129. * @return int E_OK on success, with ret_va set to vaddr of mapping
  130. * E_INVAL
  131. */
  132. int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
  133. rt_size_t pa_off, void **ret_va);
  134. /** no malloc routines call */
  135. int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
  136. rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
  137. void **ret_va);
  138. /**
  139. * @brief Remove any mappings overlap the range [addr, addr + bytes)
  140. *
  141. * @param aspace
  142. * @param addr
  143. * @param length
  144. * @return int
  145. */
  146. int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);
  147. int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
  148. int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
  149. int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
  150. int rt_aspace_traversal(rt_aspace_t aspace,
  151. int (*fn)(rt_varea_t varea, void *arg), void *arg);
  152. void rt_aspace_print_all(rt_aspace_t aspace);
  153. void rt_varea_insert_page(rt_varea_t varea, void *page_addr);
  154. void rt_varea_free_pages(rt_varea_t varea);
  155. void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size);
  156. #endif /* __MM_ASPACE_H__ */