/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 * 2023-08-17     Shell        Add unmap_range for MAP_PRIVATE
 */
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__

#include <rthw.h>
#include <rtthread.h>

#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"

#include <stddef.h>
#include <string.h>

#define MM_PAGE_SHIFT    12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET        (rt_kmem_pvoff())
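
/*
 * Example (illustrative): MM_PA_TO_OFF() turns a physical address into the
 * 4KB page-frame offset expected by rt_aspace_map_phy() below.
 *
 *     rt_size_t pa_off = MM_PA_TO_OFF(0x40000000);   // == 0x40000
 */
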
#ifndef RT_USING_SMP
typedef rt_spinlock_t mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace)
#define MM_PGTBL_LOCK(aspace)   (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))

#else
typedef struct rt_spinlock mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace)      (rt_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_spin_unlock(&((aspace)->pgtbl_lock)))

#endif /* RT_USING_SMP */
struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;

extern struct rt_aspace rt_kernel_space;

typedef struct rt_aspace
{
    void *start;
    rt_size_t size;

    void *page_table;
    mm_spinlock pgtbl_lock;

    struct _aspace_tree tree;
    struct rt_mutex bst_lock;

    struct rt_mem_obj *private_object;
    rt_uint64_t asid;
} *rt_aspace_t;
typedef struct rt_varea
{
    void *start;
    rt_size_t size;
    rt_size_t offset;

    rt_size_t attr;
    rt_size_t flag;

    struct rt_aspace *aspace;
    struct rt_mem_obj *mem_obj;

    struct _aspace_node node;

    void *data;
} *rt_varea_t;

typedef struct rt_mm_va_hint
{
    void *limit_start;
    rt_size_t limit_range_size;

    void *prefer;
    const rt_size_t map_size;

    mm_flag_t flags;
} *rt_mm_va_hint_t;
typedef struct rt_mem_obj
{
    void (*hint_free)(rt_mm_va_hint_t hint);
    void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);

    /* do pre-open business such as incrementing a reference count */
    void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business such as decrementing a reference count */
    void (*on_varea_close)(struct rt_varea *varea);

    /* do preparation for address space modification of varea */
    rt_err_t (*on_varea_shrink)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /* do preparation for address space modification of varea */
    rt_err_t (*on_varea_expand)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /**
     * this works like an on_varea_open() on `subset` and an on_varea_shrink()
     * on `existed`, while resources can migrate from `existed` to `subset` at
     * the same time
     */
    rt_err_t (*on_varea_split)(struct rt_varea *existed, void *unmap_start,
                               rt_size_t unmap_len, struct rt_varea *subset);
    /**
     * this works like an on_varea_expand() on `merge_to` and an
     * on_varea_close() on `merge_from`, while resources can migrate from
     * `merge_from` to `merge_to` at the same time
     */
    rt_err_t (*on_varea_merge)(struct rt_varea *merge_to, struct rt_varea *merge_from);

    /* dynamic mem_obj API */
    void (*page_read)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
    void (*page_write)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);

    const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;
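
/*
 * Minimal backing-object sketch (illustrative; the names are placeholders,
 * not part of this header). Unused hooks may be left NULL, as
 * rt_mm_dummy_mapper demonstrates; the fault-message protocol is defined in
 * mm_fault.h.
 *
 *     static const char *demo_get_name(rt_varea_t varea)
 *     {
 *         return "demo";
 *     }
 *
 *     static void demo_on_page_fault(struct rt_varea *varea,
 *                                    struct rt_aspace_fault_msg *msg)
 *     {
 *         // supply a page frame for the faulting address here (see mm_fault.h)
 *     }
 *
 *     static struct rt_mem_obj demo_obj = {
 *         .get_name      = demo_get_name,
 *         .on_page_fault = demo_on_page_fault,
 *     };
 */
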
extern struct rt_mem_obj rt_mm_dummy_mapper;

enum rt_mmu_cntl
{
    MMU_CNTL_NONCACHE,
    MMU_CNTL_CACHE,
    MMU_CNTL_READONLY,
    MMU_CNTL_READWRITE,
    MMU_CNTL_OFFLOAD,
    MMU_CNTL_INSTALL,
    MMU_CNTL_DUMMY_END,
};
/**
 * @brief Lock to access page table of address space
 */
#define WR_LOCK(aspace)                                                       \
    rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER) \
                     : 0
#define WR_UNLOCK(aspace)                                                     \
    rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0

/* FIXME: fix rd_lock */
#define RD_LOCK(aspace)     WR_LOCK(aspace)
#define RD_UNLOCK(aspace)   WR_UNLOCK(aspace)
#define RDWR_LOCK(aspace)   ((void)aspace)
#define RDWR_UNLOCK(aspace) ((void)aspace)
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);

rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);

void rt_aspace_delete(rt_aspace_t aspace);

void rt_aspace_detach(rt_aspace_t aspace);
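
/*
 * Lifecycle sketch (illustrative): build an address space over a page table
 * provided by the arch layer, then tear it down. arch_alloc_pgtbl() is a
 * hypothetical placeholder; the real allocation is arch-specific.
 *
 *     void *pgtbl = arch_alloc_pgtbl();              // hypothetical helper
 *     rt_aspace_t as = rt_aspace_create((void *)0x1000, 0x7ffff000, pgtbl);
 *     if (as)
 *     {
 *         // ... rt_aspace_map()/rt_aspace_unmap() as shown below ...
 *         rt_aspace_delete(as);
 *     }
 */
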
/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 *
 * *INFO There is no restriction on using the NULL address (physical/virtual).
 * The vaddr passed in `addr` must be page aligned. If vaddr is RT_NULL,
 * a suitable address is chosen automatically.
 *
 * @param aspace target virtual address space
 * @param addr virtual address of the mapping
 * @param length length of the mapped region
 * @param attr MMU attributes
 * @param flags desired memory protection and behaviour of the mapping
 * @param mem_obj memory map backing store object
 * @param offset offset of the mapping in 4KB pages, relative to mem_obj
 * @return int E_OK on success, with addr set to the vaddr of the mapping;
 *         E_INVAL otherwise
 */
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
                  mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
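
/*
 * Usage sketch (illustrative): an anonymous mapping backed by the dummy
 * mapper. MMU_MAP_K_RWCB is an assumed arch-layer attribute macro; flags 0
 * requests the default behaviour.
 *
 *     void *vaddr = RT_NULL;   // RT_NULL lets the allocator pick the address
 *     int err = rt_aspace_map(&rt_kernel_space, &vaddr, 0x10000,
 *                             MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0);
 *     if (err == RT_EOK)
 *     {
 *         // vaddr now holds the chosen, page-aligned virtual address
 *     }
 */
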
/** variant of rt_aspace_map() that calls no malloc routines; the caller supplies the varea */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset);
/**
 * @brief Memory Map on Virtual Address Space to Physical Memory
 *
 * @param aspace target virtual address space
 * @param hint hint for the mapping virtual address
 * @param attr MMU attributes
 * @param pa_off (physical address >> 12)
 * @param ret_va pointer to the location to store the va
 * @return int E_OK on success, with ret_va set to the vaddr of the mapping;
 *         E_INVAL otherwise
 */
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va);
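
/*
 * Usage sketch (illustrative): map one page of a device's physical range.
 * MMU_MAP_K_DEVICE is an assumed arch-layer attribute macro; the limit
 * fields bound the search window when prefer is RT_NULL.
 *
 *     struct rt_mm_va_hint hint = {
 *         .limit_start      = rt_kernel_space.start,
 *         .limit_range_size = rt_kernel_space.size,
 *         .prefer           = RT_NULL,
 *         .map_size         = 0x1000,
 *         .flags            = 0,
 *     };
 *     void *uart_va;
 *     int err = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
 *                                 MM_PA_TO_OFF(0x09000000), &uart_va);
 */
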
/** variant of rt_aspace_map_phy() that calls no malloc routines; the caller supplies the varea */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                             void **ret_va);

/** map a private memory region to aspace */
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags);
/**
 * @brief Remove the mapping containing the address specified by addr
 *
 * @param aspace target virtual address space
 * @param addr an address inside the mapping to be removed
 * @return int rt errno
 */
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
/**
 * @brief Remove pages of existing mappings in the range [addr, addr+length)
 * Length is automatically rounded up to the next multiple of the page size.
 *
 * @param aspace target virtual address space
 * @param addr the beginning of the range of pages to be unmapped
 * @param length length of the range in bytes
 * @return int rt errno
 */
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);
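
/*
 * Usage sketch (illustrative): drop two pages of a previously created
 * mapping; passing 0x1001 would round up to the same two pages.
 *
 *     rt_aspace_unmap_range(&rt_kernel_space, vaddr, 2 * 0x1000);
 */
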
int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
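
/*
 * Usage sketch (illustrative): flip the page under vaddr to read-only and
 * back again.
 *
 *     rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_READONLY);
 *     // ... writes to vaddr's page are expected to fault here ...
 *     rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_READWRITE);
 */
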
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer);

rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer);
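
/*
 * Usage sketch (illustrative): copy one page out of an address space through
 * a page-sized bounce buffer, then write the (possibly modified) data back.
 *
 *     static char buf[0x1000];
 *     if (rt_aspace_page_get(aspace, page_va, buf) == RT_EOK)
 *     {
 *         // inspect or patch buf ...
 *         rt_aspace_page_put(aspace, page_va, buf);
 *     }
 */
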
int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg);

void rt_aspace_print_all(rt_aspace_t aspace);

rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace);

rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr);

rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst);
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst);
rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst);
/**
 * @brief Map one page to varea
 *
 * @note the caller should hold the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address
 * @param page the page frame to be mapped
 * @return int
 */
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
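
/*
 * Usage sketch (illustrative): a backing object installing one page frame,
 * honouring the note above by holding the aspace lock around the call.
 * fault_va and page_frame are hypothetical values supplied by the caller.
 *
 *     WR_LOCK(varea->aspace);
 *     int err = rt_varea_map_page(varea, fault_va, page_frame);
 *     WR_UNLOCK(varea->aspace);
 */
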
/**
 * @brief Unmap one page in varea
 *
 * @note the caller should hold the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address
 * @return int
 */
int rt_varea_unmap_page(rt_varea_t varea, void *vaddr);
/**
 * @brief Map a range of physical addresses to varea
 *
 * @warning the caller is responsible for synchronizing its varea across all
 * map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param paddr physical address
 * @param length length of the mapped range in bytes
 * @return int
 */
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
/**
 * @brief Unmap a range of physical addresses in varea
 *
 * @warning the caller is responsible for synchronizing its varea across all
 * map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param length length of the mapped range in bytes
 * @return int
 */
int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);
/**
 * @brief Insert a page into the page manager of varea
 * The page is freed automatically by the varea on uninstall
 *
 * @param varea target varea
 * @param page_addr the page frame to be added
 */
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
/* shallow-copy a mem_obj so the clone can be customized without touching the source */
rt_inline rt_mem_obj_t rt_mem_obj_create(rt_mem_obj_t source)
{
    rt_mem_obj_t target;
    target = rt_malloc(sizeof(*target));
    if (target)
        memcpy(target, source, sizeof(*target));
    return target;
}
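
/*
 * Usage sketch (illustrative): clone the dummy mapper and override a single
 * hook, keeping the rest of the source object's behaviour. my_on_open is a
 * hypothetical user-supplied callback.
 *
 *     rt_mem_obj_t obj = rt_mem_obj_create(&rt_mm_dummy_mapper);
 *     if (obj)
 *         obj->on_varea_open = my_on_open;
 */
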
rt_ubase_t rt_kmem_pvoff(void);

void rt_kmem_pvoff_set(rt_ubase_t pvoff);

int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);

void *rt_kmem_v2p(void *vaddr);

void *rt_kmem_p2v(void *paddr);

void rt_kmem_list(void);
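
/*
 * Usage sketch (illustrative): kernel memory is linearly mapped, so a
 * virtual-to-physical round trip through the PV offset is the identity.
 * kernel_va stands for any linearly mapped kernel address.
 *
 *     void *pa = rt_kmem_v2p(kernel_va);
 *     RT_ASSERT(rt_kmem_p2v(pa) == kernel_va);
 */
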
#endif /* __MM_ASPACE_H__ */