mm_page.h

/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-01     Jesven       The first version
 * 2022-12-13     WangXiaoyao  Hot-pluggable, extensible
 *                             page management algorithm
 */
#ifndef __MM_PAGE_H__
#define __MM_PAGE_H__

#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>

#define GET_FLOOR(type) \
    (1ul << (8 * sizeof(rt_size_t) - __builtin_clzl(2 * sizeof(type) - 1) - 1))

#define DEF_PAGE_T(fields) \
    typedef struct rt_page { \
        union { struct { fields }; char _padding[GET_FLOOR(struct { fields })]; }; \
    } *rt_page_t
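
/*
 * Note (not part of the original header): despite its name, GET_FLOOR(type)
 * evaluates to the smallest power of two that is >= sizeof(type), so
 * DEF_PAGE_T pads struct rt_page out to a power-of-two size. As an
 * illustration, on a 64-bit build a 24-byte field set is padded to 32 bytes:
 *
 *     GET_FLOOR(struct { void *a; void *b; int c; }) == 32
 */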
/**
 * @brief PAGE ALLOC FLAGS
 *
 * @info PAGE_ANY_AVAILABLE
 * Page allocation defaults to using the lower region; this behavior can be
 * changed by setting PAGE_ANY_AVAILABLE.
 */
#define PAGE_ANY_AVAILABLE 0x1ul
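
/*
 * Usage sketch (illustrative only, assuming size_bits selects a block of
 * 2^size_bits contiguous pages): let the allocator satisfy a single-page
 * request from any installed region instead of only the lower one.
 *
 *     void *page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
 *     if (page)
 *         rt_pages_free(page, 0);
 */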
#ifdef RT_DEBUGING_PAGE_LEAK
#define DEBUG_FIELD struct { \
    /* trace list */ \
    struct rt_page *tl_next; \
    struct rt_page *tl_prev; \
    void *caller; \
    size_t trace_size; \
}
#else
#define DEBUG_FIELD
#endif
DEF_PAGE_T(
    struct rt_page *next;   /* same level next */
    struct rt_page *pre;    /* same level pre */
    DEBUG_FIELD;
    rt_uint32_t size_bits;  /* equals ARCH_ADDRESS_WIDTH_BITS when the page is not free */
    rt_uint32_t ref_cnt;    /* page group ref count */
);
#undef GET_FLOOR
#undef DEF_PAGE_T
typedef struct tag_region
{
    rt_size_t start;
    rt_size_t end;
    const char *name;
} rt_region_t;
extern const rt_size_t rt_mpr_size;
extern void *rt_mpr_start;

void rt_page_init(rt_region_t reg);
void rt_page_cleanup(void);

void *rt_pages_alloc(rt_uint32_t size_bits);
void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags);
void rt_page_ref_inc(void *addr, rt_uint32_t size_bits);
int rt_page_ref_get(void *addr, rt_uint32_t size_bits);
int rt_pages_free(void *addr, rt_uint32_t size_bits);

void rt_page_list(void);
rt_size_t rt_page_bits(rt_size_t size);
void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr);
void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr);

void *rt_page_page2addr(struct rt_page *p);
struct rt_page *rt_page_addr2page(void *addr);
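
/*
 * Usage sketch (illustrative, not part of the original header). It assumes
 * size_bits selects a block of 2^size_bits contiguous pages and that each
 * rt_pages_free() call drops one reference taken by the allocation or by
 * rt_page_ref_inc():
 *
 *     void *blk = rt_pages_alloc(2);          // 2^2 = 4 contiguous pages
 *     if (blk)
 *     {
 *         rt_page_ref_inc(blk, 2);            // take an extra reference
 *         (void)rt_page_ref_get(blk, 2);      // expected to report 2
 *         rt_pages_free(blk, 2);              // drop the extra reference
 *         rt_pages_free(blk, 2);              // last reference, block released
 *     }
 */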
/**
 * @brief Install page frames at run-time
 * Region size must be aligned to 2^(RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)
 * bytes currently (typically 2 MB).
 *
 * !WARNING this API will NOT check whether the region is valid or already in
 * the installed list
 *
 * @param region region.start is the first page frame (inclusive),
 *               region.end is the first page frame after the free region
 * @return int 0 on success
 */
int rt_page_install(rt_region_t region);
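
/*
 * Usage sketch (illustrative only; the base address and 2 MB size below are
 * made-up values): install a hot-plugged range of page frames at run-time.
 *
 *     rt_region_t hot = {
 *         .start = 0x80000000ul,              // first page frame (inclusive)
 *         .end   = 0x80000000ul + 0x200000,   // first frame past the region
 *         .name  = "hotplug",
 *     };
 *     if (rt_page_install(hot) != 0)
 *         rt_kprintf("rt_page_install failed\n");
 */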
void rt_page_leak_trace_start(void);
void rt_page_leak_trace_stop(void);
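
/*
 * Usage sketch (illustrative only, assuming RT_DEBUGING_PAGE_LEAK is enabled
 * so allocations carry the trace-list DEBUG_FIELD): bracket a suspect code
 * path with the leak tracer, then inspect allocator state.
 *
 *     rt_page_leak_trace_start();
 *     suspect_code_path();                    // hypothetical function
 *     rt_page_leak_trace_stop();
 *     rt_page_list();                         // dump page lists for inspection
 */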

#endif /* __MM_PAGE_H__ */