/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-30     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support varea modification handler
 * 2023-10-13     Shell        Replace the page management algorithm of pgmgr
 */
#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_page.h"
#include <mmu.h>

#include <string.h>
#include <stdlib.h>
/** A varea-based dummy memory object whose data comes directly from page frames */

static const char *get_name(rt_varea_t varea)
{
    return "dummy-mapper";
}
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    /* pick the page affinity id matching the faulting address */
    int affid = RT_PAGE_PICK_AFFID(msg->fault_vaddr);

    /* lazily allocate one anonymous page frame (order 0) to back the fault */
    page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);
    if (!page)
    {
        LOG_W("%s: page alloc failed", __func__);
        return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = page;
}
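
/*
 * Note: on_page_fault() only fills msg->response with the backing frame;
 * installing the page-table entry for that frame is left to the generic
 * fault path in the aspace layer, which consumes the response after this
 * callback returns. Returning early without setting a status leaves the
 * fault reported as unhandled. (The exact division of labor follows the
 * fault handling in mm_aspace.c and may vary between RT-Thread versions.)
 */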
static void on_varea_open(struct rt_varea *varea)
{
    /* the anonymous mapping keeps no private state in the varea */
    varea->data = NULL;
}

static void on_varea_close(struct rt_varea *varea)
{
}
/* Each page is an independent anonymous frame, so any reshaping of the
 * varea (expand/shrink/split/merge) can be accepted unconditionally. */
static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    return RT_EOK;
}

static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    return RT_EOK;
}
static void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    char *dst_k;
    rt_aspace_t aspace = varea->aspace;

    /* translate the accessed vaddr to the physical frame behind it */
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        /* convert physical to kernel virtual (VA = PA - PV_OFFSET) */
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(msg->buffer_vaddr, dst_k, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

static void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dst_k;
    rt_aspace_t aspace = varea->aspace;

    /* translate the accessed vaddr to the physical frame behind it */
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        /* convert physical to kernel virtual (VA = PA - PV_OFFSET) */
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(dst_k, msg->buffer_vaddr, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}
struct rt_mem_obj rt_mm_dummy_mapper = {
    .get_name = get_name,
    .on_page_fault = on_page_fault,
    .hint_free = NULL,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,
    .on_varea_shrink = on_varea_shrink,
    .on_varea_split = on_varea_split,
    .on_varea_expand = on_varea_expand,
    .on_varea_merge = on_varea_merge,
    .page_write = page_write,
    .page_read = page_read,
};
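
/*
 * Usage sketch (illustrative only): mapping an anonymous, demand-paged
 * region backed by rt_mm_dummy_mapper. The rt_aspace_map() call below
 * follows the declaration in mm_aspace.h at the time of writing; the
 * MMU_MAP_K_RWCB attribute is an architecture-specific assumption, and
 * real callers may pass MMF_* flags instead of 0.
 */
#if 0 /* example, not compiled */
static void *map_anonymous_example(rt_aspace_t aspace, rt_size_t length)
{
    void *vaddr = NULL; /* NULL: let the aspace pick a free virtual range */
    int err;

    /* No frame is committed here; each page is allocated lazily by
     * on_page_fault() on first access. */
    err = rt_aspace_map(aspace, &vaddr, length, MMU_MAP_K_RWCB,
                        0 /* MMF_* flags */, &rt_mm_dummy_mapper, 0);
    return err == RT_EOK ? vaddr : NULL;
}
#endif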