/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-06     Jesven       first version
 */
#include <rthw.h>
#include <rtthread.h>

#include <ioremap.h>

#ifdef RT_USING_USERSPACE
#include <mmu.h>
#include <lwp_mm_area.h>
#include <lwp_mm.h>

/* AVL tree recording every kernel I/O mapping created by rt_ioremap*() */
static struct lwp_avl_struct *k_map_area;
extern rt_mmu_info mmu_info;
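
/*
 * Unmap a range of kernel virtual addresses page by page. Pages that
 * have no physical backing (rt_hw_mmu_v2p() returns NULL) are skipped,
 * so a partially created mapping can be torn down safely.
 */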
static void _iounmap_range(void *addr, size_t size)
{
    void *va = RT_NULL, *pa = RT_NULL;
    size_t i = 0;

    for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
    {
        pa = rt_hw_mmu_v2p(&mmu_info, va);
        if (pa)
        {
            rt_hw_mmu_unmap(&mmu_info, va, ARCH_PAGE_SIZE);
        }
    }
}
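
/*
 * Map a physical region into kernel virtual address space. The mapping
 * type selects the MMU attributes: MM_AREA_TYPE_PHY maps the region as
 * device memory (uncached), MM_AREA_TYPE_PHY_CACHED as normal cacheable
 * memory. On success the mapping is recorded in k_map_area so that
 * rt_iounmap() can find it later; if the bookkeeping insert fails, the
 * fresh mapping is rolled back and NULL is returned.
 */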
static void *_ioremap_type(void *paddr, size_t size, int type)
{
    void *v_addr = NULL;
    size_t attr;

    switch (type)
    {
    case MM_AREA_TYPE_PHY:
        attr = MMU_MAP_K_DEVICE;
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        attr = MMU_MAP_K_RWCB;
        break;
    default:
        return v_addr;
    }
    rt_mm_lock();
    v_addr = rt_hw_mmu_map(&mmu_info, 0, paddr, size, attr);
    if (v_addr)
    {
        int ret = lwp_map_area_insert(&k_map_area, (size_t)v_addr, size, type);
        if (ret != 0)
        {
            /* bookkeeping failed: undo the mapping so nothing leaks */
            _iounmap_range(v_addr, size);
            v_addr = NULL;
        }
    }
    rt_mm_unlock();

    return v_addr;
}
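
/*
 * rt_ioremap(): map a device region uncached. This is the default for
 * memory-mapped I/O, where reads and writes must reach the device
 * directly rather than be satisfied by the CPU cache.
 */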
void *rt_ioremap(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}
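
/*
 * rt_ioremap_nocache(): maps the region uncached, exactly the same as
 * rt_ioremap(), and exists as a separate entry point in this version.
 */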
void *rt_ioremap_nocache(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}
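
/*
 * rt_ioremap_cached(): map the region as normal cacheable memory, for
 * RAM-like regions where going through the CPU cache is safe.
 */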
void *rt_ioremap_cached(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_CACHED);
}
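
/*
 * rt_iounmap(): release a mapping created by one of the rt_ioremap*()
 * functions. The virtual address is looked up in k_map_area; if found,
 * the whole recorded range is unmapped and the bookkeeping node is
 * removed. Addresses with no recorded mapping are silently ignored.
 */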
void rt_iounmap(volatile void *vaddr)
{
    struct lwp_avl_struct *ma_avl_node;

    rt_mm_lock();
    ma_avl_node = lwp_map_find(k_map_area, (size_t)vaddr);
    if (ma_avl_node)
    {
        struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ma_avl_node->data;
        _iounmap_range((void *)ma->addr, ma->size);
        lwp_map_area_remove(&k_map_area, (size_t)vaddr);
    }
    rt_mm_unlock();
}
#else
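
/*
 * Without RT_USING_USERSPACE there is no kernel MMU remapping: physical
 * addresses are usable directly, so the ioremap functions are identity
 * operations and rt_iounmap() has nothing to undo.
 */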
void *rt_ioremap(void *paddr, size_t size)
{
    return paddr;
}

void *rt_ioremap_nocache(void *paddr, size_t size)
{
    return paddr;
}

void *rt_ioremap_cached(void *paddr, size_t size)
{
    return paddr;
}

void rt_iounmap(volatile void *vaddr)
{
}
#endif
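
/*
 * Usage sketch (illustrative only: DEV_PHY_BASE and the register offset
 * below are hypothetical values, not part of this file):
 *
 *     #define DEV_PHY_BASE 0x10000000UL
 *
 *     volatile rt_uint32_t *regs = rt_ioremap((void *)DEV_PHY_BASE, 0x1000);
 *     if (regs)
 *     {
 *         rt_uint32_t id = regs[0];   // read a device register through the va
 *         rt_iounmap(regs);           // release the mapping when done
 *     }
 */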