ioremap.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-06     Jesven       first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <ioremap.h>

#ifdef RT_USING_USERSPACE

#include <mmu.h>
#include <lwp_mm_area.h>

static struct lwp_avl_struct *k_map_area;

extern rt_mmu_info mmu_info;
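
/* Walk a virtual address range page by page and unmap every page that
 * currently has a physical backing. */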
static void _iounmap_range(void *addr, size_t size)
{
    void *va = RT_NULL, *pa = RT_NULL;
    size_t i = 0;

    for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
    {
        pa = rt_hw_mmu_v2p(&mmu_info, va);
        if (pa)
        {
            rt_hw_mmu_unmap(&mmu_info, va, ARCH_PAGE_SIZE);
        }
    }
}
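
/* Map a physical range into kernel virtual address space with the page
 * attribute selected by `type` (uncached device memory or cached RAM),
 * and record the new area in the k_map_area AVL tree so rt_iounmap()
 * can find it later. If the bookkeeping insert fails, the fresh mapping
 * is rolled back and NULL is returned. */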
static void *_ioremap_type(void *paddr, size_t size, int type)
{
    void *v_addr = NULL;
    size_t attr;

    switch (type)
    {
    case MM_AREA_TYPE_PHY:
        attr = MMU_MAP_K_DEVICE;
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        attr = MMU_MAP_K_RWCB;
        break;
    default:
        return v_addr;
    }

    rt_mm_lock();
    v_addr = rt_hw_mmu_map(&mmu_info, 0, paddr, size, attr);
    if (v_addr)
    {
        int ret = lwp_map_area_insert(&k_map_area, (size_t)v_addr, size, type);
        if (ret != 0)
        {
            _iounmap_range(v_addr, size);
            v_addr = NULL;
        }
    }
    rt_mm_unlock();

    return v_addr;
}
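
/* Public ioremap interface. rt_ioremap() and rt_ioremap_nocache() are
 * identical here: both create an uncached device mapping
 * (MM_AREA_TYPE_PHY), i.e. the default ioremap is a no-cache mapping. */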
void *rt_ioremap(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}

void *rt_ioremap_nocache(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}

void *rt_ioremap_cached(void *paddr, size_t size)
{
    return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_CACHED);
}
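
/* Undo a mapping created by the rt_ioremap() family: look up the area
 * record by its virtual start address, unmap the whole recorded range,
 * then drop the bookkeeping entry. Unknown addresses are ignored. */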
void rt_iounmap(volatile void *vaddr)
{
    struct lwp_avl_struct *ma_avl_node;

    rt_mm_lock();
    ma_avl_node = lwp_map_find(k_map_area, (size_t)vaddr);
    if (ma_avl_node)
    {
        struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ma_avl_node->data;

        _iounmap_range((void *)ma->addr, ma->size);
        lwp_map_area_remove(&k_map_area, (size_t)vaddr);
    }
    rt_mm_unlock();
}

#else
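
/* Without RT_USING_USERSPACE the kernel runs directly on physical
 * addresses (no remapping), so ioremap is an identity mapping and
 * iounmap is a no-op. */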
void *rt_ioremap(void *paddr, size_t size)
{
    return paddr;
}

void *rt_ioremap_nocache(void *paddr, size_t size)
{
    return paddr;
}

void *rt_ioremap_cached(void *paddr, size_t size)
{
    return paddr;
}

void rt_iounmap(volatile void *vaddr)
{
}

#endif
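
/*
 * Usage sketch (illustration only, not part of the original file; the
 * device base address and size below are placeholders):
 *
 *     volatile rt_uint32_t *regs;
 *
 *     regs = (volatile rt_uint32_t *)rt_ioremap((void *)0x10000000, 0x1000);
 *     if (regs)
 *     {
 *         rt_uint32_t id = regs[0]; // access device registers through the mapping
 *         rt_iounmap(regs);         // release both the mapping and its record
 *     }
 */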