/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-06     Jesven       first version
 */
#include <rthw.h>
#include <rtthread.h>
#include <ioremap.h>

#ifdef RT_USING_USERSPACE
#include <mmu.h>
#include <lwp_mm_area.h>

/* AVL tree of kernel I/O mappings, keyed by virtual address. */
static struct lwp_avl_struct *k_map_area;
extern rt_mmu_info mmu_info;
  18. static void _iounmap_range(void *addr, size_t size)
  19. {
  20. void *va = RT_NULL, *pa = RT_NULL;
  21. int i = 0;
  22. for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
  23. {
  24. pa = rt_hw_mmu_v2p(&mmu_info, va);
  25. if (pa)
  26. {
  27. rt_hw_mmu_unmap(&mmu_info, va, ARCH_PAGE_SIZE);
  28. }
  29. }
  30. }
  31. static void *_ioremap_type(void *paddr, size_t size, int type)
  32. {
  33. rt_base_t level;
  34. void *v_addr = NULL;
  35. size_t attr;
  36. switch (type)
  37. {
  38. case MM_AREA_TYPE_PHY:
  39. attr = MMU_MAP_K_DEVICE;
  40. break;
  41. case MM_AREA_TYPE_PHY_CACHED:
  42. attr = MMU_MAP_K_RWCB;
  43. break;
  44. default:
  45. return v_addr;
  46. }
  47. level = rt_hw_interrupt_disable();
  48. v_addr = rt_hw_mmu_map(&mmu_info, 0, paddr, size, attr);
  49. if (v_addr)
  50. {
  51. int ret = lwp_map_area_insert(&k_map_area, (size_t)v_addr, size, type);
  52. if (ret != 0)
  53. {
  54. _iounmap_range(v_addr, size);
  55. v_addr = NULL;
  56. }
  57. }
  58. rt_hw_interrupt_enable(level);
  59. return v_addr;
  60. }
  61. void *rt_ioremap(void *paddr, size_t size)
  62. {
  63. return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
  64. }
  65. void *rt_ioremap_nocache(void *paddr, size_t size)
  66. {
  67. return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
  68. }
  69. void *rt_ioremap_cached(void *paddr, size_t size)
  70. {
  71. return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_CACHED);
  72. }
  73. void rt_iounmap(volatile void *vaddr)
  74. {
  75. rt_base_t level;
  76. struct lwp_avl_struct *ma_avl_node;
  77. level = rt_hw_interrupt_disable();
  78. ma_avl_node = lwp_map_find(k_map_area, (size_t)vaddr);
  79. if (ma_avl_node)
  80. {
  81. struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ma_avl_node->data;
  82. _iounmap_range((void *)ma->addr, ma->size);
  83. lwp_map_area_remove(&k_map_area, (size_t)vaddr);
  84. }
  85. rt_hw_interrupt_enable(level);
  86. }
#else
  88. void *rt_ioremap(void *paddr, size_t size)
  89. {
  90. return paddr;
  91. }
  92. void *rt_ioremap_nocache(void *paddr, size_t size)
  93. {
  94. return paddr;
  95. }
  96. void *rt_ioremap_cached(void *paddr, size_t size)
  97. {
  98. return paddr;
  99. }
  100. void rt_iounmap(volatile void *vaddr)
  101. {
  102. }
#endif