mmu.h

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 */
#ifndef __MMU_H__
#define __MMU_H__

#include "riscv.h"
#include "riscv_mmu.h"

/* RAM, Flash, or ROM */
#define NORMAL_MEM 0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* MMIO region */
#define DEVICE_MEM 2
/* static memory region descriptor */
struct mem_desc
{
    rt_size_t vaddr_start;   /* virtual start address of the region */
    rt_size_t vaddr_end;     /* virtual end address of the region */
    rt_size_t paddr_start;   /* physical start address of the region */
    rt_size_t attr;          /* NORMAL_MEM, NORMAL_NOCACHE_MEM or DEVICE_MEM */
};
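
/*
 * Illustrative sketch (not part of the original header): a board port can
 * describe its fixed mappings as an array of mem_desc entries and hand
 * them to rt_hw_mmu_setup() declared below. The addresses and sizes here
 * are hypothetical placeholders.
 *
 *   static struct mem_desc platform_mem_desc[] =
 *   {
 *       // 256 MB of RAM, identity-mapped as cacheable normal memory
 *       {0x80000000UL, 0x80000000UL + 0x10000000UL - 1, 0x80000000UL, NORMAL_MEM},
 *       // a 64 KB MMIO window mapped as device memory
 *       {0x10000000UL, 0x10000000UL + 0x00010000UL - 1, 0x10000000UL, DEVICE_MEM},
 *   };
 *
 *   rt_hw_mmu_setup(&mmu_info, platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */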
#define GET_PF_ID(addr) ((addr) >> PAGE_OFFSET_BIT)
#define GET_PF_OFFSET(addr) __MASKVALUE(addr, PAGE_OFFSET_MASK)
#define GET_L1(addr) __PARTBIT(addr, VPN2_SHIFT, VPN2_BIT)
#define GET_L2(addr) __PARTBIT(addr, VPN1_SHIFT, VPN1_BIT)
#define GET_L3(addr) __PARTBIT(addr, VPN0_SHIFT, VPN0_BIT)
#define GET_PPN(pte) (__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
#define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
#define VPN_TO_PPN(vaddr, pv_off) (((rt_size_t)(vaddr)) + (pv_off))
#define PPN_TO_VPN(paddr, pv_off) (((rt_size_t)(paddr)) - (pv_off))
#define COMBINEVADDR(l1_off, l2_off, l3_off) (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | ((l3_off) << VPN0_SHIFT))
#define COMBINEPTE(paddr, attr) ((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
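
/*
 * Sketch of how these helpers compose during a software page-table walk
 * (illustrative only; the real walk lives in the port's mmu.c). Assuming
 * 'table' points at the level-1 (root) table and 'va' is a mapped
 * virtual address:
 *
 *   rt_size_t l1_pte = table[GET_L1(va)];            // index with VPN[2]
 *   // ...descend through the tables referenced by GET_PADDR(l1_pte),
 *   //    indexing with GET_L2(va) and then GET_L3(va), to the leaf PTE...
 *   rt_size_t pa = GET_PADDR(leaf_pte) | GET_PF_OFFSET(va);
 *
 * Conversely, COMBINEPTE(paddr, attr) builds a PTE whose PPN field refers
 * to the page at 'paddr', and COMBINEVADDR(l1, l2, l3) rebuilds a
 * page-aligned virtual address from its three VPN indexes.
 */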
typedef struct
{
    size_t *vtable;
    size_t vstart;
    size_t vend;
    size_t pv_off;
} rt_mmu_info;
void *mmu_table_get(void);
void switch_mmu(void *mmu_table);

int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off);
void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size);

void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr);
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size);

void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr);
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size);

void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);

void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr);

void rt_mm_lock(void);
void rt_mm_unlock(void);
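
/*
 * Illustrative usage sketch (hypothetical addresses, sizes and attributes;
 * the authoritative sequence lives in the port's mmu.c and board startup
 * code):
 *
 *   static rt_mmu_info mmu_info;
 *   static rt_size_t page_table[512] __attribute__((aligned(4096)));
 *   rt_size_t attr = ...;  // PTE attribute bits defined in riscv_mmu.h
 *
 *   // manage the virtual window starting at 0x100000000 with this table
 *   rt_hw_mmu_map_init(&mmu_info, (void *)0x100000000UL, 0x10000000UL,
 *                      page_table, 0);
 *
 *   // map one page of a hypothetical peripheral and translate it back
 *   void *va = rt_hw_mmu_map(&mmu_info, (void *)0x100001000UL,
 *                            (void *)0x10001000UL, 0x1000, attr);
 *   void *pa = rt_hw_mmu_v2p(&mmu_info, va);
 *
 * Judging by the naming, the leading-underscore variants are the unlocked
 * internals, while the rt_hw_mmu_* wrappers are expected to take
 * rt_mm_lock()/rt_mm_unlock() around them.
 */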
#define ARCH_ADDRESS_WIDTH_BITS 64

#define MMU_MAP_ERROR_VANOTALIGN  -1
#define MMU_MAP_ERROR_PANOTALIGN  -2
#define MMU_MAP_ERROR_NOPAGE      -3
#define MMU_MAP_ERROR_CONFLICT    -4

#endif