lwp_arch.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef ARCH_MM_MMU

#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>

extern size_t MMUTable[];

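/*
 * Build the MMU context of a new user process: allocate its level-1 page
 * table, inherit the kernel mappings and initialise the user address range
 * [USER_VADDR_START, USER_VADDR_TOP).
 */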
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

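    /* the level-1 table spans 4 pages (16 KB); rt_pages_alloc(2) is expected
     * to return 2^2 contiguous pages */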
    mmu_table = (size_t*)rt_pages_alloc(2);
    if (!mmu_table)
    {
        return -1;
    }

    lwp->end_heap = USER_HEAP_VADDR;

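    /* copy the kernel section entries from the global MMUTable, clear the
     * user part of the table, then flush it so the MMU table walker sees
     * consistent data */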
    rt_memcpy(mmu_table + (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT),
              MMUTable + (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT),
              ARCH_PAGE_SIZE);
    rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);

    rt_hw_mmu_map_init(&lwp->mmu_info, (void*)USER_VADDR_START,
                       USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET);
    return 0;
}

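/* return the kernel MMU table translated to its physical address */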
void *arch_kernel_mmu_table_get(void)
{
    return (void*)((char*)MMUTable + PV_OFFSET);
}

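/*
 * Map the vector page user read-only and copy the kuser helper routines to
 * the top of that page, following the ARM Linux kuser helper layout noted
 * in the comment below.
 */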
void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors)
{
    extern char __kuser_helper_start[], __kuser_helper_end[];
    int kuser_sz = __kuser_helper_end - __kuser_helper_start;

    rt_hw_mmu_map_auto(mmu_info, vectors, 0x1000, MMU_MAP_U_RO);

    rt_memcpy((void*)((char*)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);

    /*
     * vectors + 0xfe0 = __kuser_get_tls
     * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
     */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void*)((char*)vectors + 0x1000 - kuser_sz), kuser_sz);
    rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void*)((char*)vectors + 0x1000 - kuser_sz), kuser_sz);
}

void arch_user_space_vtable_free(struct rt_lwp *lwp)
{
    if (lwp && lwp->mmu_info.vtable)
    {
        rt_pages_free(lwp->mmu_info.vtable, 2);
    }
}

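/*
 * Grow the user stack on demand: if the faulting address lies inside the
 * user stack region, map one page there; return 1 when the address ends up
 * backed by an accessible page, 0 otherwise.
 */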
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void*)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}

#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)

static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];

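/*
 * Hand out an 8-bit ASID for the process. ASID 0 is reserved for the kernel;
 * the others are allocated from a per-generation bitmap. When every ASID of
 * the current generation is in use, a new generation starts and the TLB is
 * flushed so entries tagged with recycled ASIDs cannot be hit.
 */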
unsigned int arch_get_asid(struct rt_lwp *lwp)
{
    if (lwp == RT_NULL)
    {
        // kernel
        return 0;
    }

    if (lwp->generation == global_generation)
    {
        return lwp->asid;
    }

    if (lwp->asid && !asid_valid_bitmap[lwp->asid])
    {
        asid_valid_bitmap[lwp->asid] = 1;
        return lwp->asid;
    }

    for (unsigned i = 1; i < MAX_ASID; i++)
    {
        if (asid_valid_bitmap[i] == 0)
        {
            asid_valid_bitmap[i] = 1;
            lwp->generation = global_generation;
            lwp->asid = i;
            return lwp->asid;
        }
    }

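    /* no free ASID left: bump the generation, give this process ASID 1 and
     * invalidate the entire unified TLB */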
    global_generation++;
    memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));

    asid_valid_bitmap[1] = 1;
    lwp->generation = global_generation;
    lwp->asid = 1;

    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");

    return lwp->asid;
}
#endif /* LWP_ENABLE_ASID */

#endif /* ARCH_MM_MMU */