/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 */
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
  20. int arch_user_space_init(struct rt_lwp *lwp)
  21. {
  22. size_t *mmu_table;
  23. mmu_table = (size_t *)rt_pages_alloc(2);
  24. if (!mmu_table)
  25. {
  26. return -RT_ENOMEM;
  27. }
  28. lwp->end_heap = USER_HEAP_VADDR;
  29. rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
  30. rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
  31. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
  32. lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
  33. if (!lwp->aspace)
  34. {
  35. return -RT_ERROR;
  36. }
  37. return 0;
  38. }
  39. static struct rt_varea kuser_varea;
  40. void arch_kuser_init(rt_aspace_t aspace, void *vectors)
  41. {
  42. const size_t kuser_size = 0x1000;
  43. int err;
  44. extern char __kuser_helper_start[], __kuser_helper_end[];
  45. int kuser_sz = __kuser_helper_end - __kuser_helper_start;
  46. err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
  47. MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
  48. &rt_mm_dummy_mapper, 0);
  49. if (err != 0)
  50. while (1)
  51. ; // early failed
  52. rt_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
  53. /*
  54. * vectors + 0xfe0 = __kuser_get_tls
  55. * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
  56. */
  57. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
  58. rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
  59. }
  60. void arch_user_space_free(struct rt_lwp *lwp)
  61. {
  62. if (lwp)
  63. {
  64. RT_ASSERT(lwp->aspace);
  65. void *pgtbl = lwp->aspace->page_table;
  66. rt_aspace_delete(lwp->aspace);
  67. /* must be freed after aspace delete, pgtbl is required for unmap */
  68. rt_pages_free(pgtbl, 2);
  69. lwp->aspace = RT_NULL;
  70. }
  71. else
  72. {
  73. LOG_W("%s: NULL lwp as parameter", __func__);
  74. RT_ASSERT(0);
  75. }
  76. }
  77. int arch_expand_user_stack(void *addr)
  78. {
  79. int ret = 0;
  80. size_t stack_addr = (size_t)addr;
  81. stack_addr &= ~ARCH_PAGE_MASK;
  82. if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
  83. {
  84. void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
  85. if (map || lwp_user_accessable(addr, 1))
  86. {
  87. ret = 1;
  88. }
  89. }
  90. return ret;
  91. }
  92. #ifdef LWP_ENABLE_ASID
  93. #define MAX_ASID_BITS 8
  94. #define MAX_ASID (1 << MAX_ASID_BITS)
  95. static uint64_t global_generation = 1;
  96. static char asid_valid_bitmap[MAX_ASID];
  97. unsigned int arch_get_asid(struct rt_lwp *lwp)
  98. {
  99. if (lwp == RT_NULL)
  100. {
  101. // kernel
  102. return 0;
  103. }
  104. if (lwp->generation == global_generation)
  105. {
  106. return lwp->asid;
  107. }
  108. if (lwp->asid && !asid_valid_bitmap[lwp->asid])
  109. {
  110. asid_valid_bitmap[lwp->asid] = 1;
  111. return lwp->asid;
  112. }
  113. for (unsigned i = 1; i < MAX_ASID; i++)
  114. {
  115. if (asid_valid_bitmap[i] == 0)
  116. {
  117. asid_valid_bitmap[i] = 1;
  118. lwp->generation = global_generation;
  119. lwp->asid = i;
  120. return lwp->asid;
  121. }
  122. }
  123. global_generation++;
  124. memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));
  125. asid_valid_bitmap[1] = 1;
  126. lwp->generation = global_generation;
  127. lwp->asid = 1;
  128. asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
  129. return lwp->asid;
  130. }
  131. #endif
  132. #endif