/* lwp_arch.c */
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 */
#include <rtthread.h>
#include <rthw.h>
#ifdef RT_USING_USERSPACE
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>
extern size_t MMUTable[];
  19. int arch_user_space_init(struct rt_lwp *lwp)
  20. {
  21. size_t *mmu_table;
  22. mmu_table = (size_t*)rt_pages_alloc(0);
  23. if (!mmu_table)
  24. {
  25. return -1;
  26. }
  27. lwp->end_heap = USER_HEAP_VADDR;
  28. memset(mmu_table, 0, ARCH_PAGE_SIZE);
  29. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
  30. rt_hw_mmu_map_init(&lwp->mmu_info, (void*)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET);
  31. return 0;
  32. }
/* Return the kernel-space MMU translation table for this architecture.
 *
 * NOTE(review): this port returns NULL even though `MMUTable` is declared
 * `extern` above and is otherwise unused in this file — presumably this
 * should return `(void *)MMUTable` as other ports do; confirm that every
 * caller tolerates a NULL kernel table on this architecture before relying
 * on it.
 */
void *arch_kernel_mmu_table_get(void)
{
    return (void*)NULL;
}
  37. void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors)
  38. {
  39. }
  40. void arch_user_space_vtable_free(struct rt_lwp *lwp)
  41. {
  42. if (lwp && lwp->mmu_info.vtable)
  43. {
  44. rt_pages_free(lwp->mmu_info.vtable, 0);
  45. }
  46. }
  47. int arch_expand_user_stack(void *addr)
  48. {
  49. int ret = 0;
  50. size_t stack_addr = (size_t)addr;
  51. stack_addr &= ~ARCH_PAGE_MASK;
  52. if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
  53. {
  54. void *map = lwp_map_user(lwp_self(), (void*)stack_addr, ARCH_PAGE_SIZE, 0);
  55. if (map || lwp_user_accessable(addr, 1))
  56. {
  57. ret = 1;
  58. }
  59. }
  60. return ret;
  61. }
#endif /* RT_USING_USERSPACE */