lwp_arch.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 */
#include <rthw.h>
#include <rtthread.h>

#ifdef ARCH_MM_MMU

#include <lwp_arch.h>
#include <lwp_user_mm.h>

extern size_t MMUTable[];
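/*
 * Set up the per-process address space for a new lwp. Summary of the
 * flow below: allocate one page for the first-level page table, zero
 * it, flush it to memory so the MMU sees a consistent table, then hand
 * it to rt_aspace_create() together with the user address range.
 */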
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    /* allocate one page (order 0) to hold the first-level page table */
    mmu_table = (size_t *)rt_pages_alloc(0);
    if (!mmu_table)
    {
        return -1;
    }

    lwp->end_heap = USER_HEAP_VADDR;

    /* clear the table and flush it so the MMU reads a consistent copy */
    memset(mmu_table, 0, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);

    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* creating the address space failed; release the page table
         * instead of leaking it */
        rt_pages_free(mmu_table, 0);
        return -1;
    }

    return 0;
}
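/* This port exposes no shared kernel page table, so callers get NULL. */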
void *arch_kernel_mmu_table_get(void)
{
    return (void *)NULL;
}
void arch_user_space_vtable_free(struct rt_lwp *lwp)
{
    /* also guard against a NULL aspace, e.g. when arch_user_space_init()
     * failed partway through */
    if (lwp && lwp->aspace && lwp->aspace->page_table)
    {
        rt_pages_free(lwp->aspace->page_table, 0);
        lwp->aspace->page_table = NULL;
    }
}
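/*
 * Grow the user stack on demand, typically from the page-fault path:
 * round the faulting address down to a page boundary, check that it
 * falls inside the user stack window, and map one page there. Returns
 * 1 if the address is (now) backed, 0 otherwise.
 */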
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    /* round the faulting address down to its page base */
    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
        (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map =
            lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1; /* the page is mapped (or already was); the access can be retried */
        }
    }

    return ret;
}
#endif /* ARCH_MM_MMU */
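
/*
 * Usage sketch (hypothetical, not part of this file): a page-fault
 * handler could try to satisfy a fault in the stack window before
 * treating it as fatal. Every name below other than
 * arch_expand_user_stack() is an assumption for illustration.
 *
 * void fault_handler(void *fault_addr)
 * {
 *     if (arch_expand_user_stack(fault_addr))
 *     {
 *         return;                         // page mapped; retry the access
 *     }
 *     handle_fatal_fault(fault_addr);     // hypothetical fallback
 * }
 */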