cpu.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-09-15     Bernard      first version
 * 2019-07-28     zdzn         add smp support
 * 2021-12-21     GuEe-GUI     set tpidr_el1 as multiprocessor id instead of mpidr_el1
 * 2021-12-28     GuEe-GUI     add spinlock for aarch64
 */

#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>

#ifdef RT_USING_SMP
/* The most common mpidr_el1 table; redefine it in the BSP if the SoC maps cores differently */
RT_WEAK rt_uint64_t rt_cpu_mpidr_early[] =
{
    [0] = 0x80000000,
    [1] = 0x80000001,
    [2] = 0x80000002,
    [3] = 0x80000003,
    [4] = 0x80000004,
    [5] = 0x80000005,
    [6] = 0x80000006,
    [7] = 0x80000007,
    [RT_CPUS_NR] = 0
};
#endif
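
/*
 * Return the id of the calling CPU. The boot code is assumed to have
 * written each core's index into its tpidr_el1 register (see the
 * 2021-12-21 change log entry above).
 */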
int rt_hw_cpu_id(void)
{
    rt_base_t value;

    __asm__ volatile ("mrs %0, tpidr_el1" : "=r" (value));

    return value;
}

#ifdef RT_USING_SMP
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
    lock->slock = 0;
}
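
/*
 * Ticket spinlock in the style of the Linux arm64 implementation. The
 * 32-bit slock word is assumed to pack two 16-bit tickets: "owner" in the
 * low half and "next" in the high half (little-endian layout). A locker
 * atomically takes the current "next" value while bumping it by one
 * (the "I" (1 << 16) operand), then spins until "owner" equals its ticket.
 */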
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    rt_hw_spinlock_t lock_val, new_lockval;
    unsigned int tmp;

    __asm__ volatile (
        /* Increment the next ticket. */
        "   prfm    pstl1strm, %3\n"
        "1: ldaxr   %w0, %3\n"
        "   add     %w1, %w0, %w5\n"
        "   stxr    %w2, %w1, %3\n"
        "   cbnz    %w2, 1b\n"
        /* Check whether we got the lock. */
        "   eor     %w1, %w0, %w0, ror #16\n"
        "   cbz     %w1, 3f\n"
        /*
         * Didn't get the lock, so spin on the owner field.
         * Send a local event to avoid missing an unlock
         * before the exclusive load.
         */
        "   sevl\n"
        "2: wfe\n"
        "   ldaxrh  %w2, %4\n"
        "   eor     %w1, %w2, %w0, lsr #16\n"
        "   cbnz    %w1, 2b\n"
        /* Got the lock. */
        "3:"
        : "=&r" (lock_val), "=&r" (new_lockval), "=&r" (tmp), "+Q" (*lock)
        : "Q" (lock->tickets.owner), "I" (1 << 16)
        : "memory");

    __DMB();
}
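
/*
 * Release the lock by bumping the owner ticket. The store-release (stlrh)
 * publishes the critical section's writes and, by clearing the waiters'
 * exclusive monitor, generates the event that wakes them from wfe.
 */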
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    __DMB();
    __asm__ volatile (
        "stlrh   %w1, %0\n"
        : "=Q" (lock->tickets.owner)
        : "r" (lock->tickets.owner + 1)
        : "memory");
}
#endif /* RT_USING_SMP */
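
/*
 * Usage sketch (illustrative only, not part of this port): a spinlock
 * shared by several cores protecting a counter. All names below are
 * hypothetical.
 *
 *     static rt_hw_spinlock_t demo_lock;
 *     static volatile rt_uint32_t demo_counter = 0;
 *
 *     void demo_init(void)
 *     {
 *         rt_hw_spin_lock_init(&demo_lock);
 *     }
 *
 *     void demo_increment(void)
 *     {
 *         rt_hw_spin_lock(&demo_lock);
 *         demo_counter++;
 *         rt_hw_spin_unlock(&demo_lock);
 *     }
 */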

/**
 * @addtogroup ARM CPU
 */
/*@{*/

/** shutdown CPU */
RT_WEAK void rt_hw_cpu_shutdown(void)
{
    rt_uint32_t level;

    rt_kprintf("shutdown...\n");

    level = rt_hw_interrupt_disable();
    while (level)
    {
        RT_ASSERT(0);
    }
}

/*@}*/