vdso_sys.h

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-07-04     rcitach      init ver.
 */
#ifndef ASM_VDSO_SYS_H
#define ASM_VDSO_SYS_H

#include <time.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>

#include <vdso_config.h>
#include <vdso_datapage.h>
#define __always_unused __attribute__((__unused__))
#define __maybe_unused  __attribute__((__unused__))

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/*
 * Create an address dependency on 'val': eor yields zero from val,
 * the dummy load from sp+0 then depends on that result, so later
 * accesses cannot be reordered before the counter read.
 */
#define arch_counter_enforce_ordering(val) do {    \
    uint64_t tmp, _val = (val);                    \
                                                   \
    asm volatile(                                  \
    "    eor    %0, %1, %1\n"                      \
    "    add    %0, sp, %0\n"                      \
    "    ldr    xzr, [%0]"                         \
    : "=r" (tmp) : "r" (_val));                    \
} while (0)
/* Read the virtual counter (CNTVCT_EL0) with speculation ordering. */
static inline uint64_t __arch_get_hw_counter(void)
{
    uint64_t res;

    __asm__ volatile("mrs %0, CNTVCT_EL0" : "=r" (res));
    arch_counter_enforce_ordering(res);
    return res;
}
/* Read the counter frequency in Hz (CNTFRQ_EL0). */
static inline uint64_t __arch_get_hw_frq(void)
{
    uint64_t res;

    __asm__ volatile("mrs %0, CNTFRQ_EL0" : "=r" (res));
    arch_counter_enforce_ordering(res);
    return res;
}
static inline uint32_t
__iter_div_u64_rem(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
    uint32_t ret = 0;

    while (dividend >= divisor) {
        /* The following asm() prevents the compiler from
           optimising this loop into a modulo operation. */
        __asm__("" : "+rm"(dividend));

        dividend -= divisor;
        ret++;
    }

    *remainder = dividend;

    return ret;
}
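
/*
 * Usage sketch for __iter_div_u64_rem: the timespec_add_ns() pattern
 * used by the Linux vDSO, where the quotient is expected to be tiny,
 * so the subtraction loop beats a full 64-bit division. The function
 * name and NSEC_PER_SEC below are illustrative assumptions, not
 * definitions provided by this header.
 */
#ifdef VDSO_SYS_EXAMPLES /* hypothetical guard; example only */
#define NSEC_PER_SEC 1000000000ULL

static inline void timespec_add_ns(struct timespec *ts, uint64_t ns)
{
    uint64_t rem;

    /* Carry whole seconds out of tv_nsec, keep the remainder. */
    ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + ns, NSEC_PER_SEC, &rem);
    ts->tv_nsec = (long)rem;
}
#endif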
#define __RT_STRINGIFY(x...) #x
#define RT_STRINGIFY(x...) __RT_STRINGIFY(x)

#define rt_hw_barrier(cmd, ...) \
    __asm__ volatile (RT_STRINGIFY(cmd) " " RT_STRINGIFY(__VA_ARGS__) ::: "memory")

#define rt_hw_isb() rt_hw_barrier(isb)
#define rt_hw_dmb() rt_hw_barrier(dmb, ish)
#define rt_hw_wmb() rt_hw_barrier(dmb, ishst)
#define rt_hw_rmb() rt_hw_barrier(dmb, ishld)
#define rt_hw_dsb() rt_hw_barrier(dsb, ish)
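
/* For reference, rt_hw_dmb() above expands to:
 *     __asm__ volatile ("dmb ish" ::: "memory")
 * i.e. the stringified mnemonic and operand joined with a space. */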
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("" : : : "memory")
#endif
/* Hint to the CPU that we are busy-waiting (SMT/power friendly). */
static inline void cpu_relax(void)
{
    __asm__ volatile("yield" ::: "memory");
}
#define __READ_ONCE_SIZE                                              \
({                                                                    \
    switch (size) {                                                   \
    case 1: *(uint8_t *)res  = *(volatile uint8_t *)p; break;         \
    case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;        \
    case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;        \
    case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;        \
    default:                                                          \
        barrier();                                                    \
        __builtin_memcpy((void *)res, (const void *)p, size);         \
        barrier();                                                    \
    }                                                                 \
})
static inline
void __read_once_size(const volatile void *p, void *res, int size)
{
    __READ_ONCE_SIZE;
}
/*
 * On AArch64, address dependencies already provide ordering, so this
 * is a no-op (only Alpha needs a real barrier here). Defined locally
 * because nothing else in this header provides it.
 */
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() do { } while (0)
#endif

#define __READ_ONCE(x, check)                                         \
({                                                                    \
    union { typeof(x) __val; char __c[1]; } __u;                      \
    if (check)                                                        \
        __read_once_size(&(x), __u.__c, sizeof(x));                   \
    smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
    __u.__val;                                                        \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
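
/*
 * Usage sketch: READ_ONCE() forces a single, untorn load and stops
 * the compiler from caching the value across loop iterations.
 * wait_for_flag_sketch() is an illustrative helper, not part of
 * this header.
 */
#ifdef VDSO_SYS_EXAMPLES /* hypothetical guard; example only */
static inline void wait_for_flag_sketch(const uint32_t *flag)
{
    /* Without READ_ONCE() the compiler may hoist the load and spin
     * on a stale register copy forever. */
    while (!READ_ONCE(*flag))
        cpu_relax();
}
#endif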
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));

static inline struct vdso_data *__arch_get_vdso_data(void)
{
    return _vdso_data;
}
/*
 * Seqcount reader entry: spin while a writer holds the sequence
 * (odd value), then order subsequent data reads after the
 * sequence load.
 */
static inline uint32_t rt_vdso_read_begin(const struct vdso_data *vd)
{
    uint32_t seq;

    while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
        cpu_relax();

    rt_hw_rmb();
    return seq;
}
/*
 * Seqcount reader exit: returns non-zero if a writer updated the data
 * while we were reading, in which case the caller must retry.
 */
static inline uint32_t rt_vdso_read_retry(const struct vdso_data *vd,
                                          uint32_t start)
{
    uint32_t seq;

    rt_hw_rmb();
    seq = READ_ONCE(vd->seq);
    return seq != start;
}
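
/*
 * Usage sketch of the seqcount read protocol, modelled on the Linux
 * vDSO reader loop: take a snapshot between begin/retry and restart
 * if a writer raced us. vdso_read_cycles_sketch() is an illustrative
 * helper, not part of this header.
 */
#ifdef VDSO_SYS_EXAMPLES /* hypothetical guard; example only */
static inline uint64_t vdso_read_cycles_sketch(const struct vdso_data *vd)
{
    uint32_t seq;
    uint64_t cycles;

    do {
        seq = rt_vdso_read_begin(vd);      /* wait for a stable snapshot */
        cycles = __arch_get_hw_counter();  /* read under the seqcount */
    } while (rt_vdso_read_retry(vd, seq)); /* retry if a writer intervened */

    return cycles;
}
#endif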
#endif /* ASM_VDSO_SYS_H */