/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-07-25     Shell        first version
 * 2023-11-25     Shell        Add pgrp, session lock API
 */
#ifndef __LWP_INTERNAL_H__
#define __LWP_INTERNAL_H__

#include "lwp.h"
#include "lwp_arch.h"
#include "lwp_user_mm.h"
#include "lwp_mm.h"

#include <rtthread.h>
#include "libc_musl.h"
  19. struct rt_lwp;
  20. #define LWP_MTX_FLAGS_INTR 0x1 /* interruptible waiting */
  21. #define LWP_MTX_FALGS_NESTED 0x2 /* allow nested */
  22. rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags);
  23. rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx);
  24. rt_inline rt_bool_t lwp_in_user_space(const char *addr)
  25. {
  26. return (addr >= (char *)USER_VADDR_START && addr < (char *)USER_VADDR_TOP);
  27. }
/*
 * Local interrupt masking helpers.
 * On SMP builds only the calling core's interrupts are masked
 * (rt_hw_local_irq_*); on UP builds these fall back to the global
 * interrupt disable/enable primitives.
 */
#ifdef RT_USING_SMP
#define LOCAL_IRQ_MASK() rt_hw_local_irq_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_local_irq_enable(level)
#else
#define LOCAL_IRQ_MASK() rt_hw_interrupt_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_interrupt_enable(level)
#endif
#ifndef LWP_USING_CPUS_LOCK
/*
 * Mutex-based critical sections (default path): session, process group
 * and process objects are each protected by their own lock.
 * `flags` takes the LWP_MTX_* bits; each *_enter returns RT_EOK on
 * success and must be paired with the matching *_exit.
 */
rt_err_t lwp_sess_critical_enter(struct rt_session *sess, int flags);
rt_err_t lwp_sess_critical_exit(struct rt_session *sess);
rt_err_t lwp_pgrp_critical_enter(struct rt_processgroup *pgrp, int flags);
rt_err_t lwp_pgrp_critical_exit(struct rt_processgroup *pgrp);
rt_err_t lwp_critical_enter(struct rt_lwp *lwp, int flags);
rt_err_t lwp_critical_exit(struct rt_lwp *lwp);

/* Assert that the calling thread currently owns the object's lock. */
#define LWP_ASSERT_LOCKED(proc) RT_ASSERT(rt_mutex_get_owner(&(proc)->lwp_lock) == rt_thread_self())
#define PGRP_ASSERT_LOCKED(pgrp) RT_ASSERT(rt_mutex_get_owner(&(pgrp)->mutex) == rt_thread_self())
/* Take the per-process lock of @lwp; any failure is fatal (RT_ASSERT).
 * RT_DEBUG_SCHEDULER_AVAILABLE(1) guards against use in contexts where
 * the scheduler cannot run (e.g. interrupt context). */
#define LWP_LOCK(lwp)                                 \
    do                                                \
    {                                                 \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);              \
        if (lwp_critical_enter(lwp, 0) != RT_EOK)     \
        {                                             \
            RT_ASSERT(0);                             \
        }                                             \
    } while (0)

/* Same as LWP_LOCK() but passes LWP_MTX_FALGS_NESTED to allow the owner
 * thread to take the lock recursively. */
#define LWP_LOCK_NESTED(lwp)                                           \
    do                                                                 \
    {                                                                  \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);                               \
        if (lwp_critical_enter(lwp, LWP_MTX_FALGS_NESTED) != RT_EOK)   \
        {                                                              \
            RT_ASSERT(0);                                              \
        }                                                              \
    } while (0)

/* Release the lock taken by LWP_LOCK()/LWP_LOCK_NESTED(). */
#define LWP_UNLOCK(lwp)                      \
    do {                                     \
        if (lwp_critical_exit(lwp) != RT_EOK) \
        {                                    \
            RT_ASSERT(0);                    \
        }                                    \
    } while (0)
/* Take the process-group lock of @pgrp; failure is fatal (RT_ASSERT).
 * Must not be used where the scheduler is unavailable. */
#define PGRP_LOCK(pgrp)                                \
    do                                                 \
    {                                                  \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);               \
        if (lwp_pgrp_critical_enter(pgrp, 0) != RT_EOK) \
        {                                              \
            RT_ASSERT(0);                              \
        }                                              \
    } while (0)

/* Same as PGRP_LOCK() but allows recursive taking by the owner thread. */
#define PGRP_LOCK_NESTED(pgrp)                                             \
    do                                                                     \
    {                                                                      \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);                                   \
        if (lwp_pgrp_critical_enter(pgrp, LWP_MTX_FALGS_NESTED) != RT_EOK) \
        {                                                                  \
            RT_ASSERT(0);                                                  \
        }                                                                  \
    } while (0)

/* Release the lock taken by PGRP_LOCK()/PGRP_LOCK_NESTED(). */
#define PGRP_UNLOCK(pgrp)                          \
    do                                             \
    {                                              \
        if (lwp_pgrp_critical_exit(pgrp) != RT_EOK) \
        {                                          \
            RT_ASSERT(0);                          \
        }                                          \
    } while (0)
/* Take the session lock of @sess; failure is fatal (RT_ASSERT).
 * Must not be used where the scheduler is unavailable. */
#define SESS_LOCK(sess)                                \
    do                                                 \
    {                                                  \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);               \
        if (lwp_sess_critical_enter(sess, 0) != RT_EOK) \
        {                                              \
            RT_ASSERT(0);                              \
        }                                              \
    } while (0)

/* Same as SESS_LOCK() but allows recursive taking by the owner thread. */
#define SESS_LOCK_NESTED(sess)                                             \
    do                                                                     \
    {                                                                      \
        RT_DEBUG_SCHEDULER_AVAILABLE(1);                                   \
        if (lwp_sess_critical_enter(sess, LWP_MTX_FALGS_NESTED) != RT_EOK) \
        {                                                                  \
            RT_ASSERT(0);                                                  \
        }                                                                  \
    } while (0)

/* Release the lock taken by SESS_LOCK()/SESS_LOCK_NESTED(). */
#define SESS_UNLOCK(sess)                          \
    do                                             \
    {                                              \
        if (lwp_sess_critical_exit(sess) != RT_EOK) \
        {                                          \
            RT_ASSERT(0);                          \
        }                                          \
    } while (0)
  121. #else
  122. #define LWP_LOCK(lwp) rt_base_t level = rt_hw_interrupt_disable()
  123. #define LWP_UNLOCK(lwp) rt_hw_interrupt_enable(level)
  124. #define PGRP_LOCK(pgrp) rt_base_t level = rt_hw_interrupt_disable()
  125. #define PGRP_UNLOCK(pgrp) rt_hw_interrupt_enable(level)
  126. #define SESS_LOCK(sess) rt_base_t level = rt_hw_interrupt_disable()
  127. #define SESS_UNLOCK(sess) rt_hw_interrupt_enable(level)
  128. #endif /* LWP_USING_CPUS_LOCK */
/* cpus lock */
#ifdef LWP_OVERRIDE_CPUS_LOCK
/*
 * Debug override of the global interrupt lock: detect nested
 * rt_hw_interrupt_disable() calls and report them instead of silently
 * re-locking.
 */
#undef rt_hw_interrupt_disable
#undef rt_hw_interrupt_enable

/* If interrupts are already masked this is a nested disable: warn, dump a
 * backtrace, and return the sentinel 0xabadcafe so the paired enable below
 * knows there is nothing to unlock. Otherwise take the cpus lock.
 * (GCC/Clang statement-expression.) */
#define rt_hw_interrupt_disable() ({ \
    rt_base_t irq = rt_hw_interrupt_is_disabled(); \
    if (irq) \
    { \
        LOG_W("Nested interrupt disable"); \
        rt_backtrace(); \
        irq = 0xabadcafe; \
    } else { \
        irq = rt_cpus_lock(); \
    } \
    irq; \
})

/* Release the cpus lock unless the paired disable was a nested no-op
 * (marked by the 0xabadcafe sentinel). */
#define rt_hw_interrupt_enable(level) do { \
    if (level != 0xabadcafe) \
        rt_cpus_unlock(level); \
    } while (0)
#endif /* LWP_OVERRIDE_CPUS_LOCK */
  150. /**
  151. * Brief: Return code with safety check
  152. * There tend to be chances where a return value is returned without correctly init
  153. */
  154. #ifndef LWP_DEBUG
  155. #define LWP_DEF_RETURN_CODE(name) rt_err_t name;RT_UNUSED(name)
  156. #define LWP_RETURN(name) return name
  157. #else
  158. #define _LWP_UNINITIALIZED_RC 0xbeefcafe
  159. #define LWP_DEF_RETURN_CODE(name) rt_err_t name = _LWP_UNINITIALIZED_RC
  160. #define LWP_RETURN(name) {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
  161. #endif /* LWP_DEBUG */
  162. #endif /* __LWP_INTERNAL_H__ */