/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-07-25     Shell        first version
 */

#define DBG_TAG "lwp.internal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <stdlib.h>

#include "lwp_internal.h"
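
/*
 * Take @mtx with an optional interruptible wait.  Under LWP_DEBUG the helper
 * also warns when the caller already holds the mutex; at DBG_LOG level it
 * converts an RT_WAITING_FOREVER request into a poll while other mutexes are
 * held, so a suspected deadlock can be logged together with a backtrace.
 */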
static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
{
    DEF_RETURN_CODE(rc);
    int retry;
    rt_int32_t effect_timeout;

#ifdef LWP_DEBUG
    rt_thread_t thread = rt_thread_self();
#endif

    if (mtx)
    {
        effect_timeout = timeout;

#if DBG_LVL == DBG_LOG && defined(LWP_DEBUG)
        int exception;
        rt_list_t *node = RT_NULL;
        struct rt_mutex *tak_obj = RT_NULL;

        /* if the caller already holds other mutexes and would block forever,
         * poll instead so a potential deadlock can be reported on timeout */
        if (!rt_list_isempty(&(thread->taken_object_list)) && timeout == RT_WAITING_FOREVER)
        {
            exception = 1;
            effect_timeout = 0;
        }
        else
        {
            exception = 0;
        }
#endif /* DBG_LVL == DBG_LOG && defined(LWP_DEBUG) */

        do {
            retry = 0;
            if (interruptable)
                rc = rt_mutex_take_interruptible(mtx, effect_timeout);
            else
                rc = rt_mutex_take(mtx, effect_timeout);

#ifdef LWP_DEBUG
            if (rc == RT_EOK)
            {
                if (rt_mutex_get_hold(mtx) > 1)
                {
                    LOG_W("Already hold the lock");
                }
            }
            else if (rc == -RT_ETIMEOUT)
            {
#if DBG_LVL == DBG_LOG
                if (exception)
                {
                    /* dump every taken mutex whose owner is suspended, then retry the take */
                    rt_list_for_each(node, &(thread->taken_object_list))
                    {
                        tak_obj = rt_list_entry(node, struct rt_mutex, taken_list);
                        if (rt_mutex_get_owner(tak_obj)->stat & RT_THREAD_SUSPEND_MASK)
                            LOG_D("Potential dead lock - Taken: %s, Try take: %s",
                                  tak_obj->parent.parent.name, mtx->parent.parent.name);
                    }
                    rt_backtrace();
                    retry = 1;
                    exception = 0;
                }
#endif /* DBG_LVL == DBG_LOG */
            }
            else if (rc != -RT_EINTR)
            {
                char tname[RT_NAME_MAX];
                rt_thread_get_name(thread, tname, sizeof(tname));
                LOG_W("Possible kernel corruption detected on thread %s with errno %ld", tname, rc);
            }
#endif /* LWP_DEBUG */
        } while (retry);
    }
    else
    {
        LOG_W("%s: mtx should not be NULL", __func__);
        RT_ASSERT(0);
    }

    RETURN(rc);
}
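
/* exported wrapper around the static _mutex_take_safe() helper */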
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
{
    DEF_RETURN_CODE(rc);
    rc = _mutex_take_safe(mtx, timeout, interruptable);
    RETURN(rc);
}
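
/* release @mtx; a failure is logged but still reported back to the caller */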
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
{
    DEF_RETURN_CODE(rc);

    rc = rt_mutex_release(mtx);
    if (rc)
    {
        LOG_I("%s: release failed with code %ld", __func__, rc);
    }
    RETURN(rc);
}
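
/*
 * Enter the process-wide critical section by taking lwp->lwp_lock without
 * a timeout.  If the take is interrupted (-RT_EINTR) the process is being
 * force killed, and the calling thread terminates through sys_exit().
 */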
rt_err_t lwp_critical_enter(struct rt_lwp *lwp)
{
    rt_err_t rc;
    rc = lwp_mutex_take_safe(&lwp->lwp_lock, RT_WAITING_FOREVER, 0);

    /* if current process is force killed */
    if (rc != RT_EOK)
    {
        if (rc == -RT_EINTR && lwp_self() != RT_NULL)
            sys_exit(EXIT_SUCCESS);
        else
            LOG_I("%s: unexpected return code = %ld", __func__, rc);
    }

    return rc;
}
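
/* leave the process-wide critical section by releasing lwp->lwp_lock */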
rt_err_t lwp_critical_exit(struct rt_lwp *lwp)
{
    return lwp_mutex_release_safe(&lwp->lwp_lock);
}
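
/*
 * Illustrative usage sketch: callers bracket accesses to lwp-private state
 * with the critical-section pair, e.g.
 *
 *     if (lwp_critical_enter(lwp) == RT_EOK)
 *     {
 *         ... touch fields protected by lwp->lwp_lock ...
 *         lwp_critical_exit(lwp);
 *     }
 */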