lwp_pmutex.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021/01/02     bernard      the first version
 * 2022/12/18     bernard      fix the _m_lock to tid in user land.
 */
#include "lwp_internal.h"

#include <rtthread.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif
#include <sys/time.h>
#include <syscall_generic.h>

#define PMUTEX_NORMAL     0 /* non-recursive mutex */
#define PMUTEX_RECURSIVE  1 /* recursive locking is allowed */
#define PMUTEX_ERRORCHECK 2 /* this type of mutex provides error checking */
struct rt_pmutex
{
    union
    {
        rt_mutex_t kmutex;
        rt_sem_t ksem; /* use a semaphore to emulate the non-recursive mutex */
    } lock;

    struct lwp_avl_struct node;
    struct rt_object *custom_obj;
    rt_uint8_t type; /* pmutex type */
};
/*
 * userspace mutex definitions in musl (layout of pthread_mutex_t)
 */
struct rt_umutex
{
    union
    {
        int __i[6];
        volatile int __vi[6];
        volatile void *volatile __p[6];
    } __u;
};
#define _m_type    __u.__i[0]  /* bits [0-1] hold the mutex type */
#define _m_lock    __u.__vi[1] /* owner tid in user land */
#define _m_waiters __u.__vi[2]
#define _m_prev    __u.__p[3]
#define _m_next    __u.__p[4]
#define _m_count   __u.__i[5]
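
/*
 * _pmutex_lock serializes every lookup and creation of struct rt_pmutex in
 * the per-process AVL tree; pmutex_system_init() is registered through
 * INIT_PREV_EXPORT so the lock is initialized during early system init.
 */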
static struct rt_mutex _pmutex_lock;

static int pmutex_system_init(void)
{
    rt_mutex_init(&_pmutex_lock, "pmtxLock", RT_IPC_FLAG_FIFO);
    return 0;
}
INIT_PREV_EXPORT(pmutex_system_init);
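
/*
 * Destructor callback attached to the custom object: unlinks the pmutex from
 * the owning process's AVL tree, deletes the underlying kernel semaphore or
 * mutex, and frees the wrapper itself.
 */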
static rt_err_t pmutex_destory(void *data)
{
    rt_err_t ret = -1;
    rt_base_t level = 0;
    struct rt_pmutex *pmutex = (struct rt_pmutex *)data;

    if (pmutex)
    {
        level = rt_hw_interrupt_disable();
        /* remove pmutex from the process pmutex AVL tree */
        lwp_avl_remove(&pmutex->node, (struct lwp_avl_struct **)pmutex->node.data);
        rt_hw_interrupt_enable(level);

        if (pmutex->type == PMUTEX_NORMAL)
        {
            rt_sem_delete(pmutex->lock.ksem);
        }
        else
        {
            rt_mutex_delete(pmutex->lock.kmutex);
        }
        /* release object */
        rt_free(pmutex);
        ret = 0;
    }
    return ret;
}
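
/*
 * Create the kernel-side counterpart of a userspace mutex: a semaphore for
 * PMUTEX_NORMAL, a kernel mutex otherwise, wrapped in a custom object so it
 * can be reclaimed through the process object list, and indexed by the
 * userspace address in the process AVL tree.
 */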
static struct rt_pmutex* pmutex_create(void *umutex, struct rt_lwp *lwp)
{
    struct rt_pmutex *pmutex = RT_NULL;
    struct rt_object *obj = RT_NULL;
    rt_ubase_t type;

    if (!lwp)
    {
        return RT_NULL;
    }

    long *p = (long *)umutex;
    /* umutex[0] bits [0-1] hold the mutex type */
    type = *p & 3;
    if (type != PMUTEX_NORMAL && type != PMUTEX_RECURSIVE && type != PMUTEX_ERRORCHECK)
    {
        return RT_NULL;
    }

    pmutex = (struct rt_pmutex *)rt_malloc(sizeof(struct rt_pmutex));
    if (!pmutex)
    {
        return RT_NULL;
    }

    if (type == PMUTEX_NORMAL)
    {
        pmutex->lock.ksem = rt_sem_create("pmutex", 1, RT_IPC_FLAG_PRIO);
        if (!pmutex->lock.ksem)
        {
            rt_free(pmutex);
            return RT_NULL;
        }
    }
    else
    {
        pmutex->lock.kmutex = rt_mutex_create("pmutex", RT_IPC_FLAG_PRIO);
        if (!pmutex->lock.kmutex)
        {
            rt_free(pmutex);
            return RT_NULL;
        }
    }

    obj = rt_custom_object_create("pmutex", (void *)pmutex, pmutex_destory);
    if (!obj)
    {
        /* use the local type: pmutex->type has not been assigned yet */
        if (type == PMUTEX_NORMAL)
        {
            rt_sem_delete(pmutex->lock.ksem);
        }
        else
        {
            rt_mutex_delete(pmutex->lock.kmutex);
        }
        rt_free(pmutex);
        return RT_NULL;
    }

    pmutex->node.avl_key = (avl_key_t)umutex;
    pmutex->node.data = &lwp->address_search_head;
    pmutex->custom_obj = obj;
    pmutex->type = type;

    /* insert into the process pmutex AVL tree */
    lwp_avl_insert(&pmutex->node, &lwp->address_search_head);
    return pmutex;
}
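
/*
 * Look up the kernel pmutex registered for this userspace address; returns
 * RT_NULL if the mutex has not been initialized in this process yet.
 */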
static struct rt_pmutex* pmutex_get(void *umutex, struct rt_lwp *lwp)
{
    struct rt_pmutex *pmutex = RT_NULL;
    struct lwp_avl_struct *node = RT_NULL;

    node = lwp_avl_find((avl_key_t)umutex, lwp->address_search_head);
    if (!node)
    {
        return RT_NULL;
    }
    pmutex = rt_container_of(node, struct rt_pmutex, node);
    return pmutex;
}
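
/*
 * PMUTEX_INIT: bind a userspace pthread_mutex_t to a kernel object, creating
 * it on first use or resetting the existing kernel state when the same
 * address is initialized again.
 */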
static int _pthread_mutex_init(void *umutex)
{
    struct rt_lwp *lwp = RT_NULL;
    struct rt_pmutex *pmutex = RT_NULL;
    rt_err_t lock_ret = 0;

    /* umutex union is 6 x (void *) */
    if (!lwp_user_accessable(umutex, sizeof(void *) * 6))
    {
        rt_set_errno(EINVAL);
        return -EINVAL;
    }

    lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EAGAIN);
        return -EAGAIN;
    }

    lwp = lwp_self();
    pmutex = pmutex_get(umutex, lwp);
    if (pmutex == RT_NULL)
    {
        /* create a pmutex according to this umutex */
        pmutex = pmutex_create(umutex, lwp);
        if (pmutex == RT_NULL)
        {
            rt_mutex_release(&_pmutex_lock);
            rt_set_errno(ENOMEM);
            return -ENOMEM;
        }
        if (lwp_user_object_add(lwp, pmutex->custom_obj) != 0)
        {
            rt_custom_object_destroy(pmutex->custom_obj);
            rt_mutex_release(&_pmutex_lock); /* do not leak the global lock on this error path */
            rt_set_errno(ENOMEM);
            return -ENOMEM;
        }
    }
    else
    {
        /* the mutex is already known: reset its kernel state in place */
        rt_base_t level = rt_hw_interrupt_disable();
        if (pmutex->type == PMUTEX_NORMAL)
        {
            pmutex->lock.ksem->value = 1;
        }
        else
        {
            pmutex->lock.kmutex->owner            = RT_NULL;
            pmutex->lock.kmutex->priority         = 0xFF;
            pmutex->lock.kmutex->hold             = 0;
            pmutex->lock.kmutex->ceiling_priority = 0xFF;
        }
        rt_hw_interrupt_enable(level);
    }
    rt_mutex_release(&_pmutex_lock);
    return 0;
}
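
/*
 * PMUTEX_LOCK: take the kernel object that backs the userspace mutex.  A
 * NULL timeout blocks forever, a zero timeout acts as a trylock, and the
 * -ENOMEM return tells sys_pmutex() that the mutex has not been registered
 * yet so it can be lazily initialized.
 */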
static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
{
    struct rt_lwp *lwp = RT_NULL;
    struct rt_pmutex *pmutex = RT_NULL;
    struct rt_umutex *umutex_p = (struct rt_umutex *)umutex;
    rt_err_t lock_ret = 0;
    rt_int32_t time = RT_WAITING_FOREVER;
    register rt_base_t temp;

    if (!lwp_user_accessable((void *)umutex, sizeof(struct rt_umutex)))
    {
        rt_set_errno(EINVAL);
        return -EINVAL;
    }

    if (timeout)
    {
        if (!lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
        {
            rt_set_errno(EINVAL);
            return -EINVAL;
        }
        time = rt_timespec_to_tick(timeout);
    }

    lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EINTR);
        return -EINTR;
    }

    lwp = lwp_self();
    pmutex = pmutex_get(umutex, lwp);
    if (pmutex == RT_NULL)
    {
        rt_mutex_release(&_pmutex_lock);
        rt_set_errno(EINVAL);
        return -ENOMEM; /* umutex not recorded in kernel; caller may init and retry */
    }
    rt_mutex_release(&_pmutex_lock);

    switch (pmutex->type)
    {
    case PMUTEX_NORMAL:
        lock_ret = rt_sem_take_interruptible(pmutex->lock.ksem, time);
        break;
    case PMUTEX_RECURSIVE:
        lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
        if (lock_ret == RT_EOK)
        {
            /* mirror the owner tid into the userspace mutex word */
            umutex_p->_m_lock = rt_thread_self()->tid;
        }
        break;
    case PMUTEX_ERRORCHECK:
        temp = rt_hw_interrupt_disable();
        if (pmutex->lock.kmutex->owner == rt_thread_self())
        {
            /* relocking by the owner is an error for this type */
            rt_hw_interrupt_enable(temp);
            return -EDEADLK;
        }
        lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
        if (lock_ret == RT_EOK)
        {
            umutex_p->_m_lock = rt_thread_self()->tid;
        }
        rt_hw_interrupt_enable(temp);
        break;
    default: /* unknown type */
        return -EINVAL;
    }

    if (lock_ret != RT_EOK)
    {
        if (lock_ret == -RT_ETIMEOUT)
        {
            if (time == 0) /* a zero timeout means the trylock failed */
            {
                rt_set_errno(EBUSY);
                return -EBUSY;
            }
            else
            {
                rt_set_errno(ETIMEDOUT);
                return -ETIMEDOUT;
            }
        }
        else if (lock_ret == -RT_EINTR)
        {
            rt_set_errno(EINTR);
            return -EINTR;
        }
        else
        {
            rt_set_errno(EAGAIN);
            return -EAGAIN;
        }
    }
    return 0;
}
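
/*
 * PMUTEX_UNLOCK: release the kernel object and, once no hold remains on a
 * recursive/errorcheck mutex, clear the owner tid stored in the userspace
 * mutex word.  Unlocking a mutex that is not locked yields -EPERM.
 */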
static int _pthread_mutex_unlock(void *umutex)
{
    rt_err_t lock_ret = 0;
    struct rt_lwp *lwp = RT_NULL;
    struct rt_pmutex *pmutex = RT_NULL;
    struct rt_umutex *umutex_p = (struct rt_umutex *)umutex;

    lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EAGAIN);
        return -EAGAIN;
    }

    lwp = lwp_self();
    pmutex = pmutex_get(umutex, lwp);
    if (pmutex == RT_NULL)
    {
        rt_mutex_release(&_pmutex_lock);
        rt_set_errno(EPERM);
        return -EPERM; /* unlocking an unlocked (e.g. statically initialized) mutex with no kernel object */
    }
    rt_mutex_release(&_pmutex_lock);

    switch (pmutex->type)
    {
    case PMUTEX_NORMAL:
        if (pmutex->lock.ksem->value >= 1)
        {
            rt_set_errno(EPERM);
            return -EPERM; /* the mutex is not locked */
        }
        else
        {
            lock_ret = rt_sem_release(pmutex->lock.ksem);
        }
        break;
    case PMUTEX_RECURSIVE:
    case PMUTEX_ERRORCHECK:
        lock_ret = rt_mutex_release(pmutex->lock.kmutex);
        if ((lock_ret == RT_EOK) && pmutex->lock.kmutex->owner == RT_NULL)
        {
            /* the last hold was dropped: clear the tid in user land */
            umutex_p->_m_lock = 0;
        }
        break;
    default: /* unknown type */
        return -EINVAL;
    }

    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EPERM);
        return -EPERM;
    }
    return 0;
}
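
/*
 * PMUTEX_DESTROY: drop the custom object from the process object list, which
 * runs pmutex_destory() and releases the kernel semaphore/mutex.
 */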
static int _pthread_mutex_destroy(void *umutex)
{
    struct rt_lwp *lwp = RT_NULL;
    struct rt_pmutex *pmutex = RT_NULL;
    rt_err_t lock_ret = 0;

    lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EAGAIN);
        return -EAGAIN;
    }

    lwp = lwp_self();
    pmutex = pmutex_get(umutex, lwp);
    if (pmutex == RT_NULL)
    {
        rt_mutex_release(&_pmutex_lock);
        rt_set_errno(EINVAL);
        return -EINVAL;
    }

    lwp_user_object_delete(lwp, pmutex->custom_obj);
    rt_mutex_release(&_pmutex_lock);
    return 0;
}
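
/*
 * Syscall entry: dispatch the userspace pthread mutex operation.  PMUTEX_LOCK
 * transparently initializes a mutex that userspace has only touched through a
 * static initializer (-ENOMEM from the lock path triggers the lazy init).
 */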
sysret_t sys_pmutex(void *umutex, int op, void *arg)
{
    int ret = -EINVAL;

    switch (op)
    {
    case PMUTEX_INIT:
        ret = _pthread_mutex_init(umutex);
        break;
    case PMUTEX_LOCK:
        ret = _pthread_mutex_lock_timeout(umutex, (struct timespec *)arg);
        if (ret == -ENOMEM)
        {
            /* the lock is not initialized yet: initialize it, then try to lock again */
            ret = _pthread_mutex_init(umutex);
            if (ret == 0)
            {
                ret = _pthread_mutex_lock_timeout(umutex, (struct timespec *)arg);
            }
        }
        break;
    case PMUTEX_UNLOCK:
        ret = _pthread_mutex_unlock(umutex);
        break;
    case PMUTEX_DESTROY:
        ret = _pthread_mutex_destroy(umutex);
        break;
    default:
        rt_set_errno(EINVAL);
        break;
    }
    return ret;
}
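
/*
 * Illustrative userspace sketch (not part of this file): how a libc-side
 * wrapper might drive sys_pmutex().  The stub name "pmutex_syscall" and the
 * trap mechanism are assumptions made for illustration; only the op codes,
 * return conventions, and argument layout come from this file.
 *
 *   int pmutex_syscall(void *umutex, int op, void *arg);  // hypothetical stub
 *
 *   int my_mutex_timedlock(pthread_mutex_t *m, const struct timespec *ts)
 *   {
 *       // ts == NULL blocks forever; a zero timeout behaves like a trylock
 *       return pmutex_syscall(m, PMUTEX_LOCK, (void *)ts);
 *   }
 *
 *   int my_mutex_unlock(pthread_mutex_t *m)
 *   {
 *       return pmutex_syscall(m, PMUTEX_UNLOCK, NULL);
 *   }
 */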