lwp_futex.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021/01/02     bernard      the first version
 * 2023-07-25     Shell        Remove usage of rt_hw_interrupt API in the lwp
 *                             Coding style: remove multiple `return` in a routine
 * 2023-08-08     Shell        Fix return value of futex(wait); Fix ops that only
 *                             FUTEX_PRIVATE is supported currently
 * 2023-11-03     Shell        Add Support for ~FUTEX_PRIVATE
 * 2023-11-16     xqyjlj       Add Support for futex requeue and futex pi
 */

#define __RT_IPC_SOURCE__
#include "lwp_futex_internal.h"
#include "sys/time.h"
#include <stdatomic.h>

struct rt_mutex _glob_futex;

rt_err_t lwp_futex_init(void)
{
    return rt_mutex_init(&_glob_futex, "glob_ftx", RT_IPC_FLAG_PRIO);
}
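
/**
 * Lock/unlock the futex bookkeeping structures.
 * For FUTEX_PRIVATE operations the per-lwp lock is sufficient; for shared
 * futexes the global futex mutex is taken instead, since the waiter list
 * can be reached from more than one process.
 */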
static void _futex_lock(rt_lwp_t lwp, int op_flags)
{
    rt_err_t error;

    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_LOCK(lwp);
    }
    else
    {
        error = lwp_mutex_take_safe(&_glob_futex, RT_WAITING_FOREVER, 0);
        if (error)
        {
            LOG_E("%s: should never fail", __func__);
            RT_ASSERT(0);
        }
    }
}

static void _futex_unlock(rt_lwp_t lwp, int op_flags)
{
    rt_err_t error;

    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_UNLOCK(lwp);
    }
    else
    {
        error = lwp_mutex_release_safe(&_glob_futex);
        if (error)
        {
            LOG_E("%s: should never fail", __func__);
            RT_ASSERT(0);
        }
    }
}

/**
 * Destroy a Private FuTeX (pftx)
 * Note: must have futex address_search_head taken
 */
static rt_err_t _pftx_destroy_locked(void *data)
{
    rt_err_t ret = -1;
    rt_futex_t futex = (rt_futex_t)data;

    if (futex)
    {
        /**
         * Brief: Delete the futex from lwp address_search_head
         *
         * Note: Critical Section
         * - the lwp (READ. share by thread)
         * - the lwp address_search_head (RW. protected by caller. for destroy
         *   routine, it's always safe because it has already taken a write lock
         *   to the lwp.)
         */
        lwp_avl_remove(&futex->node,
                       (struct lwp_avl_struct **)futex->node.data);

        /* release object */
        if (futex->mutex)
        {
            rt_mutex_delete(futex->mutex);
            futex->mutex = RT_NULL;
        }
        rt_free(futex);
        ret = 0;
    }
    return ret;
}

/**
 * Create a Private FuTeX (pftx)
 * Note: must have futex address_search_head taken
 */
static rt_futex_t _pftx_create_locked(int *uaddr, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;

    /**
     * Brief: Create a futex under current lwp
     *
     * Note: Critical Section
     * - lwp (READ; share with thread)
     */
    if (lwp)
    {
        futex = (rt_futex_t)rt_malloc(sizeof(struct rt_futex));
        if (futex)
        {
            /* Create a Private FuTeX (pftx) */
            obj = rt_custom_object_create("pftx", (void *)futex,
                                          _pftx_destroy_locked);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                /**
                 * Brief: Add futex to user object tree for resource recycling
                 *
                 * Note: Critical Section
                 * - lwp user object tree (RW; protected by API)
                 * - futex (if the adding is successful, others can find the
                 *   unready futex. However, only lwp_free does this, and it is
                 *   protected by the reference held by the lwp thread, so
                 *   lwp_free never executes at the same time.)
                 */
                if (lwp_user_object_add(lwp, obj))
                {
                    /* this calls _pftx_destroy_locked, but that's okay */
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    futex->node.avl_key = (avl_key_t)uaddr;
                    futex->node.data = &lwp->address_search_head;
                    futex->custom_obj = obj;
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));

                    /**
                     * Brief: Insert into futex head
                     *
                     * Note: Critical Section
                     * - lwp address_search_head (RW; protected by caller)
                     */
                    lwp_avl_insert(&futex->node, &lwp->address_search_head);
                }
            }
        }
    }
    return futex;
}

/**
 * Get the Private FuTeX (pftx) matching (lwp, uaddr, op)
 */
static rt_futex_t _pftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    struct lwp_avl_struct *node = RT_NULL;
    rt_futex_t futex = RT_NULL;
    rt_err_t error = -1;

    LWP_LOCK(lwp);

    /**
     * Note: Critical Section
     * protect lwp address_search_head (READ)
     */
    node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
    if (node)
    {
        futex = rt_container_of(node, struct rt_futex, node);
        error = 0;
    }
    else
    {
        /* create a futex for this uaddr */
        futex = _pftx_create_locked(uaddr, lwp);
        if (!futex)
            error = -ENOMEM;
        else
            error = 0;
    }

    LWP_UNLOCK(lwp);
    *rc = error;
    return futex;
}

/**
 * Destroy a Shared FuTeX (sftx)
 * Note: must have futex address_search_head taken
 */
static rt_err_t _sftx_destroy(void *data)
{
    rt_err_t ret = -1;
    rt_futex_t futex = (rt_futex_t)data;

    if (futex)
    {
        /* delete it even if it's not in the table */
        futex_global_table_delete(&futex->entry.key);

        if (futex->mutex)
        {
            rt_mutex_delete(futex->mutex);
            futex->mutex = RT_NULL;
        }
        rt_free(futex);
        ret = 0;
    }
    return ret;
}

/**
 * Create a Shared FuTeX (sftx)
 */
static rt_futex_t _sftx_create(struct shared_futex_key *key, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;

    if (lwp)
    {
        futex = (rt_futex_t)rt_calloc(1, sizeof(struct rt_futex));
        if (futex)
        {
            /* create a Shared FuTeX (sftx) */
            obj = rt_custom_object_create("sftx", (void *)futex, _sftx_destroy);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                if (futex_global_table_add(key, futex))
                {
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));
                    futex->custom_obj = obj;
                }
            }
        }
    }
    return futex;
}

/**
 * Get the Shared FuTeX (sftx) matching (lwp, uaddr, op)
 */
static rt_futex_t _sftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;
    struct shared_futex_key key;
    rt_varea_t varea;
    rt_err_t error = -1;

    RD_LOCK(lwp->aspace);
    varea = rt_aspace_query(lwp->aspace, uaddr);
    if (varea)
    {
        key.mobj = varea->mem_obj;
        key.offset = ((varea->offset) << MM_PAGE_SHIFT) |
                     ((long)uaddr & ((1 << MM_PAGE_SHIFT) - 1));
        RD_UNLOCK(lwp->aspace);

        /* query for the key */
        _futex_lock(lwp, op & ~FUTEX_PRIVATE);
        error = futex_global_table_find(&key, &futex);
        if (error != RT_EOK)
        {
            /* not found, do allocation */
            futex = _sftx_create(&key, lwp);
            if (!futex)
                error = -ENOMEM;
            else
                error = 0;
        }
        _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
    }
    else
    {
        RD_UNLOCK(lwp->aspace);
    }

    *rc = error;
    return futex;
}

/* must have futex address_search_head taken */
static rt_futex_t _futex_get(void *uaddr, struct rt_lwp *lwp, int op_flags,
                             rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;

    if (op_flags & FUTEX_PRIVATE)
    {
        futex = _pftx_get(uaddr, lwp, op_flags, rc);
    }
    else
    {
        futex = _sftx_get(uaddr, lwp, op_flags, rc);
    }
    return futex;
}

static rt_err_t _suspend_thread_timeout_locked(rt_thread_t thread,
                                               rt_futex_t futex,
                                               rt_tick_t timeout)
{
    rt_err_t rc;

    /**
     * Brief: Add current thread into futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    rc = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                   RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);

    if (rc == RT_EOK)
    {
        /* start the timer of thread */
        rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME,
                         &timeout);
        rt_timer_start(&(thread->thread_timer));
        rt_set_errno(ETIMEDOUT);
    }
    return rc;
}

static rt_err_t _suspend_thread_locked(rt_thread_t thread, rt_futex_t futex)
{
    /**
     * Brief: Add current thread into futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    return rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                     RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
}
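
/**
 * Atomically compare-and-swap the futex word in user memory.
 * Returns 0 on success; if the word no longer matches `uval`, the value
 * actually observed is stored in `*curval` and -EAGAIN is returned;
 * -EFAULT is returned when the user address is not accessible.
 */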
rt_inline int _futex_cmpxchg_value(int *curval, int *uaddr, int uval,
                                   int newval)
{
    int err = 0;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        err = -EFAULT;
        goto exit;
    }

    if (!atomic_compare_exchange_strong(uaddr, &uval, newval))
    {
        *curval = uval;
        err = -EAGAIN;
    }

exit:
    return err;
}
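
/**
 * FUTEX_WAIT: if the futex word still equals `value`, put the caller to
 * sleep on the futex wait queue (optionally with a timeout derived from
 * `timeout`); otherwise fail immediately with -EAGAIN.
 */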
static int _futex_wait(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                       int value, const struct timespec *timeout, int op_flags)
{
    rt_tick_t to;
    rt_thread_t thread;
    rt_err_t rc = -RT_EINTR;

    /**
     * Brief: Remove the current thread from the scheduler and append it to
     * the waiting thread list of the futex. If a timeout is specified, a
     * timer is set up for the current thread.
     *
     * Note: Critical Section
     * - futex.waiting (RW; protected by lwp_lock)
     * - the local cpu
     */
    _futex_lock(lwp, op_flags);
    if (*uaddr == value)
    {
        thread = rt_thread_self();

        if (timeout)
        {
            to = timeout->tv_sec * RT_TICK_PER_SECOND;
            to +=
                (timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND;

            if (to < 0)
            {
                rc = -EINVAL;
                _futex_unlock(lwp, op_flags);
            }
            else
            {
                rt_enter_critical();
                rc = _suspend_thread_timeout_locked(thread, futex, to);
                _futex_unlock(lwp, op_flags);
                rt_exit_critical();
            }
        }
        else
        {
            rt_enter_critical();
            rc = _suspend_thread_locked(thread, futex);
            _futex_unlock(lwp, op_flags);
            rt_exit_critical();
        }

        if (rc == RT_EOK)
        {
            /* do schedule */
            rt_schedule();

            /* check errno */
            rc = rt_get_errno();
            rc = rc > 0 ? -rc : rc;
        }
    }
    else
    {
        _futex_unlock(lwp, op_flags);
        rc = -EAGAIN;
        rt_set_errno(EAGAIN);
    }
    return rc;
}

static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
                        int op_flags)
{
    long woken_cnt = 0;
    int is_empty = 0;

    /**
     * Brief: Wake up to `number` suspended threads on the futex waiting
     * thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (number && !is_empty)
    {
        _futex_lock(lwp, op_flags);
        if (rt_susp_list_dequeue(&futex->waiting_thread, RT_EOK))
        {
            number--;
            woken_cnt++;
            is_empty = RT_FALSE;
        }
        else
        {
            is_empty = RT_TRUE;
        }
        _futex_unlock(lwp, op_flags);
    }

    /* do schedule */
    rt_schedule();

    return woken_cnt;
}

/**
 * Brief: Wake up to nr_wake waiters on futex1.
 * If there are more waiters on futex1 than nr_wake, move at most
 * nr_requeue of the remaining waiters onto the waiting queue of futex2.
 */
static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
                           struct rt_lwp *lwp, int nr_wake, int nr_requeue,
                           int opflags)
{
    long rtn;
    long woken_cnt = 0;
    int is_empty = 0;
    rt_thread_t thread;

    if (futex1 == futex2)
    {
        return -EINVAL;
    }

    /**
     * Brief: Wake up suspended threads on the futex1 waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (nr_wake && !is_empty)
    {
        if (rt_susp_list_dequeue(&futex1->waiting_thread, RT_EOK))
        {
            nr_wake--;
            woken_cnt++;
            is_empty = RT_FALSE;
        }
        else
        {
            is_empty = RT_TRUE;
        }
    }
    rtn = woken_cnt;

    /**
     * Brief: Requeue
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (!is_empty && nr_requeue)
    {
        rt_sched_lock_level_t slvl;
        rt_sched_lock(&slvl);

        /* moving from one susp list to another */
        is_empty = rt_list_isempty(&(futex1->waiting_thread));
        if (!is_empty)
        {
            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
            rt_list_insert_before(&(futex2->waiting_thread),
                                  &RT_THREAD_LIST_NODE(thread));
            nr_requeue--;
            rtn++;
        }
        rt_sched_unlock(slvl);
    }

    /* do schedule */
    rt_schedule();

    return rtn;
}
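
/**
 * FUTEX_LOCK_PI / FUTEX_TRYLOCK_PI
 * The futex word follows the usual PI convention: the low bits
 * (FUTEX_TID_MASK) hold the owner's TID, FUTEX_WAITERS is set while
 * threads are blocked on the lock, and FUTEX_OWNER_DIED marks a dead owner.
 */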
/* timeout argument measured against the CLOCK_REALTIME clock. */
static long _futex_lock_pi(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                           const struct timespec *timeout, int op_flags,
                           rt_bool_t trylock)
{
    int word = 0, nword, cword;
    int tid = 0;
    rt_err_t err = 0;
    rt_thread_t thread = RT_NULL, current_thread = RT_NULL;
    rt_tick_t to = RT_WAITING_FOREVER;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -EFAULT;
    }

    current_thread = rt_thread_self();

    _futex_lock(lwp, op_flags);
    lwp_get_from_user(&word, (void *)uaddr, sizeof(int));
    tid = word & FUTEX_TID_MASK;

    if (word == 0)
    {
        /* If the value is 0, then the kernel tries to atomically set the
           futex value to the caller's TID. */
        nword = current_thread->tid;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        _futex_unlock(lwp, op_flags);
        return 0;
    }
    else
    {
        thread = lwp_tid_get_thread_and_inc_ref(tid);
        if (thread == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ESRCH;
        }
        lwp_tid_dec_ref(thread);

        nword = word | FUTEX_WAITERS;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        word = nword;
    }

    if (futex->mutex == RT_NULL)
    {
        futex->mutex = rt_mutex_create("futexpi", RT_IPC_FLAG_PRIO);
        if (futex->mutex == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ENOMEM;
        }

        /* set mutex->owner */
        rt_spin_lock(&(futex->mutex->spinlock));
        futex->mutex->owner = thread;
        futex->mutex->hold = 1;
        rt_spin_unlock(&(futex->mutex->spinlock));
    }

    if (timeout)
    {
        to = rt_timespec_to_tick(timeout);
    }
    if (trylock)
    {
        to = RT_WAITING_NO;
    }
    _futex_unlock(lwp, op_flags);

    err = rt_mutex_take_interruptible(futex->mutex, to);
    if (err == -RT_ETIMEOUT)
    {
        err = -EDEADLK;
    }

    _futex_lock(lwp, op_flags);
    nword = current_thread->tid | FUTEX_WAITERS;
    if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
    {
        err = -EAGAIN;
    }
    _futex_unlock(lwp, op_flags);

    return err;
}
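
/**
 * FUTEX_UNLOCK_PI: release the PI mutex backing this futex; waking the next
 * waiter and handing over ownership are delegated to rt_mutex_release().
 */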
static long _futex_unlock_pi(rt_futex_t futex, struct rt_lwp *lwp, int op_flags)
{
    rt_err_t err = 0;

    _futex_lock(lwp, op_flags);
    if (!futex->mutex)
    {
        _futex_unlock(lwp, op_flags);
        return -EPERM;
    }
    _futex_unlock(lwp, op_flags);

    err = rt_mutex_release(futex->mutex);

    return err;
}

#include <syscall_generic.h>

rt_inline rt_bool_t _timeout_ignored(int op)
{
    /**
     * According to the futex(2) manual, `timeout` is ignored for
     * FUTEX_WAKE, FUTEX_FD, FUTEX_WAKE_BITSET, FUTEX_TRYLOCK_PI and
     * FUTEX_UNLOCK_PI. For FUTEX_REQUEUE and FUTEX_CMP_REQUEUE the same
     * argument slot carries the number of waiters to requeue instead, so
     * it must not be validated as a timespec either.
     */
    return ((op & (FUTEX_WAKE)) || (op & (FUTEX_REQUEUE)) ||
            (op & (FUTEX_CMP_REQUEUE)) || (op & (FUTEX_UNLOCK_PI)) ||
            (op & (FUTEX_TRYLOCK_PI)));
}
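
/*
 * A typical userspace call sequence (illustrative only, assuming a
 * Linux-style raw syscall wrapper is available to the application):
 *
 *   int word = 0;
 *   // waiter: sleep while the word still holds the expected value 0
 *   syscall(SYS_futex, &word, FUTEX_WAIT | FUTEX_PRIVATE, 0, NULL, NULL, 0);
 *   // waker (another thread): publish the change, then wake one waiter
 *   word = 1;
 *   syscall(SYS_futex, &word, FUTEX_WAKE | FUTEX_PRIVATE, 1, NULL, NULL, 0);
 */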
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
                   int *uaddr2, int val3)
{
    struct rt_lwp *lwp = RT_NULL;
    sysret_t ret = 0;

    if (!lwp_user_accessable(uaddr, sizeof(int)))
    {
        ret = -EFAULT;
    }
    else if (timeout && !_timeout_ignored(op) &&
             !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
    {
        ret = -EINVAL;
    }
    else
    {
        lwp = lwp_self();
        ret = lwp_futex(lwp, uaddr, op, val, timeout, uaddr2, val3);
    }
    return ret;
}

#define FUTEX_FLAGS (FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME)
rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
                   const struct timespec *timeout, int *uaddr2, int val3)
{
    rt_futex_t futex, futex2;
    rt_err_t rc = 0;
    int op_type = op & ~FUTEX_FLAGS;
    int op_flags = op & FUTEX_FLAGS;

    futex = _futex_get(uaddr, lwp, op_flags, &rc);
    if (!rc)
    {
        switch (op_type)
        {
            case FUTEX_WAIT:
                rc = _futex_wait(futex, lwp, uaddr, val, timeout, op_flags);
                break;
            case FUTEX_WAKE:
                rc = _futex_wake(futex, lwp, val, op_flags);
                break;
            case FUTEX_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                if (!rc)
                {
                    _futex_lock(lwp, op_flags);
                    rc = _futex_requeue(futex, futex2, lwp, val, (long)timeout,
                                        op_flags);
                    _futex_unlock(lwp, op_flags);
                }
                break;
            case FUTEX_CMP_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                _futex_lock(lwp, op_flags);
                if (*uaddr == val3)
                {
                    rc = 0;
                }
                else
                {
                    rc = -EAGAIN;
                }
                if (rc == 0)
                {
                    rc = _futex_requeue(futex, futex2, lwp, val,
                                        (long)timeout, op_flags);
                }
                _futex_unlock(lwp, op_flags);
                break;
            case FUTEX_LOCK_PI:
                rc = _futex_lock_pi(futex, lwp, uaddr, timeout, op_flags,
                                    RT_FALSE);
                break;
            case FUTEX_UNLOCK_PI:
                rc = _futex_unlock_pi(futex, lwp, op_flags);
                break;
            case FUTEX_TRYLOCK_PI:
                rc = _futex_lock_pi(futex, lwp, uaddr, 0, op_flags, RT_TRUE);
                break;
            default:
                LOG_W("User requested op=%d which is not implemented", op);
                rc = -ENOSYS;
                break;
        }
    }
    return rc;
}
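
/**
 * Fetch one robust_list pointer from user memory. The low bit of the stored
 * pointer marks a PI futex (following the Linux robust-list convention), so
 * it is split out into `is_pi` and masked off the returned entry address.
 */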
rt_inline int _fetch_robust_entry(struct robust_list **entry,
                                  struct robust_list **head, rt_bool_t *is_pi)
{
    unsigned long uentry;

    if (!lwp_user_accessable((void *)head, sizeof(*head)))
    {
        return -EFAULT;
    }
    if (lwp_get_from_user(&uentry, (void *)head, sizeof(*head)) !=
        sizeof(*head))
    {
        return -EFAULT;
    }

    *entry = (void *)(uentry & ~1UL);
    *is_pi = uentry & 1;

    return 0;
}

static int _handle_futex_death(int *uaddr, rt_thread_t thread, rt_bool_t is_pi,
                               rt_bool_t is_pending_op)
{
    int word, cword = 0, nword;
    rt_err_t rc;
    struct rt_lwp *lwp;
    rt_futex_t futex;

    /* Futex address must be 32-bit aligned */
    if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
        return -1;

    lwp = thread->lwp;

retry:
    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -1;
    }
    if (lwp_get_from_user(&word, (void *)uaddr, sizeof(*uaddr)) !=
        sizeof(*uaddr))
    {
        return -1;
    }

    futex = _futex_get(uaddr, lwp, FUTEX_PRIVATE, &rc);
    if (is_pending_op && !is_pi && !word)
    {
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
        return 0;
    }

    if ((word & FUTEX_TID_MASK) != thread->tid)
        return 0;

    nword = (word & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

    if ((rc = _futex_cmpxchg_value(&cword, uaddr, word, nword)))
    {
        switch (rc)
        {
            case -EFAULT:
                return -1;
            case -EAGAIN:
                rt_schedule();
                goto retry;
            default:
                LOG_W("unknown errno: %d in '%s'", rc, __FUNCTION__);
                return rc;
        }
    }

    if (cword != word)
        goto retry;

    if (!is_pi && (word & FUTEX_WAITERS))
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);

    return 0;
}

/**
 * Brief: Walk thread->robust_list, mark any locks found there dead,
 * and notify any waiters.
 *
 * Note: be very careful, it's a userspace list!
 */
void lwp_futex_exit_robust_list(rt_thread_t thread)
{
    struct robust_list *entry = RT_NULL;
    struct robust_list *next_entry = RT_NULL;
    struct robust_list *pending = RT_NULL;
    struct robust_list_head *head;
    unsigned int limit = 2048;
    rt_bool_t pi, pip, next_pi;
    unsigned long futex_offset;
    int rc;

    head = thread->robust_list;
    if (head == RT_NULL)
        return;

    if (_fetch_robust_entry(&entry, &head->list.next, &pi))
        return;

    if (!lwp_user_accessable((void *)&head->futex_offset,
                             sizeof(head->futex_offset)))
    {
        return;
    }
    if (lwp_get_from_user(&futex_offset, (void *)&head->futex_offset,
                          sizeof(head->futex_offset)) !=
        sizeof(head->futex_offset))
    {
        return;
    }

    if (_fetch_robust_entry(&pending, &head->list_op_pending, &pip))
    {
        return;
    }

    while (entry != &head->list)
    {
        rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);

        if (entry != pending)
        {
            if (_handle_futex_death((int *)((size_t)entry + futex_offset),
                                    thread, pi, RT_FALSE))
                return;
        }
        if (rc)
            return;

        entry = next_entry;
        pi = next_pi;

        if (!--limit)
            break;
    }

    if (pending)
    {
        _handle_futex_death((void *)pending + futex_offset, thread, pip,
                            RT_TRUE);
    }
}