lwp_futex.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021/01/02     bernard      the first version
 * 2023-07-25     Shell        Remove usage of rt_hw_interrupt API in the lwp
 *                             Coding style: remove multiple `return` in a routine
 * 2023-08-08     Shell        Fix return value of futex(wait); Fix ops that only
 *                             FUTEX_PRIVATE is supported currently
 * 2023-11-03     Shell        Add Support for ~FUTEX_PRIVATE
 * 2023-11-16     xqyjlj       Add Support for futex requeue and futex pi
 */
#define __RT_IPC_SOURCE__
#include "lwp_futex_internal.h"
#include "sys/time.h"
#include <stdatomic.h>

struct rt_mutex _glob_futex;

rt_err_t lwp_futex_init(void)
{
    return rt_mutex_init(&_glob_futex, "glob_ftx", RT_IPC_FLAG_PRIO);
}

static void _futex_lock(rt_lwp_t lwp, int op_flags)
{
    rt_err_t error;
    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_LOCK(lwp);
    }
    else
    {
        error = lwp_mutex_take_safe(&_glob_futex, RT_WAITING_FOREVER, 0);
        if (error)
        {
            LOG_E("%s: should not fail", __func__);
            RT_ASSERT(0);
        }
    }
}

static void _futex_unlock(rt_lwp_t lwp, int op_flags)
{
    rt_err_t error;
    if (op_flags & FUTEX_PRIVATE)
    {
        LWP_UNLOCK(lwp);
    }
    else
    {
        error = lwp_mutex_release_safe(&_glob_futex);
        if (error)
        {
            LOG_E("%s: should not fail", __func__);
            RT_ASSERT(0);
        }
    }
}
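
/*
 * Locking sketch (illustrative): the FUTEX_PRIVATE bit decides which lock
 * serializes an operation. A private futex is visible only inside one lwp,
 * so its per-process LWP_LOCK() is enough; a shared futex may be reached
 * from several processes, so the single global mutex `_glob_futex` is taken
 * instead. A caller that must always take the shared path simply clears the
 * bit, e.g.:
 *
 *     _futex_lock(lwp, op & ~FUTEX_PRIVATE);    // force the global lock
 *     // ... touch the global shared-futex table ...
 *     _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
 */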

/**
 * Destroy a Private FuTeX (pftx)
 * Note: must have futex address_search_head taken
 */
static rt_err_t _pftx_destroy_locked(void *data)
{
    rt_err_t ret = -1;
    rt_futex_t futex = (rt_futex_t)data;
    if (futex)
    {
        /**
         * Brief: Delete the futex from lwp address_search_head
         *
         * Note: Critical Section
         * - the lwp (READ. share by thread)
         * - the lwp address_search_head (RW. protected by caller. for destroy
         *   routine, it's always safe because it has already taken a write
         *   lock to the lwp.)
         */
        lwp_avl_remove(&futex->node,
                       (struct lwp_avl_struct **)futex->node.data);

        /* release object */
        if (futex->mutex)
        {
            rt_mutex_delete(futex->mutex);
            futex->mutex = RT_NULL;
        }
        rt_free(futex);
        ret = 0;
    }
    return ret;
}

/**
 * Create a Private FuTeX (pftx)
 * Note: must have futex address_search_head taken
 */
static rt_futex_t _pftx_create_locked(int *uaddr, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;

    /**
     * Brief: Create a futex under current lwp
     *
     * Note: Critical Section
     * - lwp (READ; share with thread)
     */
    if (lwp)
    {
        futex = (rt_futex_t)rt_malloc(sizeof(struct rt_futex));
        if (futex)
        {
            /* Create a Private FuTeX (pftx) */
            obj = rt_custom_object_create("pftx", (void *)futex,
                                          _pftx_destroy_locked);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                /**
                 * Brief: Add futex to user object tree for resource recycling
                 *
                 * Note: Critical Section
                 * - lwp user object tree (RW; protected by API)
                 * - futex (if the adding is successful, others can find the
                 *   unready futex. However, only the lwp_free will do this,
                 *   and this is protected by the ref taken by the lwp thread
                 *   that the lwp_free will never execute at the same time)
                 */
                if (lwp_user_object_add(lwp, obj))
                {
                    /* this will call a _pftx_destroy_locked, but that's okay */
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    futex->node.avl_key = (avl_key_t)uaddr;
                    futex->node.data = &lwp->address_search_head;
                    futex->custom_obj = obj;
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));

                    /**
                     * Brief: Insert into futex head
                     *
                     * Note: Critical Section
                     * - lwp address_search_head (RW; protected by caller)
                     */
                    lwp_avl_insert(&futex->node, &lwp->address_search_head);
                }
            }
        }
    }
    return futex;
}

/**
 * Get a Private FuTeX (pftx) matching the (lwp, uaddr, op)
 */
static rt_futex_t _pftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    struct lwp_avl_struct *node = RT_NULL;
    rt_futex_t futex = RT_NULL;
    rt_err_t error = -1;

    LWP_LOCK(lwp);

    /**
     * Note: Critical Section
     * protect lwp address_search_head (READ)
     */
    node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
    if (node)
    {
        futex = rt_container_of(node, struct rt_futex, node);
        error = 0;
    }
    else
    {
        /* create a futex according to this uaddr */
        futex = _pftx_create_locked(uaddr, lwp);
        if (!futex)
            error = -ENOMEM;
        else
            error = 0;
    }

    LWP_UNLOCK(lwp);
    *rc = error;
    return futex;
}

/**
 * Destroy a Shared FuTeX (sftx)
 * Note: must have futex address_search_head taken
 */
static rt_err_t _sftx_destroy(void *data)
{
    rt_err_t ret = -1;
    rt_futex_t futex = (rt_futex_t)data;

    if (futex)
    {
        /* delete it even if it's not in the table */
        futex_global_table_delete(&futex->entry.key);

        if (futex->mutex)
        {
            rt_mutex_delete(futex->mutex);
            futex->mutex = RT_NULL;
        }
        rt_free(futex);
        ret = 0;
    }
    return ret;
}

/**
 * Create a Shared FuTeX (sftx)
 */
static rt_futex_t _sftx_create(struct shared_futex_key *key, struct rt_lwp *lwp)
{
    rt_futex_t futex = RT_NULL;
    struct rt_object *obj = RT_NULL;

    if (lwp)
    {
        futex = (rt_futex_t)rt_calloc(1, sizeof(struct rt_futex));
        if (futex)
        {
            /* create a Shared FuTeX (sftx) */
            obj = rt_custom_object_create("sftx", (void *)futex, _sftx_destroy);
            if (!obj)
            {
                rt_free(futex);
                futex = RT_NULL;
            }
            else
            {
                if (futex_global_table_add(key, futex))
                {
                    rt_object_delete(obj);
                    rt_free(futex);
                    futex = RT_NULL;
                }
                else
                {
                    futex->mutex = RT_NULL;
                    rt_list_init(&(futex->waiting_thread));
                    futex->custom_obj = obj;
                }
            }
        }
    }
    return futex;
}

/**
 * Get a Shared FuTeX (sftx) matching the (lwp, uaddr, op)
 */
static rt_futex_t _sftx_get(void *uaddr, struct rt_lwp *lwp, int op,
                            rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;
    struct shared_futex_key key;
    rt_varea_t varea;
    rt_err_t error = -1;

    RD_LOCK(lwp->aspace);
    varea = rt_aspace_query(lwp->aspace, uaddr);
    if (varea)
    {
        key.mobj = varea->mem_obj;
        key.offset = ((varea->offset) << MM_PAGE_SHIFT) |
                     ((long)uaddr & ((1 << MM_PAGE_SHIFT) - 1));
        RD_UNLOCK(lwp->aspace);

        /* query for the key */
        _futex_lock(lwp, op & ~FUTEX_PRIVATE);
        error = futex_global_table_find(&key, &futex);
        if (error != RT_EOK)
        {
            /* not found, do allocation */
            futex = _sftx_create(&key, lwp);
            if (!futex)
                error = -ENOMEM;
            else
                error = 0;
        }
        _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
    }
    else
    {
        RD_UNLOCK(lwp->aspace);
    }

    *rc = error;
    return futex;
}
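
/*
 * Key construction example (a sketch; assumes 4 KiB pages, i.e.
 * MM_PAGE_SHIFT == 12): a shared futex is identified by the backing memory
 * object plus the word's offset within that object, so every mapping of the
 * same page resolves to the same key. For a varea whose page offset in the
 * object is 5 and a futex word located 0x24c bytes into its page:
 *
 *     key.offset = (5 << 12) | 0x24c = 0x524c
 */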

/* must have futex address_search_head taken */
static rt_futex_t _futex_get(void *uaddr, struct rt_lwp *lwp, int op_flags,
                             rt_err_t *rc)
{
    rt_futex_t futex = RT_NULL;

    if (op_flags & FUTEX_PRIVATE)
    {
        futex = _pftx_get(uaddr, lwp, op_flags, rc);
    }
    else
    {
        futex = _sftx_get(uaddr, lwp, op_flags, rc);
    }
    return futex;
}

static rt_err_t _suspend_thread_timeout_locked(rt_thread_t thread,
                                               rt_futex_t futex,
                                               rt_tick_t timeout)
{
    rt_err_t rc;

    /**
     * Brief: Add current thread into futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    rc = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                   RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
    if (rc == RT_EOK)
    {
        /* start the timer of thread */
        rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME,
                         &timeout);
        rt_timer_start(&(thread->thread_timer));
        rt_set_errno(ETIMEDOUT);
    }

    return rc;
}

static rt_err_t _suspend_thread_locked(rt_thread_t thread, rt_futex_t futex)
{
    /**
     * Brief: Add current thread into futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    return rt_thread_suspend_to_list(thread, &futex->waiting_thread,
                                     RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
}

rt_inline int _futex_cmpxchg_value(int *curval, int *uaddr, int uval,
                                   int newval)
{
    int err = 0;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        err = -EFAULT;
        goto exit;
    }

    if (!atomic_compare_exchange_strong(uaddr, &uval, newval))
    {
        *curval = uval;
        err = -EAGAIN;
    }

exit:
    return err;
}
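
/*
 * Semantics sketch of the helper above: if the user word still holds `uval`
 * it is replaced by `newval`; otherwise the value actually observed is
 * reported through `*curval` and -EAGAIN is returned. A hypothetical caller
 * claiming an unlocked PI futex (word 0 -> its own tid) would look like:
 *
 *     int seen;
 *     if (_futex_cmpxchg_value(&seen, uaddr, 0, my_tid) == 0)
 *         ;  // we now own the futex word
 *     else
 *         ;  // raced: `seen` holds the word written by the winner
 *
 * (`my_tid` is illustrative; see _futex_lock_pi() below for the real use.)
 */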

static int _futex_wait(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                       int value, const struct timespec *timeout, int op_flags)
{
    rt_tick_t to;
    rt_thread_t thread;
    rt_err_t rc = -RT_EINTR;

    /**
     * Brief: Remove the current thread from the scheduler and append it to
     * the waiting thread list of the futex. If a timeout is specified,
     * a timer is set up for the current thread.
     *
     * Note: Critical Section
     * - futex.waiting (RW; Protected by lwp_lock)
     * - the local cpu
     */
    _futex_lock(lwp, op_flags);
    if (*uaddr == value)
    {
        thread = rt_thread_self();

        if (timeout)
        {
            to = timeout->tv_sec * RT_TICK_PER_SECOND;
            to += (timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND;

            if (to < 0)
            {
                rc = -EINVAL;
                _futex_unlock(lwp, op_flags);
            }
            else
            {
                rt_enter_critical();
                rc = _suspend_thread_timeout_locked(thread, futex, to);
                _futex_unlock(lwp, op_flags);
                rt_exit_critical();
            }
        }
        else
        {
            rt_enter_critical();
            rc = _suspend_thread_locked(thread, futex);
            _futex_unlock(lwp, op_flags);
            rt_exit_critical();
        }

        if (rc == RT_EOK)
        {
            /* do schedule */
            rt_schedule();

            /* check errno */
            rc = rt_get_errno();
            rc = rc > 0 ? -rc : rc;
        }
    }
    else
    {
        _futex_unlock(lwp, op_flags);
        rc = -EAGAIN;
        rt_set_errno(EAGAIN);
    }

    return rc;
}
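
/*
 * Timeout conversion example (illustrative; assumes RT_TICK_PER_SECOND ==
 * 1000): a relative timeout of { tv_sec = 1, tv_nsec = 500000000 } becomes
 *
 *     to = 1 * 1000 + (500000000 * 1000) / NANOSECOND_PER_SECOND = 1500 ticks
 *
 * Note that _futex_wait() only blocks while `*uaddr` still equals `value`
 * under the futex lock; otherwise it returns -EAGAIN immediately, which lets
 * userspace redo its own atomic check and retry.
 */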

static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
                        int op_flags)
{
    long woken_cnt = 0;
    int is_empty = 0;

    /**
     * Brief: Wakeup a suspended thread on the futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (number && !is_empty)
    {
        _futex_lock(lwp, op_flags);
        if (rt_susp_list_dequeue(&futex->waiting_thread, RT_EOK))
        {
            number--;
            woken_cnt++;
        }
        else
        {
            /* no more waiters on this futex */
            is_empty = 1;
        }
        _futex_unlock(lwp, op_flags);
    }

    /* do schedule */
    rt_schedule();

    return woken_cnt;
}

/**
 * Brief: Wake up at most nr_wake threads waiting on futex1.
 * If more waiters than nr_wake remain on futex1, move up to
 * nr_requeue of the remaining waiters onto the waiting queue
 * of futex2.
 */
static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
                           struct rt_lwp *lwp, int nr_wake, int nr_requeue,
                           int opflags)
{
    long rtn;
    long woken_cnt = 0;
    int is_empty = 0;
    rt_thread_t thread;

    if (futex1 == futex2)
    {
        return -EINVAL;
    }

    /**
     * Brief: Wakeup a suspended thread on the futex waiting thread list
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (nr_wake && !is_empty)
    {
        rt_sched_lock_level_t slvl;
        rt_sched_lock(&slvl);
        is_empty = rt_list_isempty(&(futex1->waiting_thread));
        if (!is_empty)
        {
            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);

            /* remove from waiting list */
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));

            thread->error = RT_EOK;
            /* resume the suspended thread */
            rt_thread_resume(thread);

            nr_wake--;
            woken_cnt++;
        }
        rt_sched_unlock(slvl);
    }
    rtn = woken_cnt;

    /**
     * Brief: Requeue
     *
     * Note: Critical Section
     * - the futex waiting_thread list (RW)
     */
    while (!is_empty && nr_requeue)
    {
        rt_sched_lock_level_t slvl;
        rt_sched_lock(&slvl);
        is_empty = rt_list_isempty(&(futex1->waiting_thread));
        if (!is_empty)
        {
            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
            rt_list_insert_before(&(futex2->waiting_thread),
                                  &RT_THREAD_LIST_NODE(thread));
            nr_requeue--;
            rtn++;
        }
        rt_sched_unlock(slvl);
    }

    /* do schedule */
    rt_schedule();

    return rtn;
}
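
/*
 * Requeue example (illustrative): with ten waiters on futex1, nr_wake == 1
 * and nr_requeue == INT_MAX (the usual "broadcast" pattern), one thread is
 * resumed, the remaining nine are moved onto futex2's waiting list, and the
 * function returns 1 + 9 == 10 (woken plus requeued). When invoked through
 * lwp_futex() for FUTEX_REQUEUE / FUTEX_CMP_REQUEUE, nr_requeue is the raw
 * `timeout` argument reinterpreted as an integer (val2 in futex(2) terms).
 */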

/* timeout argument measured against the CLOCK_REALTIME clock. */
static long _futex_lock_pi(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
                           const struct timespec *timeout, int op_flags,
                           rt_bool_t trylock)
{
    int word = 0, nword, cword;
    int tid = 0;
    rt_err_t err = 0;
    rt_thread_t thread = RT_NULL, current_thread = RT_NULL;
    rt_tick_t to = RT_WAITING_FOREVER;

    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -EFAULT;
    }

    current_thread = rt_thread_self();

    _futex_lock(lwp, op_flags);
    lwp_get_from_user(&word, (void *)uaddr, sizeof(int));
    tid = word & FUTEX_TID_MASK;
    if (word == 0)
    {
        /* If the value is 0, then the kernel tries
           to atomically set the futex value to the caller's TID. */
        nword = current_thread->tid;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        _futex_unlock(lwp, op_flags);
        return 0;
    }
    else
    {
        thread = lwp_tid_get_thread_and_inc_ref(tid);
        if (thread == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ESRCH;
        }
        lwp_tid_dec_ref(thread);

        nword = word | FUTEX_WAITERS;
        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
        {
            _futex_unlock(lwp, op_flags);
            return -EAGAIN;
        }
        word = nword;
    }

    if (futex->mutex == RT_NULL)
    {
        futex->mutex = rt_mutex_create("futexpi", RT_IPC_FLAG_PRIO);
        if (futex->mutex == RT_NULL)
        {
            _futex_unlock(lwp, op_flags);
            return -ENOMEM;
        }
        /* set mutex->owner */
        rt_spin_lock(&(futex->mutex->spinlock));
        futex->mutex->owner = thread;
        futex->mutex->hold = 1;
        rt_spin_unlock(&(futex->mutex->spinlock));
    }

    if (timeout)
    {
        to = rt_timespec_to_tick(timeout);
    }
    if (trylock)
    {
        to = RT_WAITING_NO;
    }
    _futex_unlock(lwp, op_flags);

    err = rt_mutex_take_interruptible(futex->mutex, to);
    if (err == -RT_ETIMEOUT)
    {
        err = -EDEADLK;
    }

    _futex_lock(lwp, op_flags);
    nword = current_thread->tid | FUTEX_WAITERS;
    if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
    {
        err = -EAGAIN;
    }
    _futex_unlock(lwp, op_flags);

    return err;
}
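
/*
 * PI word layout (illustrative; the bit values follow the Linux futex ABI,
 * e.g. FUTEX_WAITERS == 0x80000000 and FUTEX_TID_MASK == 0x3fffffff): the
 * low bits hold the owner's tid and the high bits are flags. With owner
 * tid 42 and one blocked waiter the user word would read
 *
 *     42 | FUTEX_WAITERS == 0x8000002a
 *
 * which is exactly the `word | FUTEX_WAITERS` transition performed above
 * before the caller blocks on the kernel-side priority-inheritance mutex.
 */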

static long _futex_unlock_pi(rt_futex_t futex, struct rt_lwp *lwp, int op_flags)
{
    rt_err_t err = 0;

    _futex_lock(lwp, op_flags);
    if (!futex->mutex)
    {
        _futex_unlock(lwp, op_flags);
        return -EPERM;
    }
    _futex_unlock(lwp, op_flags);

    err = rt_mutex_release(futex->mutex);
    return err;
}

#include <syscall_generic.h>

rt_inline rt_bool_t _timeout_ignored(int op)
{
    /**
     * According to the futex(2) manual, `timeout` is ignored (or reused as
     * `val2` for the requeue ops) when op is one of FUTEX_WAKE, FUTEX_FD,
     * FUTEX_WAKE_BITSET, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, FUTEX_TRYLOCK_PI
     * or FUTEX_UNLOCK_PI. Only the ops implemented below are checked here.
     */
    return ((op & (FUTEX_WAKE)) || (op & (FUTEX_REQUEUE)) ||
            (op & (FUTEX_CMP_REQUEUE)) || (op & (FUTEX_UNLOCK_PI)) ||
            (op & (FUTEX_TRYLOCK_PI)));
}

sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
                   int *uaddr2, int val3)
{
    struct rt_lwp *lwp = RT_NULL;
    sysret_t ret = 0;

    if (!lwp_user_accessable(uaddr, sizeof(int)))
    {
        ret = -EFAULT;
    }
    else if (timeout && !_timeout_ignored(op) &&
             !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
    {
        ret = -EINVAL;
    }
    else
    {
        lwp = lwp_self();
        ret = lwp_futex(lwp, uaddr, op, val, timeout, uaddr2, val3);
    }

    return ret;
}
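
/*
 * Userspace usage sketch (a hedged example: it assumes a Linux-style
 * syscall(2) wrapper and the FUTEX_* constants exported by the libc
 * headers, whose exact names may differ from the kernel-side macros here):
 *
 *     int word = 0;                                   // the futex word
 *     // waiter: sleep as long as nobody has posted
 *     syscall(SYS_futex, &word, FUTEX_WAIT | FUTEX_PRIVATE, 0, NULL, NULL, 0);
 *     // waker: publish the value, then wake one sleeper
 *     __atomic_store_n(&word, 1, __ATOMIC_SEQ_CST);
 *     syscall(SYS_futex, &word, FUTEX_WAKE | FUTEX_PRIVATE, 1, NULL, NULL, 0);
 */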

#define FUTEX_FLAGS (FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME)
rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
                   const struct timespec *timeout, int *uaddr2, int val3)
{
    rt_futex_t futex, futex2;
    rt_err_t rc = 0;
    int op_type = op & ~FUTEX_FLAGS;
    int op_flags = op & FUTEX_FLAGS;

    futex = _futex_get(uaddr, lwp, op_flags, &rc);
    if (!rc)
    {
        switch (op_type)
        {
            case FUTEX_WAIT:
                rc = _futex_wait(futex, lwp, uaddr, val, timeout, op_flags);
                break;
            case FUTEX_WAKE:
                rc = _futex_wake(futex, lwp, val, op_flags);
                break;
            case FUTEX_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                if (!rc)
                {
                    _futex_lock(lwp, op_flags);
                    rc = _futex_requeue(futex, futex2, lwp, val, (long)timeout,
                                        op_flags);
                    _futex_unlock(lwp, op_flags);
                }
                break;
            case FUTEX_CMP_REQUEUE:
                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
                _futex_lock(lwp, op_flags);
                if (*uaddr == val3)
                {
                    rc = 0;
                }
                else
                {
                    rc = -EAGAIN;
                }
                if (rc == 0)
                {
                    rc = _futex_requeue(futex, futex2, lwp, val,
                                        (long)timeout, op_flags);
                }
                _futex_unlock(lwp, op_flags);
                break;
            case FUTEX_LOCK_PI:
                rc = _futex_lock_pi(futex, lwp, uaddr, timeout, op_flags,
                                    RT_FALSE);
                break;
            case FUTEX_UNLOCK_PI:
                rc = _futex_unlock_pi(futex, lwp, op_flags);
                break;
            case FUTEX_TRYLOCK_PI:
                rc = _futex_lock_pi(futex, lwp, uaddr, 0, op_flags, RT_TRUE);
                break;
            default:
                LOG_W("User requested op=%d which is not implemented", op);
                rc = -ENOSYS;
                break;
        }
    }

    return rc;
}
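
/*
 * Dispatch example (illustrative): an incoming op of
 * FUTEX_WAIT | FUTEX_PRIVATE splits into op_type == FUTEX_WAIT and
 * op_flags == FUTEX_PRIVATE, so the wait runs on a per-process (pftx) futex
 * under the lwp lock. FUTEX_CLOCK_REALTIME is likewise masked into op_flags
 * rather than into the op code.
 */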

rt_inline int _fetch_robust_entry(struct robust_list **entry,
                                  struct robust_list **head, rt_bool_t *is_pi)
{
    unsigned long uentry;

    if (!lwp_user_accessable((void *)head, sizeof(*head)))
    {
        return -EFAULT;
    }

    if (lwp_get_from_user(&uentry, (void *)head, sizeof(*head)) !=
        sizeof(*head))
    {
        return -EFAULT;
    }

    *entry = (void *)(uentry & ~1UL);
    *is_pi = uentry & 1;

    return 0;
}

static int _handle_futex_death(int *uaddr, rt_thread_t thread, rt_bool_t is_pi,
                               rt_bool_t is_pending_op)
{
    int word, cword = 0, nword;
    rt_err_t rc;
    struct rt_lwp *lwp;
    rt_futex_t futex;

    /* Futex address must be 32-bit aligned */
    if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
        return -1;

    lwp = thread->lwp;

retry:
    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
    {
        return -1;
    }

    if (lwp_get_from_user(&word, (void *)uaddr, sizeof(*uaddr)) !=
        sizeof(*uaddr))
    {
        return -1;
    }

    futex = _futex_get(uaddr, lwp, FUTEX_PRIVATE, &rc);

    if (is_pending_op && !is_pi && !word)
    {
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
        return 0;
    }

    if ((word & FUTEX_TID_MASK) != thread->tid)
        return 0;

    nword = (word & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

    if ((rc = _futex_cmpxchg_value(&cword, uaddr, word, nword)))
    {
        switch (rc)
        {
            case -EFAULT:
                return -1;
            case -EAGAIN:
                rt_schedule();
                goto retry;
            default:
                LOG_W("unknown errno: %d in '%s'", rc, __FUNCTION__);
                return rc;
        }
    }

    if (cword != word)
        goto retry;

    if (!is_pi && (word & FUTEX_WAITERS))
        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);

    return 0;
}
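
/*
 * Death-handling example (illustrative): suppose thread 42 exits while
 * holding a robust lock whose word is 42 | FUTEX_WAITERS. The cmpxchg above
 * rewrites the word to FUTEX_WAITERS | FUTEX_OWNER_DIED and one waiter is
 * woken; that waiter observes FUTEX_OWNER_DIED and can recover the lock
 * (what pthreads reports as EOWNERDEAD).
 */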

/**
 * Brief: Walk thread->robust_list, marking any locks found there dead,
 * and notify any waiters.
 *
 * Note: walk it very carefully, it's a userspace list!
 */
void lwp_futex_exit_robust_list(rt_thread_t thread)
{
    struct robust_list *entry = RT_NULL;
    struct robust_list *next_entry = RT_NULL;
    struct robust_list *pending = RT_NULL;
    struct robust_list_head *head;
    unsigned int limit = 2048;
    rt_bool_t pi, pip, next_pi;
    unsigned long futex_offset;
    int rc;

    head = thread->robust_list;
    if (head == RT_NULL)
        return;

    if (_fetch_robust_entry(&entry, &head->list.next, &pi))
        return;

    if (!lwp_user_accessable((void *)&head->futex_offset,
                             sizeof(head->futex_offset)))
    {
        return;
    }

    if (lwp_get_from_user(&futex_offset, (void *)&head->futex_offset,
                          sizeof(head->futex_offset)) !=
        sizeof(head->futex_offset))
    {
        return;
    }

    if (_fetch_robust_entry(&pending, &head->list_op_pending, &pip))
    {
        return;
    }

    while (entry != &head->list)
    {
        rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);
        if (entry != pending)
        {
            if (_handle_futex_death((void *)entry + futex_offset, thread, pi,
                                    RT_FALSE))
                return;
        }
        if (rc)
            return;

        entry = next_entry;
        pi = next_pi;

        if (!--limit)
            break;
    }

    if (pending)
    {
        _handle_futex_death((void *)pending + futex_offset, thread, pip,
                            RT_TRUE);
    }
}