/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
 * 2018-03-09     Bernard      Add protection for pt->triggered.
 * 2023-12-04     Shell        Fix return code and error verification
 * 2023-12-14     Shell        When poll goes to sleep before the waitqueue has
 *                             added a record and finished enumerating all the
 *                             fd's, it may be incorrectly woken up. This is
 *                             basically because the poll mechanism wakeup
 *                             algorithm does not correctly distinguish the
 *                             current wait state.
 */
#include <stdint.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include "poll.h"

struct rt_poll_node;
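
/* Wait-state of the polling thread. The dedicated WAITING state (introduced
 * with the 2023-12-14 fix noted above) lets the wakeup callback tell a thread
 * that is actually suspended apart from one that is still enumerating fds, so
 * an early event is recorded as TRIG instead of causing a spurious wakeup. */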
enum rt_poll_status {
    RT_POLL_STAT_INIT,
    RT_POLL_STAT_TRIG,
    RT_POLL_STAT_WAITING,
};

struct rt_poll_table
{
    rt_pollreq_t req;
    enum rt_poll_status status; /* whether the waiting thread was triggered */
    rt_thread_t polling_thread;
    struct rt_poll_node *nodes;
};

struct rt_poll_node
{
    struct rt_wqueue_node wqn;
    struct rt_poll_table *pt;
    struct rt_poll_node *next;
};

static RT_DEFINE_SPINLOCK(_spinlock);
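
/* Wakeup callback attached to every waitqueue node registered below. A zero
 * return lets rt_wqueue_wakeup() resume the polling thread; -1 means the
 * event key does not match, or the thread is not actually asleep yet, so the
 * wakeup is skipped and only the TRIG state is recorded. */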
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
    rt_ubase_t level;
    struct rt_poll_node *pn;
    int is_waiting;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    pn = rt_container_of(wait, struct rt_poll_node, wqn);

    level = rt_spin_lock_irqsave(&_spinlock);
    is_waiting = (pn->pt->status == RT_POLL_STAT_WAITING);
    pn->pt->status = RT_POLL_STAT_TRIG;
    rt_spin_unlock_irqrestore(&_spinlock, level);

    if (is_waiting)
        return __wqueue_default_wake(wait, key);

    return -1;
}
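
/* _proc hook stored in rt_pollreq_t: device drivers call rt_poll_add() from
 * their poll() implementation, which dispatches here to enqueue a waitqueue
 * node for the polling thread on the driver's waitqueue. The node is also
 * chained into pt->nodes so poll_teardown() can undo the registration. */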
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_poll_table *pt;
    struct rt_poll_node *node;

    node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
    if (node == RT_NULL)
        return;

    pt = rt_container_of(req, struct rt_poll_table, req);

    node->wqn.key = req->_key;
    rt_list_init(&(node->wqn.list));
    node->wqn.polling_thread = pt->polling_thread;
    node->wqn.wakeup = __wqueue_pollwake;
    node->next = pt->nodes;
    node->pt = pt;
    pt->nodes = node;
    rt_wqueue_add(wq, &node->wqn);
}
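
/* Prepare a per-call poll table: no nodes yet, not triggered, and bound to
 * the calling thread so wakeup callbacks know whom to resume. */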
static void poll_table_init(struct rt_poll_table *pt)
{
    pt->req._proc = _poll_add;
    pt->status = RT_POLL_STAT_INIT;
    pt->nodes = RT_NULL;
    pt->polling_thread = rt_thread_self();
}
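
/* Suspend the polling thread for at most msec milliseconds (msec < 0 waits
 * forever, msec == 0 never sleeps). errno is pre-set to RT_ETIMEOUT before a
 * timed sleep, so an expired thread_timer leaves that value behind, while an
 * interruption is expected to rewrite it to RT_EINTR. Returns RT_EOK when an
 * event triggered, -RT_ETIMEOUT on timeout, or -RT_EINTR when interrupted. */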
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&_spinlock);

    if (timeout != 0 && pt->status != RT_POLL_STAT_TRIG)
    {
        if (rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
                rt_set_errno(RT_ETIMEOUT);
            }
            else
            {
                rt_set_errno(0);
            }

            pt->status = RT_POLL_STAT_WAITING;
            rt_spin_unlock_irqrestore(&_spinlock, level);

            rt_schedule();

            level = rt_spin_lock_irqsave(&_spinlock);
            if (pt->status == RT_POLL_STAT_WAITING)
                pt->status = RT_POLL_STAT_INIT;
        }
    }

    ret = rt_get_errno();
    if (ret == RT_EINTR)
        ret = -RT_EINTR;
    else if (pt->status == RT_POLL_STAT_TRIG)
        ret = RT_EOK;
    else
        ret = -RT_ETIMEOUT;

    rt_spin_unlock_irqrestore(&_spinlock, level);

    return ret;
}
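
/* Query one descriptor. A negative fd yields no events, a stale fd reports
 * POLLNVAL, and a device without a poll hook falls back to POLLMASK_DEFAULT
 * (always ready). The result is masked to the caller's requested events plus
 * the always-reported POLLERR and POLLHUP. */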
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
    int mask = 0;
    int fd;

    fd = pollfd->fd;

    if (fd >= 0)
    {
        struct dfs_file *f = fd_get(fd);
        mask = POLLNVAL;

        if (f)
        {
            mask = POLLMASK_DEFAULT;
            if (f->vnode->fops->poll)
            {
                req->_key = pollfd->events | POLLERR | POLLHUP;
                mask = f->vnode->fops->poll(f, req);

                /* deal with a device that returns an error (-1) */
                if (mask < 0)
                {
                    pollfd->revents = 0;
                    return mask;
                }
            }

            /* Mask out unneeded events. */
            mask &= pollfd->events | POLLERR | POLLHUP;
        }
    }

    pollfd->revents = mask;

    return mask;
}
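
/* Core loop: enumerate every fd, registering waitqueue nodes on the first
 * pass; if nothing is ready, sleep until an event, a timeout, or a signal.
 * _proc is cleared after the first pass (and up front when msec == 0), so
 * subsequent passes only re-check readiness without adding duplicate nodes. */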
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    nfds_t n;
    struct pollfd *pf;
    int ret = 0;

    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;
        pt->status = RT_POLL_STAT_INIT;

        for (n = 0; n < nfds; n++)
        {
            ret = do_pollfd(pf, &pt->req);
            if (ret < 0)
            {
                /* deal with a device that returns an error (-1) */
                pt->req._proc = RT_NULL;
                return ret;
            }
            else if (ret > 0)
            {
                num++;
                pt->req._proc = RT_NULL;
            }
            pf++;
        }

        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        ret = poll_wait_timeout(pt, msec);
        if (ret == -RT_EINTR)
            return -EINTR;
        else if (ret == -RT_ETIMEOUT)
            istimeout = 1;
        else
            istimeout = 0;
    }

    return num;
}
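
/* Detach and free every waitqueue node registered by _poll_add(). This must
 * run on every exit path, hence the unconditional call from poll(). */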
static void poll_teardown(struct rt_poll_table *pt)
{
    struct rt_poll_node *node, *next;

    next = pt->nodes;
    while (next)
    {
        node = next;
        rt_wqueue_remove(&node->wqn);
        next = node->next;
        rt_free(node);
    }
}
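
/* POSIX-style entry point: timeout is in milliseconds, 0 polls once and
 * returns immediately, and a negative value blocks indefinitely. Returns the
 * number of ready descriptors, 0 on timeout, or a negative error code. */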
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    int num;
    struct rt_poll_table table;

    poll_table_init(&table);

    num = poll_do(fds, nfds, &table, timeout);

    poll_teardown(&table);

    return num;
}
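
/* Usage sketch (illustrative only, compiled out): wait up to one second for
 * input on a single descriptor. The fd and surrounding function are
 * hypothetical; the fd is assumed to come from open(). */
#if 0
static void poll_usage_example(int fd)
{
    struct pollfd pfd;
    int ret;

    pfd.fd = fd;
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* 1000 ms timeout; 0 would return at once, -1 would block forever */
    ret = poll(&pfd, 1, 1000);
    if (ret > 0 && (pfd.revents & POLLIN))
    {
        /* descriptor is readable */
    }
    else if (ret == 0)
    {
        /* timed out with no events */
    }
}
#endif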