poll.c
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
 * 2018-03-09     Bernard      Add protection for pt->triggered.
 */
#include <stdint.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include "poll.h"
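
/*
 * This file implements poll() on top of RT-Thread waitqueues: each polled
 * file descriptor contributes a waitqueue node, the calling thread suspends
 * until one of the nodes is woken (or the timeout expires), and all nodes
 * are torn down before poll() returns.
 */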
struct rt_poll_node;

struct rt_poll_table
{
    rt_pollreq_t req;
    rt_uint32_t triggered; /* whether the waiting thread has been triggered */
    rt_thread_t polling_thread;
    struct rt_poll_node *nodes;
};

struct rt_poll_node
{
    struct rt_wqueue_node wqn;
    struct rt_poll_table *pt;
    struct rt_poll_node *next;
};

static RT_DEFINE_SPINLOCK(_spinlock);
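
/*
 * Wakeup callback installed on every waitqueue node added by _poll_add():
 * ignore wakeups whose key does not intersect the events this node waits
 * for; otherwise mark the poll table as triggered and perform the default
 * wakeup to resume the polling thread.
 */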
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
    struct rt_poll_node *pn;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    pn = rt_container_of(wait, struct rt_poll_node, wqn);
    pn->pt->triggered = 1;

    return __wqueue_default_wake(wait, key);
}
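
/*
 * The _proc hook stored in the poll request: a device's fops->poll()
 * calls it to register the polling thread on the device's waitqueue.
 * The node is prepended to the poll table's list so poll_teardown() can
 * free it later. An allocation failure is silently ignored; the thread
 * then simply cannot block on this descriptor.
 */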
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_poll_table *pt;
    struct rt_poll_node *node;

    node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
    if (node == RT_NULL)
        return;

    pt = rt_container_of(req, struct rt_poll_table, req);

    node->wqn.key = req->_key;
    rt_list_init(&(node->wqn.list));
    node->wqn.polling_thread = pt->polling_thread;
    node->wqn.wakeup = __wqueue_pollwake;
    node->next = pt->nodes;
    node->pt = pt;
    pt->nodes = node;
    rt_wqueue_add(wq, &node->wqn);
}
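
/* Prepare a poll table for the calling thread with no nodes attached. */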
static void poll_table_init(struct rt_poll_table *pt)
{
    pt->req._proc = _poll_add;
    pt->triggered = 0;
    pt->nodes = RT_NULL;
    pt->polling_thread = rt_thread_self();
}
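
/*
 * Block the polling thread for at most `msec` milliseconds (forever if
 * negative) unless the table has already been triggered. Returns nonzero
 * if the wait ended without a trigger, i.e. on timeout.
 */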
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&_spinlock);

    if (timeout != 0 && !pt->triggered)
    {
        if (rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }

            rt_spin_unlock_irqrestore(&_spinlock, level);

            rt_schedule();

            level = rt_spin_lock_irqsave(&_spinlock);
        }
    }

    ret = !pt->triggered;
    rt_spin_unlock_irqrestore(&_spinlock, level);

    return ret;
}
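
/*
 * Poll a single descriptor. Returns the revents mask (0 if nothing is
 * ready yet), POLLNVAL for a bad descriptor, or a negative value if the
 * device's poll operation itself failed.
 */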
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
    int mask = 0;
    int fd;

    fd = pollfd->fd;

    if (fd >= 0)
    {
        struct dfs_file *f = fd_get(fd);
        mask = POLLNVAL;

        if (f)
        {
            mask = POLLMASK_DEFAULT;
            if (f->vnode->fops->poll)
            {
                req->_key = pollfd->events | POLLERR | POLLHUP;

                mask = f->vnode->fops->poll(f, req);

                /* the device returned an error (-1) */
                if (mask < 0)
                {
                    pollfd->revents = 0;
                    return mask;
                }
            }

            /* Mask out unneeded events. */
            mask &= pollfd->events | POLLERR | POLLHUP;
        }
    }

    pollfd->revents = mask;

    return mask;
}
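
/*
 * Core loop: scan all descriptors, and if none is ready and the timeout
 * has not expired, register on the device waitqueues (first pass only,
 * after which _proc is cleared) and sleep until a wakeup or the timeout.
 * A zero timeout degenerates into a single non-blocking scan.
 */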
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    nfds_t n;
    struct pollfd *pf;
    int ret = 0;

    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;
        pt->triggered = 0;

        for (n = 0; n < nfds; n ++)
        {
            ret = do_pollfd(pf, &pt->req);
            if (ret < 0)
            {
                /* the device returned an error (-1) */
                pt->req._proc = RT_NULL;
                return ret;
            }
            else if (ret > 0)
            {
                num ++;
                pt->req._proc = RT_NULL;
            }
            pf ++;
        }

        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        if (poll_wait_timeout(pt, msec))
            istimeout = 1;
    }

    return num;
}
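
/* Detach every waitqueue node collected during the scan and free it. */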
static void poll_teardown(struct rt_poll_table *pt)
{
    struct rt_poll_node *node, *next;

    next = pt->nodes;
    while (next)
    {
        node = next;
        rt_wqueue_remove(&node->wqn);
        next = node->next;
        rt_free(node);
    }
}
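
/*
 * POSIX-style poll(): wait up to `timeout` milliseconds (0 = return
 * immediately, negative = wait indefinitely) for any of the `nfds`
 * descriptors in `fds` to become ready. Returns the number of ready
 * descriptors, or a negative value on error.
 */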
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    int num;
    struct rt_poll_table table;

    poll_table_init(&table);

    num = poll_do(fds, nfds, &table, timeout);

    poll_teardown(&table);

    return num;
}
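
/*
 * Usage sketch (illustrative only; `fd` is assumed to be a descriptor
 * opened elsewhere, e.g. a device or socket):
 *
 *     struct pollfd pfd;
 *     pfd.fd = fd;
 *     pfd.events = POLLIN;
 *
 *     int ready = poll(&pfd, 1, 1000);    // block for up to 1000 ms
 *     if (ready > 0 && (pfd.revents & POLLIN))
 *     {
 *         // fd is readable
 *     }
 */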