poll.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
 * 2018-03-09     Bernard      Add protection for pt->triggered.
 */
#include <stdint.h>
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include "poll.h"

struct rt_poll_node;

struct rt_poll_table
{
    rt_pollreq_t req;
    rt_uint32_t triggered; /* whether an event has triggered the polling thread */
    rt_thread_t polling_thread;
    struct rt_poll_node *nodes;
};

struct rt_poll_node
{
    struct rt_wqueue_node wqn;
    struct rt_poll_table *pt;
    struct rt_poll_node *next;
};
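
/*
 * Wakeup callback attached to every wait-queue node created for a poll
 * request. If an event key is given and it does not match the events this
 * node waits on, return -1 so the node is skipped; otherwise mark the poll
 * table as triggered and fall through to the default wakeup.
 */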
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
    struct rt_poll_node *pn;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    pn = rt_container_of(wait, struct rt_poll_node, wqn);
    pn->pt->triggered = 1;

    return __wqueue_default_wake(wait, key);
}
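
/*
 * The _proc callback installed in the poll request; a device's poll
 * operation invokes it to register the polling thread on the device's wait
 * queue. It allocates a wait node, links it into the poll table's node
 * list (for teardown later), and adds it to the given wait queue.
 */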
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_poll_table *pt;
    struct rt_poll_node *node;

    node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
    if (node == RT_NULL)
        return;

    pt = rt_container_of(req, struct rt_poll_table, req);

    node->wqn.key = req->_key;
    rt_list_init(&(node->wqn.list));
    node->wqn.polling_thread = pt->polling_thread;
    node->wqn.wakeup = __wqueue_pollwake;
    node->next = pt->nodes;
    node->pt = pt;
    pt->nodes = node;
    rt_wqueue_add(wq, &node->wqn);
}

static void poll_table_init(struct rt_poll_table *pt)
{
    pt->req._proc = _poll_add;
    pt->triggered = 0;
    pt->nodes = RT_NULL;
    pt->polling_thread = rt_thread_self();
}
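
/*
 * Block the polling thread until an event triggers it or msec elapses.
 * msec < 0 waits forever, msec == 0 returns immediately. Returns nonzero
 * if the wait ended without any node being triggered (i.e. a timeout).
 */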
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_hw_interrupt_disable();

    if (timeout != 0 && !pt->triggered)
    {
        rt_thread_suspend(thread);
        if (timeout > 0)
        {
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        rt_hw_interrupt_enable(level);

        rt_schedule();

        level = rt_hw_interrupt_disable();
    }

    ret = !pt->triggered;
    rt_hw_interrupt_enable(level);

    return ret;
}
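
/*
 * Poll a single descriptor. Returns the resulting revents mask: 0 if no
 * requested event is pending, POLLNVAL if the fd is not backed by an open
 * file, or a negative value if the device's poll operation itself failed.
 */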
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
    int mask = 0;
    int fd;

    fd = pollfd->fd;

    if (fd >= 0)
    {
        struct dfs_fd *f = fd_get(fd);
        mask = POLLNVAL;

        if (f)
        {
            mask = POLLMASK_DEFAULT;
            if (f->fops->poll)
            {
                req->_key = pollfd->events | POLLERR | POLLHUP;

                mask = f->fops->poll(f, req);

                /* the device's poll operation returned an error (-1) */
                if (mask < 0)
                {
                    fd_put(f);
                    pollfd->revents = 0;
                    return mask;
                }
            }
            /* Mask out unneeded events. */
            mask &= pollfd->events | POLLERR | POLLHUP;
            fd_put(f);
        }
    }
    pollfd->revents = mask;

    return mask;
}
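
/*
 * Core poll loop: scan every descriptor once, then sleep until either an
 * event wakes the thread or the timeout expires, and rescan. The _proc
 * callback is cleared after the first pass so that wait-queue nodes are
 * registered only once per poll() call.
 */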
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    nfds_t n;
    struct pollfd *pf;
    int ret = 0;

    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;
        pt->triggered = 0;

        for (n = 0; n < nfds; n ++)
        {
            ret = do_pollfd(pf, &pt->req);
            if (ret < 0)
            {
                /* the device's poll operation returned an error (-1) */
                pt->req._proc = RT_NULL;
                return ret;
            }
            else if (ret > 0)
            {
                num ++;
                pt->req._proc = RT_NULL;
            }
            pf ++;
        }

        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        if (poll_wait_timeout(pt, msec))
            istimeout = 1;
    }

    return num;
}

static void poll_teardown(struct rt_poll_table *pt)
{
    struct rt_poll_node *node, *next;

    next = pt->nodes;
    while (next)
    {
        node = next;
        rt_wqueue_remove(&node->wqn);
        next = node->next;
        rt_free(node);
    }
}
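
/*
 * POSIX-style poll(): returns the number of descriptors with events set,
 * 0 on timeout, or a negative value on error.
 */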
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    int num;
    struct rt_poll_table table;

    poll_table_init(&table);

    num = poll_do(fds, nfds, &table, timeout);

    poll_teardown(&table);

    return num;
}
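
/*
 * Usage sketch (hypothetical caller, not part of this file): wait up to
 * one second for a descriptor to become readable. "fd" is assumed to be
 * an already-opened device or socket descriptor.
 *
 *     struct pollfd pfd = {0};
 *     pfd.fd = fd;
 *     pfd.events = POLLIN;
 *     int n = poll(&pfd, 1, 1000);    // timeout in milliseconds
 *     if (n > 0 && (pfd.revents & POLLIN))
 *         handle_readable(fd);        // handle_readable() is illustrative
 */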