epoll.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-07-29 zmq810150896 first version
  9. * 2024-03-26 TroyMitchelle Add comments for all functions, members within structure members and fix incorrect naming of triggered
  10. * 2023-12-14 Shell When poll goes to sleep before the waitqueue has added a
  11. * record and finished enumerating all the fd's, it may be
  12. * incorrectly woken up. This is basically because the poll
  13. * mechanism wakeup algorithm does not correctly distinguish
  14. * the current wait state.
  15. */
  16. #include <rtthread.h>
  17. #include <fcntl.h>
  18. #include <stdint.h>
  19. #include <unistd.h>
  20. #include <dfs_file.h>
  21. #include "sys/epoll.h"
  22. #include "poll.h"
  23. #include <lwp_signal.h>
  24. #define EPOLL_MUTEX_NAME "EVENTEPOLL"
  25. #define EFD_SHARED_EPOLL_TYPE (EPOLL_CTL_ADD | EPOLL_CTL_DEL | EPOLL_CTL_MOD)
  26. #define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
  27. #define EPOLLEXCLUSIVE_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
  28. EPOLLET | EPOLLEXCLUSIVE)
/* Forward declaration: epoll instance control block. */
struct rt_eventpoll;

/* Wakeup bookkeeping shared between the waiter and the wakeup callback. */
enum rt_epoll_status {
    RT_EPOLL_STAT_INIT,     /**< idle: no waiter suspended, nothing newly triggered */
    RT_EPOLL_STAT_TRIG,     /**< at least one monitored fd became ready */
    RT_EPOLL_STAT_WAITING,  /**< a thread is suspended in epoll_wait_timeout() */
};

/* Monitor queue: one node per watched file descriptor. */
struct rt_fd_list
{
    rt_uint32_t revents;        /**< Monitored events (EPOLL* mask requested by the user) */
    struct epoll_event epev;    /**< Epoll event structure reported back to the user */
    rt_pollreq_t req;           /**< Poll request structure handed to the fd's poll() */
    struct rt_eventpoll *ep;    /**< Pointer to the owning event poll instance */
    struct rt_wqueue_node wqn;  /**< Wait queue node hooked onto the fd's wait queue */
    int exclusive;              /**< Indicates if the event is exclusive */
    rt_bool_t is_rdl_node;      /**< RT_TRUE while the node sits on ep->rdl_head */
    int fd;                     /**< File descriptor being watched */
    struct rt_fd_list *next;    /**< Next entry in the monitor list (singly linked) */
    rt_slist_t rdl_node;        /**< Ready list linkage */
};

struct rt_eventpoll
{
    rt_wqueue_t epoll_read;       /**< Wait queue for poll()ing the epoll fd itself */
    rt_thread_t polling_thread;   /**< Thread that waits on this instance */
    struct rt_mutex lock;         /**< Serializes monitor-list traversal/mutation */
    struct rt_fd_list *fdlist;    /**< Monitor list; head node doubles as a sentinel */
    int eventpoll_num;            /**< Number of entries currently on the ready list */
    rt_pollreq_t req;             /**< Poll request structure */
    struct rt_spinlock spinlock;  /**< Protects rdl_head, eventpoll_num and status */
    rt_slist_t rdl_head;          /**< Ready list head */
    enum rt_epoll_status status;  /**< Whether the waiting thread has been triggered */
};

static int epoll_close(struct dfs_file *file);
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req);
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req);
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event);

/* File operations exposed by the anonymous epoll file. */
static const struct dfs_file_ops epoll_fops =
{
    .close  = epoll_close,
    .poll   = epoll_poll,
};
  70. /**
  71. * @brief Closes the file descriptor list associated with epoll.
  72. *
  73. * This function closes the file descriptor list associated with epoll and frees the allocated memory.
  74. *
  75. * @param fdlist Pointer to the file descriptor list.
  76. *
  77. * @return Returns 0 on success.
  78. */
  79. static int epoll_close_fdlist(struct rt_fd_list *fdlist)
  80. {
  81. struct rt_fd_list *fre_node, *list;
  82. if (fdlist != RT_NULL)
  83. {
  84. list = fdlist;
  85. while (list->next != RT_NULL)
  86. {
  87. fre_node = list->next;
  88. rt_wqueue_remove(&fre_node->wqn);
  89. list->next = fre_node->next;
  90. rt_free(fre_node);
  91. }
  92. rt_free(fdlist);
  93. }
  94. return 0;
  95. }
  96. /**
  97. * @brief Closes the epoll file descriptor.
  98. *
  99. * This function closes the epoll file descriptor and cleans up associated resources.
  100. *
  101. * @param file Pointer to the file structure.
  102. *
  103. * @return Returns 0 on success.
  104. */
  105. static int epoll_close(struct dfs_file *file)
  106. {
  107. struct rt_eventpoll *ep;
  108. if (file->vnode->ref_count != 1)
  109. return 0;
  110. if (file->vnode)
  111. {
  112. if (file->vnode->data)
  113. {
  114. ep = file->vnode->data;
  115. if (ep)
  116. {
  117. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  118. if (ep->fdlist)
  119. {
  120. epoll_close_fdlist(ep->fdlist);
  121. }
  122. rt_mutex_release(&ep->lock);
  123. rt_mutex_detach(&ep->lock);
  124. rt_free(ep);
  125. }
  126. }
  127. }
  128. return 0;
  129. }
/**
 * @brief Polls the epoll file descriptor for events.
 *
 * Registers the caller on the epoll instance's own read wait queue and
 * reports readiness based on whether the ready list is non-empty.
 *
 * @param file Pointer to the file structure backing the epoll fd.
 * @param req  Pointer to the poll request structure.
 *
 * @return Bitmask POLLIN | EPOLLRDNORM | POLLOUT when the ready list has
 *         entries, 0 otherwise.
 */
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    struct rt_eventpoll *ep;
    int events = 0;
    rt_base_t level;

    if (file->vnode->data)
    {
        ep = file->vnode->data;
        ep->req._key = req->_key;

        /* Mutex orders us against ctl/close; the spinlock alone guards
         * rdl_head. Lock order (mutex then spinlock) matches the rest of
         * the file. */
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        rt_poll_add(&ep->epoll_read, req);

        level = rt_spin_lock_irqsave(&ep->spinlock);
        if (!rt_slist_isempty(&ep->rdl_head))
            events |= POLLIN | EPOLLRDNORM | POLLOUT;
        rt_spin_unlock_irqrestore(&ep->spinlock, level);

        rt_mutex_release(&ep->lock);
    }

    return events;
}
/**
 * @brief Wakeup callback invoked from a monitored fd's wait queue.
 *
 * Moves the corresponding monitor node onto the epoll ready list (if not
 * already there), marks the instance triggered, and wakes pollers of the
 * epoll fd itself. The thread suspended in epoll_wait() is woken only when
 * it was observed in the WAITING state, which avoids the spurious-wakeup
 * problem described in the file's change log.
 *
 * @param wait Wait queue node embedded in the monitor entry.
 * @param key  Event mask associated with the wakeup (may be NULL).
 *
 * @return 0 when the waiting thread was woken, -1 otherwise (the node
 *         stays on the wait queue).
 */
static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_base_t level;
    int is_waiting = 0;

    /* Ignore wakeups whose event mask does not intersect our interest set. */
    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    fdlist = rt_container_of(wait, struct rt_fd_list, wqn);

    ep = fdlist->ep;
    if (ep)
    {
        level = rt_spin_lock_irqsave(&ep->spinlock);
        if (fdlist->is_rdl_node == RT_FALSE)
        {
            rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
            fdlist->exclusive = 0;
            fdlist->is_rdl_node = RT_TRUE;
            ep->eventpoll_num++;
            /* Sample the waiter state under the spinlock so the wake
             * decision is consistent with epoll_wait_timeout(). */
            is_waiting = (ep->status == RT_EPOLL_STAT_WAITING);
            ep->status = RT_EPOLL_STAT_TRIG;
            rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
        }
        rt_spin_unlock_irqrestore(&ep->spinlock, level);
    }

    if (is_waiting)
    {
        return __wqueue_default_wake(wait, key);
    }

    return -1;
}
  200. /**
  201. * @brief Adds a callback function to the wait queue associated with epoll.
  202. *
  203. * This function adds a callback function to the wait queue associated with epoll.
  204. *
  205. * @param wq Pointer to the wait queue.
  206. * @param req Pointer to the poll request structure.
  207. */
  208. static void epoll_wqueue_add_callback(rt_wqueue_t *wq, rt_pollreq_t *req)
  209. {
  210. struct rt_fd_list *fdlist;
  211. struct rt_eventpoll *ep;
  212. fdlist = rt_container_of(req, struct rt_fd_list, req);
  213. ep = fdlist->ep;
  214. fdlist->wqn.key = req->_key;
  215. rt_list_init(&(fdlist->wqn.list));
  216. fdlist->wqn.polling_thread = ep->polling_thread;
  217. fdlist->wqn.wakeup = epoll_wqueue_callback;
  218. rt_wqueue_add(wq, &fdlist->wqn);
  219. }
/**
 * @brief Queues a freshly added/modified fd if it is already ready.
 *
 * Polls the fd once; if any requested event is already pending, the monitor
 * node is appended to the ready list and the instance is marked triggered,
 * so a subsequent epoll_wait() returns immediately instead of blocking.
 *
 * @param fdlist Monitor entry for the fd.
 * @param ep     Owning epoll instance.
 */
static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep)
{
    rt_uint32_t mask = 0;
    rt_base_t level;

    /* Restrict the poll to the user's interest set. */
    fdlist->req._key = fdlist->revents;
    mask = epoll_get_event(fdlist, &fdlist->req);
    if (mask & fdlist->revents)
    {
        if (ep)
        {
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            level = rt_spin_lock_irqsave(&ep->spinlock);
            rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
            fdlist->exclusive = 0;
            fdlist->is_rdl_node = RT_TRUE;
            ep->status = RT_EPOLL_STAT_TRIG;
            ep->eventpoll_num ++;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);
            rt_mutex_release(&ep->lock);
        }
    }
}
  250. /**
  251. * @brief Initializes the epoll control structure.
  252. *
  253. * This function initializes the epoll control structure.
  254. *
  255. * @param ep Pointer to the epoll control structure.
  256. */
  257. static void epoll_member_init(struct rt_eventpoll *ep)
  258. {
  259. ep->status = RT_EPOLL_STAT_INIT;
  260. ep->eventpoll_num = 0;
  261. ep->polling_thread = rt_thread_self();
  262. ep->fdlist = RT_NULL;
  263. ep->req._key = 0;
  264. rt_slist_init(&(ep->rdl_head));
  265. rt_wqueue_init(&ep->epoll_read);
  266. rt_mutex_init(&ep->lock, EPOLL_MUTEX_NAME, RT_IPC_FLAG_FIFO);
  267. rt_spin_lock_init(&ep->spinlock);
  268. }
  269. /**
  270. * @brief Initializes the epoll file descriptor.
  271. *
  272. * This function initializes the epoll file descriptor.
  273. *
  274. * @param fd File descriptor.
  275. *
  276. * @return Returns 0 on success.
  277. */
  278. static int epoll_epf_init(int fd)
  279. {
  280. struct dfs_file *df;
  281. struct rt_eventpoll *ep;
  282. rt_err_t ret = 0;
  283. df = fd_get(fd);
  284. if (df)
  285. {
  286. ep = (struct rt_eventpoll *)rt_malloc(sizeof(struct rt_eventpoll));
  287. if (ep)
  288. {
  289. epoll_member_init(ep);
  290. #ifdef RT_USING_DFS_V2
  291. df->fops = &epoll_fops;
  292. #endif
  293. df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
  294. if (df->vnode)
  295. {
  296. ep->fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
  297. if (ep->fdlist)
  298. {
  299. ep->fdlist->next = RT_NULL;
  300. ep->fdlist->fd = fd;
  301. ep->fdlist->ep = ep;
  302. ep->fdlist->exclusive = 0;
  303. ep->fdlist->is_rdl_node = RT_FALSE;
  304. dfs_vnode_init(df->vnode, FT_REGULAR, &epoll_fops);
  305. df->vnode->data = ep;
  306. rt_slist_init(&ep->fdlist->rdl_node);
  307. }
  308. else
  309. {
  310. ret = -ENOMEM;
  311. rt_free(df->vnode);
  312. rt_free(ep);
  313. }
  314. }
  315. else
  316. {
  317. ret = -ENOMEM;
  318. rt_free(ep);
  319. }
  320. }
  321. else
  322. {
  323. ret = -ENOMEM;
  324. }
  325. }
  326. return ret;
  327. }
  328. /**
  329. * @brief Creates an epoll file descriptor.
  330. *
  331. * This function creates an epoll file descriptor.
  332. *
  333. * @param size Size of the epoll instance.
  334. *
  335. * @return Returns the file descriptor on success, or -1 on failure.
  336. */
  337. static int epoll_do_create(int size)
  338. {
  339. rt_err_t ret = -1;
  340. int status;
  341. int fd;
  342. if (size < 0)
  343. {
  344. rt_set_errno(EINVAL);
  345. }
  346. else
  347. {
  348. fd = fd_new();
  349. if (fd >= 0)
  350. {
  351. ret = fd;
  352. status = epoll_epf_init(fd);
  353. if (status < 0)
  354. {
  355. fd_release(fd);
  356. rt_set_errno(-status);
  357. }
  358. }
  359. else
  360. {
  361. rt_set_errno(-fd);
  362. }
  363. }
  364. return ret;
  365. }
/**
 * @brief Adds a file descriptor to the epoll instance.
 *
 * Rejects duplicates, allocates a monitor entry, links it after the
 * sentinel head, then polls the fd once (epoll_ctl_install) so an
 * already-ready fd is reported immediately.
 *
 * @param df    Pointer to the epoll file structure.
 * @param fd    File descriptor to add.
 * @param event Pointer to the epoll event structure (events + user data).
 *
 * @return Returns 0 on success (including when fd is already watched),
 *         -ENOMEM on allocation failure, -EINVAL when df has no instance.
 */
static int epoll_ctl_add(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;
        fdlist = ep->fdlist;
        ret = 0;

        /* Duplicate scan under the monitor-list mutex. */
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                rt_mutex_release(&ep->lock);
                return 0;
            }
            fdlist = fdlist->next;
        }
        rt_mutex_release(&ep->lock);

        /* NOTE(review): the lock is dropped between the duplicate scan and
         * the insertion below, so two concurrent ADDs of the same fd could
         * both succeed — confirm whether callers serialize epoll_ctl(). */
        fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
        if (fdlist)
        {
            fdlist->fd = fd;
            memcpy(&fdlist->epev.data, &event->data, sizeof(event->data));
            fdlist->epev.events = 0;
            fdlist->ep = ep;
            fdlist->exclusive = 0;
            fdlist->is_rdl_node = RT_FALSE;
            fdlist->req._proc = epoll_wqueue_add_callback;
            fdlist->revents = event->events;
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            /* Insert right after the sentinel head. */
            fdlist->next = ep->fdlist->next;
            ep->fdlist->next = fdlist;
            rt_mutex_release(&ep->lock);
            rt_slist_init(&fdlist->rdl_node);
            epoll_ctl_install(fdlist, ep);
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
  423. /**
  424. * @brief Removes a file descriptor from the epoll instance.
  425. *
  426. * This function removes a file descriptor from the epoll instance.
  427. *
  428. * @param df Pointer to the file structure.
  429. * @param fd File descriptor to remove.
  430. *
  431. * @return Returns 0 on success, or an error code on failure.
  432. */
  433. static int epoll_ctl_del(struct dfs_file *df, int fd)
  434. {
  435. struct rt_fd_list *fdlist, *fre_fd, *rdlist;
  436. struct rt_eventpoll *ep = RT_NULL;
  437. rt_slist_t *node = RT_NULL;
  438. rt_err_t ret = -EINVAL;
  439. rt_base_t level;
  440. if (df->vnode->data)
  441. {
  442. ep = df->vnode->data;
  443. if (ep)
  444. {
  445. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  446. level = rt_spin_lock_irqsave(&ep->spinlock);
  447. rt_slist_for_each(node, &ep->rdl_head)
  448. {
  449. rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
  450. if (rdlist->fd == fd)
  451. rt_slist_remove(&ep->rdl_head, node);
  452. }
  453. rt_spin_unlock_irqrestore(&ep->spinlock, level);
  454. fdlist = ep->fdlist;
  455. while (fdlist->next != RT_NULL)
  456. {
  457. if (fdlist->next->fd == fd)
  458. {
  459. fre_fd = fdlist->next;
  460. fdlist->next = fdlist->next->next;
  461. if (fre_fd->wqn.wqueue)
  462. rt_wqueue_remove(&fre_fd->wqn);
  463. rt_free(fre_fd);
  464. break;
  465. }
  466. else
  467. {
  468. fdlist = fdlist->next;
  469. }
  470. }
  471. rt_mutex_release(&ep->lock);
  472. }
  473. ret = 0;
  474. }
  475. return ret;
  476. }
/**
 * @brief Modifies the events associated with a watched file descriptor.
 *
 * Updates the user data and interest mask of the fd's monitor entry,
 * detaches its stale wait-queue registration, then re-polls the fd via
 * epoll_ctl_install() so a currently-ready fd is reported immediately.
 *
 * @param df    Pointer to the epoll file structure.
 * @param fd    File descriptor to modify.
 * @param event Pointer to the new epoll event structure.
 *
 * @return Returns 0 when df has an instance (even if fd was not found),
 *         or -EINVAL otherwise.
 */
static int epoll_ctl_mod(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep = RT_NULL;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;

        /* NOTE(review): the list is traversed before the mutex is taken;
         * a concurrent ADD/DEL could mutate it mid-walk — confirm whether
         * callers serialize epoll_ctl(). */
        fdlist = ep->fdlist;
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
                memcpy(&fdlist->next->epev.data, &event->data, sizeof(event->data));
                fdlist->next->revents = event->events;
                /* Drop the old wait-queue registration; epoll_ctl_install()
                 * re-registers with the new interest mask. */
                if (fdlist->next->wqn.wqueue)
                    rt_wqueue_remove(&fdlist->next->wqn);
                rt_mutex_release(&ep->lock);
                epoll_ctl_install(fdlist->next, ep);
                break;
            }

            fdlist = fdlist->next;
        }

        ret = 0;
    }

    return ret;
}
  516. /**
  517. * @brief Controls an epoll instance.
  518. *
  519. * This function controls an epoll instance, performing operations such as adding,
  520. * modifying, or removing file descriptors associated with the epoll instance.
  521. *
  522. * @param epfd File descriptor of the epoll instance.
  523. * @param op Operation to perform (EPOLL_CTL_ADD, EPOLL_CTL_DEL, or EPOLL_CTL_MOD).
  524. * @param fd File descriptor to add, modify, or remove.
  525. * @param event Pointer to the epoll event structure.
  526. *
  527. * @return Returns 0 on success, or -1 on failure with errno set appropriately.
  528. */
  529. static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event)
  530. {
  531. struct dfs_file *epdf;
  532. struct rt_eventpoll *ep;
  533. rt_err_t ret = 0;
  534. if (op & ~EFD_SHARED_EPOLL_TYPE)
  535. {
  536. rt_set_errno(EINVAL);
  537. return -1;
  538. }
  539. if ((epfd == fd) || (epfd < 0))
  540. {
  541. rt_set_errno(EINVAL);
  542. return -1;
  543. }
  544. if (!(op & EPOLL_CTL_DEL))
  545. {
  546. if (!(event->events & EPOLLEXCLUSIVE_BITS))
  547. {
  548. rt_set_errno(EINVAL);
  549. return -1;
  550. }
  551. event->events |= EPOLLERR | EPOLLHUP;
  552. }
  553. if (!fd_get(fd))
  554. {
  555. rt_set_errno(EBADF);
  556. return -1;
  557. }
  558. epdf = fd_get(epfd);
  559. if (epdf->vnode->data)
  560. {
  561. ep = epdf->vnode->data;
  562. switch (op)
  563. {
  564. case EPOLL_CTL_ADD:
  565. ret = epoll_ctl_add(epdf, fd, event);
  566. break;
  567. case EPOLL_CTL_DEL:
  568. ret = epoll_ctl_del(epdf, fd);
  569. break;
  570. case EPOLL_CTL_MOD:
  571. ret = epoll_ctl_mod(epdf, fd, event);
  572. break;
  573. default:
  574. rt_set_errno(EINVAL);
  575. break;
  576. }
  577. if (ret < 0)
  578. {
  579. rt_set_errno(-ret);
  580. ret = -1;
  581. }
  582. else
  583. {
  584. ep->polling_thread = rt_thread_self();
  585. }
  586. }
  587. return ret;
  588. }
/**
 * @brief Suspends the polling thread until triggered or timed out.
 *
 * Suspends under the instance spinlock so the TRIG/WAITING handshake with
 * epoll_wqueue_callback() is race-free: the callback only wakes the thread
 * if it observed the WAITING state that is published here before the
 * reschedule.
 *
 * @param ep   Pointer to the epoll instance.
 * @param msec Timeout in milliseconds (negative waits forever, 0 polls).
 *
 * @return Returns 0 if the instance was triggered, 1 otherwise (timeout
 *         or zero-timeout poll).
 */
static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
{
    rt_int32_t timeout;
    struct rt_thread *thread;
    rt_base_t level;
    int ret = 0;

    thread = ep->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&ep->spinlock);

    if (timeout != 0 && ep->status != RT_EPOLL_STAT_TRIG)
    {
        /* RT_KILLABLE: a signal may abort the suspend. */
        if (rt_thread_suspend_with_flag(thread, RT_KILLABLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }

            /* Publish WAITING before dropping the lock; the wakeup callback
             * reads it under the same spinlock. */
            ep->status = RT_EPOLL_STAT_WAITING;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);

            rt_schedule();

            level = rt_spin_lock_irqsave(&ep->spinlock);
            /* Still WAITING here means we timed out (nobody triggered us). */
            if (ep->status == RT_EPOLL_STAT_WAITING)
                ep->status = RT_EPOLL_STAT_INIT;
        }
    }

    ret = !(ep->status == RT_EPOLL_STAT_TRIG);

    rt_spin_unlock_irqrestore(&ep->spinlock, level);

    return ret;
}
  631. /**
  632. * @brief Gets events associated with a file descriptor in the epoll instance.
  633. *
  634. * This function gets events associated with a file descriptor in the epoll instance.
  635. *
  636. * @param fl Pointer to the file descriptor list structure.
  637. * @param req Pointer to the poll request structure.
  638. *
  639. * @return Returns the bitmask of events associated with the file descriptor.
  640. */
  641. static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req)
  642. {
  643. struct dfs_file *df;
  644. int mask = 0;
  645. int fd = 0;
  646. fd = fl->fd;
  647. if (fd >= 0)
  648. {
  649. df = fd_get(fd);
  650. if (df)
  651. {
  652. if (df->vnode->fops->poll)
  653. {
  654. req->_key = fl->revents | POLLERR | POLLHUP;
  655. mask = df->vnode->fops->poll(df, req);
  656. if (mask < 0)
  657. return mask;
  658. }
  659. mask &= fl->revents | EPOLLOUT | POLLERR;
  660. }
  661. }
  662. return mask;
  663. }
/**
 * @brief Core wait loop: harvests triggered events into the user array.
 *
 * Repeatedly drains the ready list, re-polling each entry to confirm its
 * events, until at least one event is collected or the timeout expires.
 * Level-triggered entries are re-queued on the ready list; edge-triggered
 * and oneshot entries are dropped after one report.
 *
 * Locking: ep->lock is held across the whole drain; ep->spinlock is
 * released around the re-poll of each entry (which may block in the fd's
 * poll()) and re-taken before touching the ready list again.
 *
 * @param ep        Pointer to the epoll instance.
 * @param events    Output array for triggered events.
 * @param maxevents Capacity of the output array.
 * @param timeout   Timeout in milliseconds (negative waits forever).
 *
 * @return Number of events stored in the output array.
 */
static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int maxevents, int timeout)
{
    struct rt_fd_list *rdlist;
    rt_slist_t *node = RT_NULL;
    int event_num = 0;
    int istimeout = 0;
    int isn_add = 0;   /* current entry produced no event: don't copy it out */
    int isfree = 0;    /* current entry must not be re-queued on the ready list */
    int mask = 0;
    rt_base_t level;

    while (1)
    {
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        level = rt_spin_lock_irqsave(&ep->spinlock);
        if (ep->eventpoll_num > 0)
        {
            rt_slist_for_each(node,&ep->rdl_head)
            {
                rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);

                /* Detach the entry before dropping the spinlock to re-poll. */
                ep->eventpoll_num --;
                rt_slist_remove(&ep->rdl_head, &rdlist->rdl_node);
                rdlist->is_rdl_node = RT_FALSE;

                rt_spin_unlock_irqrestore(&ep->spinlock, level);

                isfree = 0;
                isn_add = 0;

                if (event_num < maxevents)
                {
                    /* Deregister from the fd's wait queue; epoll_get_event()
                     * re-registers through the _proc hook when appropriate. */
                    if (rdlist->wqn.wqueue)
                    {
                        rt_wqueue_remove(&rdlist->wqn);
                    }

                    /* Confirm the event is still pending (it may have been
                     * consumed since the wakeup). */
                    mask = epoll_get_event(rdlist, &rdlist->req);

                    if (mask & rdlist->revents)
                    {
                        rdlist->epev.events = mask & rdlist->revents;
                    }
                    else
                    {
                        /* Stale trigger: report nothing, re-queue nothing. */
                        isfree = 1;
                        isn_add = 1;
                    }

                    if (rdlist->revents & EPOLLONESHOT)
                    {
                        /* Oneshot: disarm after the first report. */
                        rdlist->revents = 0;
                        isfree = 1;
                        if (rdlist->wqn.wqueue)
                            rt_wqueue_remove(&rdlist->wqn);
                    }
                    else
                    {
                        if (rdlist->revents & EPOLLET)
                        {
                            /* Edge-triggered: only re-armed by a new wakeup. */
                            isfree = 1;
                        }
                        else
                        {
                            /* Level-triggered bookkeeping. */
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            if (rdlist->exclusive != 1)
                            {
                                rdlist->exclusive = 1;
                            }
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                    }

                    if (!isn_add)
                    {
                        memcpy(&events[event_num], &rdlist->epev, sizeof(rdlist->epev));
                        event_num ++;
                    }

                    if (!isfree)
                    {
                        /* Level-triggered: keep the entry visible for the
                         * next epoll_wait() call. */
                        if (rdlist->is_rdl_node == RT_FALSE)
                        {
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            ep->eventpoll_num ++;
                            rt_slist_append(&ep->rdl_head, &rdlist->rdl_node);
                            rdlist->is_rdl_node = RT_TRUE;
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                        else
                        {
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            if (!rdlist->wqn.wqueue)
                            {
                                /* Re-poll solely to re-register on the fd's
                                 * wait queue via the _proc hook. */
                                epoll_get_event(rdlist, &rdlist->req);
                            }
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                    }
                }
                else
                {
                    /* Output array full: stop draining. */
                    level = rt_spin_lock_irqsave(&ep->spinlock);
                    break;
                }

                /* Re-take the spinlock for the next iteration of the drain. */
                level = rt_spin_lock_irqsave(&ep->spinlock);
            }
        }

        rt_spin_unlock_irqrestore(&ep->spinlock, level);
        rt_mutex_release(&ep->lock);

        if (event_num || istimeout)
        {
            level = rt_spin_lock_irqsave(&ep->spinlock);
            ep->status = RT_EPOLL_STAT_INIT;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);
            if ((timeout >= 0) || (event_num > 0))
                break;
        }

        if (epoll_wait_timeout(ep, timeout))
        {
            istimeout = 1;
        }
    }

    return event_num;
}
  791. /**
  792. * @brief Waits for events on an epoll instance with specified parameters.
  793. *
  794. * This function waits for events on the specified epoll instance within the given timeout, optionally blocking signals based on the provided signal set.
  795. *
  796. * @param epfd File descriptor referring to the epoll instance.
  797. * @param events Pointer to the array to store triggered events.
  798. * @param maxevents Maximum number of events to store in the array.
  799. * @param timeout Timeout value in milliseconds.
  800. * @param ss Pointer to the signal set indicating signals to block during the wait operation. Pass NULL if no signals need to be blocked.
  801. *
  802. * @return Returns the number of triggered events on success, or -1 on failure.
  803. */
  804. static int epoll_do_wait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
  805. {
  806. struct rt_eventpoll *ep;
  807. struct dfs_file *df;
  808. lwp_sigset_t old_sig, new_sig;
  809. rt_err_t ret = 0;
  810. if (ss)
  811. {
  812. memcpy(&new_sig, ss, sizeof(lwp_sigset_t));
  813. lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_BLOCK, &new_sig, &old_sig);
  814. }
  815. if ((maxevents > 0) && (epfd >= 0))
  816. {
  817. df = fd_get(epfd);
  818. if (df && df->vnode)
  819. {
  820. ep = (struct rt_eventpoll *)df->vnode->data;
  821. if (ep)
  822. {
  823. ret = epoll_do(ep, events, maxevents, timeout);
  824. }
  825. }
  826. }
  827. if (ss)
  828. {
  829. lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &old_sig, RT_NULL);
  830. }
  831. if (ret < 0)
  832. {
  833. rt_set_errno(-ret);
  834. ret = -1;
  835. }
  836. return ret;
  837. }
/**
 * @brief Creates an epoll instance.
 *
 * Thin public wrapper around epoll_do_create().
 *
 * @param size Size hint; only its sign is checked (negative is rejected).
 *
 * @return File descriptor of the new epoll instance on success, or -1 on
 *         failure with errno set.
 */
int epoll_create(int size)
{
    return epoll_do_create(size);
}
/**
 * @brief Adds, modifies, or removes an fd on an epoll instance.
 *
 * Thin public wrapper around epoll_do_ctl().
 *
 * @param epfd  File descriptor referring to the epoll instance.
 * @param op    Operation (EPOLL_CTL_ADD, EPOLL_CTL_DEL, or EPOLL_CTL_MOD).
 * @param fd    Target file descriptor.
 * @param event Pointer to the event structure (ignored for DEL).
 *
 * @return Returns 0 on success, or -1 on failure with errno set.
 */
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
    return epoll_do_ctl(epfd, op, fd, event);
}
/**
 * @brief Waits for events on an epoll instance.
 *
 * Thin public wrapper around epoll_do_wait() with no signal mask.
 *
 * @param epfd      File descriptor referring to the epoll instance.
 * @param events    Output array for triggered events.
 * @param maxevents Capacity of the output array.
 * @param timeout   Timeout in milliseconds.
 *
 * @return Number of triggered events on success, or -1 on failure.
 */
int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, RT_NULL);
}
/**
 * @brief Waits for events on an epoll instance, blocking signals.
 *
 * Thin public wrapper around epoll_do_wait() with a caller-supplied
 * signal mask that is applied for the duration of the wait.
 *
 * @param epfd      File descriptor referring to the epoll instance.
 * @param events    Output array for triggered events.
 * @param maxevents Capacity of the output array.
 * @param timeout   Timeout in milliseconds.
 * @param ss        Signal set to block during the wait, or NULL.
 *
 * @return Number of triggered events on success, or -1 on failure.
 */
int epoll_pwait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}
/**
 * @brief Waits for events on an epoll instance, blocking signals.
 *
 * Same behavior as epoll_pwait() here; the timeout is taken in
 * milliseconds (unlike Linux's epoll_pwait2, which takes a timespec).
 *
 * @param epfd      File descriptor referring to the epoll instance.
 * @param events    Output array for triggered events.
 * @param maxevents Capacity of the output array.
 * @param timeout   Timeout value in milliseconds.
 * @param ss        Signal set to block during the wait, or NULL.
 *
 * @return Number of triggered events on success, or -1 on failure.
 */
int epoll_pwait2(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}