epoll.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2023-07-29     zmq810150896  first version
 */

#include <rtthread.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include "sys/epoll.h"
#include "poll.h"
#include <lwp_signal.h>

#define EPOLL_MUTEX_NAME "EVENTEPOLL"

#define EFD_SHARED_EPOLL_TYPE (EPOLL_CTL_ADD | EPOLL_CTL_DEL | EPOLL_CTL_MOD)
#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
#define EPOLLEXCLUSIVE_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                             EPOLLET | EPOLLEXCLUSIVE)
static struct rt_spinlock spinlock;

struct rt_eventpoll;

/* Monitor queue */
struct rt_fd_list
{
    rt_uint32_t revents;            /* Monitored events */
    struct epoll_event epev;
    rt_pollreq_t req;
    struct rt_eventpoll *ep;
    struct rt_wqueue_node wqn;
    int fd;
    struct rt_fd_list *next;
};

struct rt_ready_list
{
    int exclusive;                  /* Level-trigger bookkeeping: once the event has been
                                     * reported, this marks the entry so that any data still
                                     * unread is reported again by the next epoll_wait */
    struct rt_fd_list *rdl_event;   /* rdl: ready list */
    struct rt_ready_list *next;
};

struct rt_eventpoll
{
    rt_uint32_t triggered;          /* whether the waiting thread has been woken up */
    rt_wqueue_t epoll_read;
    rt_thread_t polling_thread;
    struct rt_mutex lock;
    struct rt_fd_list *fdlist;      /* Monitor list */
    int eventpoll_num;              /* Number of ready-list entries */
    rt_pollreq_t req;
    struct rt_ready_list *rdlist;   /* ready list */
};
static int epoll_close(struct dfs_file *file);
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req);
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req);
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event);

static const struct dfs_file_ops epoll_fops =
{
    .close = epoll_close,
    .poll  = epoll_poll,
};
static int epoll_close_fdlist(struct rt_fd_list *fdlist)
{
    struct rt_fd_list *fre_node, *list;

    if (fdlist != RT_NULL)
    {
        list = fdlist;
        while (list->next != RT_NULL)
        {
            fre_node = list->next;
            rt_wqueue_remove(&fre_node->wqn);
            list->next = fre_node->next;
            rt_free(fre_node);
        }

        rt_free(fdlist);
    }

    return 0;
}

static int epoll_close_rdlist(struct rt_ready_list *rdlist)
{
    struct rt_ready_list *list, *fre_node;

    list = rdlist;
    if (list)
    {
        while (list->next != RT_NULL)
        {
            fre_node = list->next;
            list->next = fre_node->next;
            rt_free(fre_node);
        }

        rt_free(rdlist);
    }

    return 0;
}
static int epoll_close(struct dfs_file *file)
{
    struct rt_eventpoll *ep;

    if (file->vnode)
    {
        if (file->vnode->data)
        {
            ep = file->vnode->data;
            if (ep)
            {
                rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
                if (ep->fdlist)
                {
                    epoll_close_fdlist(ep->fdlist);
                }

                if (ep->rdlist)
                {
                    epoll_close_rdlist(ep->rdlist);
                }

                rt_mutex_release(&ep->lock);
                rt_mutex_detach(&ep->lock);
                rt_free(ep);
            }
        }
    }

    return 0;
}
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    struct rt_eventpoll *ep;
    struct rt_fd_list *fdlist;
    int mask;
    int events = 0;

    if (file->vnode->data)
    {
        ep = file->vnode->data;
        ep->req._key = req->_key;

        rt_poll_add(&ep->epoll_read, req);

        fdlist = ep->fdlist;
        if (fdlist)
        {
            while (fdlist->next != RT_NULL)
            {
                fdlist = fdlist->next;
                mask = epoll_get_event(fdlist, &fdlist->req);

                if (mask & fdlist->revents)
                {
                    events |= mask | POLLIN | EPOLLRDNORM;
                    break;
                }
            }
        }
    }

    return events;
}
static int epoll_rdlist_add(struct rt_fd_list *fdl, rt_uint32_t revents)
{
    struct rt_ready_list *rdlist = RT_NULL;
    struct rt_eventpoll *ep;
    int isexist = 0;
    int res = -1;

    ep = fdl->ep;

    if (revents & ep->req._key)
    {
        rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
    }

    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);

    if (ep->rdlist == RT_NULL)
    {
        ep->rdlist = (struct rt_ready_list *)rt_malloc(sizeof(struct rt_ready_list));
        if (ep->rdlist == RT_NULL)
        {
            /* release the mutex on the error path, too */
            rt_mutex_release(&ep->lock);
            return -1;
        }
        ep->rdlist->next = RT_NULL;
    }

    rdlist = ep->rdlist;
    while (rdlist->next != RT_NULL)
    {
        rdlist = rdlist->next;
        if (rdlist->rdl_event->fd == fdl->fd)
        {
            isexist = 1;
            res = 0;
            break;
        }
    }

    if (!isexist)
    {
        rdlist = (struct rt_ready_list *)rt_malloc(sizeof(struct rt_ready_list));
        if (rdlist != RT_NULL)
        {
            rdlist->rdl_event = fdl;
            rdlist->rdl_event->epev.events = fdl->revents & revents;
            rdlist->next = ep->rdlist->next;
            rdlist->exclusive = 0;
            ep->rdlist->next = rdlist;
            ep->eventpoll_num++;
            res = 0;
        }
    }

    ep->triggered = 1;

    rt_mutex_release(&ep->lock);

    return res;
}
static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
    struct rt_fd_list *fdlist;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    fdlist = rt_container_of(wait, struct rt_fd_list, wqn);
    if (fdlist->revents)
    {
        epoll_rdlist_add(fdlist, (rt_ubase_t)key);
    }

    return __wqueue_default_wake(wait, key);
}

static void epoll_wqueue_add_callback(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;

    fdlist = rt_container_of(req, struct rt_fd_list, req);

    ep = fdlist->ep;
    fdlist->wqn.key = req->_key;

    rt_list_init(&(fdlist->wqn.list));

    fdlist->wqn.polling_thread = ep->polling_thread;
    fdlist->wqn.wakeup = epoll_wqueue_callback;
    rt_wqueue_add(wq, &fdlist->wqn);
}

static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep)
{
    rt_uint32_t mask = 0;

    fdlist->req._key = fdlist->revents;

    mask = epoll_get_event(fdlist, &fdlist->req);

    if (mask & fdlist->revents)
    {
        epoll_rdlist_add(fdlist, mask);
    }
}

static void epoll_member_init(struct rt_eventpoll *ep)
{
    ep->triggered = 0;
    ep->eventpoll_num = 0;
    ep->polling_thread = rt_thread_self();
    ep->rdlist = RT_NULL;
    ep->fdlist = RT_NULL;
    ep->req._key = 0;

    rt_wqueue_init(&ep->epoll_read);
    rt_spin_lock_init(&spinlock);
}
static int epoll_epf_init(int fd)
{
    struct dfs_file *df;
    struct rt_eventpoll *ep;
    rt_err_t ret = 0;

    df = fd_get(fd);
    if (df)
    {
        ep = (struct rt_eventpoll *)rt_malloc(sizeof(struct rt_eventpoll));
        if (ep)
        {
            epoll_member_init(ep);

            rt_mutex_init(&ep->lock, EPOLL_MUTEX_NAME, RT_IPC_FLAG_FIFO);

#ifdef RT_USING_DFS_V2
            df->fops = &epoll_fops;
#endif

            df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
            if (df->vnode)
            {
                ep->fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
                if (ep->fdlist)
                {
                    ep->fdlist->next = RT_NULL;
                    ep->fdlist->fd = fd;
                    ep->fdlist->ep = ep;
                    dfs_vnode_init(df->vnode, FT_REGULAR, &epoll_fops);
                    df->vnode->data = ep;
                }
                else
                {
                    ret = -ENOMEM;
                    rt_free(df->vnode);
                    rt_free(ep);
                }
            }
            else
            {
                ret = -ENOMEM;
                rt_free(ep);
            }
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
static int epoll_do_create(int size)
{
    rt_err_t ret = -1;
    int status;
    int fd;

    if (size < 0)
    {
        rt_set_errno(EINVAL);
    }
    else
    {
        fd = fd_new();
        if (fd >= 0)
        {
            ret = fd;
            status = epoll_epf_init(fd);
            if (status < 0)
            {
                fd_release(fd);
                rt_set_errno(-status);
                ret = -1; /* report failure instead of the released fd */
            }
        }
        else
        {
            rt_set_errno(-fd);
        }
    }

    return ret;
}
static int epoll_ctl_add(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;
        fdlist = ep->fdlist;
        ret = 0;

        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                return 0;
            }
            fdlist = fdlist->next;
        }

        fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
        if (fdlist)
        {
            fdlist->fd = fd;
            memcpy(&fdlist->epev.data, &event->data, sizeof(event->data));
            fdlist->epev.events = event->events;
            fdlist->ep = ep;
            fdlist->req._proc = epoll_wqueue_add_callback;
            fdlist->next = ep->fdlist->next;
            fdlist->revents = event->events;
            ep->fdlist->next = fdlist;

            epoll_ctl_install(fdlist, ep);
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
static int epoll_ctl_del(struct dfs_file *df, int fd)
{
    struct rt_fd_list *fdlist, *fre_fd;
    struct rt_eventpoll *ep = RT_NULL;
    struct rt_ready_list *rdlist, *fre_rdl;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;

        fdlist = ep->fdlist;
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                fre_fd = fdlist->next;
                fdlist->next = fdlist->next->next;
                if (fre_fd->epev.events != 0)
                {
                    rt_wqueue_remove(&fre_fd->wqn);
                }
                rt_free(fre_fd);
                break;
            }
            else
            {
                fdlist = fdlist->next;
            }
        }

        if (ep->rdlist)
        {
            rdlist = ep->rdlist;
            while (rdlist->next != RT_NULL)
            {
                if (rdlist->next->rdl_event->fd == fd)
                {
                    fre_rdl = rdlist->next;
                    rdlist->next = rdlist->next->next;
                    ep->eventpoll_num--;
                    rt_free(fre_rdl);
                    break;
                }
                else
                {
                    rdlist = rdlist->next;
                }
            }
        }

        ret = 0;
    }

    return ret;
}
static int epoll_ctl_mod(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep = RT_NULL;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;

        fdlist = ep->fdlist;
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                memcpy(&fdlist->next->epev.data, &event->data, sizeof(event->data));
                fdlist->next->revents = event->events;
                rt_wqueue_remove(&fdlist->next->wqn);
                epoll_ctl_install(fdlist->next, ep);
                break;
            }

            fdlist = fdlist->next;
        }

        ret = 0;
    }

    return ret;
}
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
    struct dfs_file *epdf;
    struct rt_eventpoll *ep;
    rt_err_t ret = 0;

    if (op & ~EFD_SHARED_EPOLL_TYPE)
    {
        rt_set_errno(EINVAL);
        return -1;
    }

    if ((epfd == fd) || (epfd < 0))
    {
        rt_set_errno(EINVAL);
        return -1;
    }

    /* EPOLL_CTL_DEL may legally be called with a NULL event, so only
     * validate the event mask for ADD and MOD */
    if ((op != EPOLL_CTL_DEL) && !(event->events & EPOLLEXCLUSIVE_BITS))
    {
        rt_set_errno(EINVAL);
        return -1;
    }

    if (!fd_get(fd))
    {
        rt_set_errno(EBADF);
        return -1;
    }

    epdf = fd_get(epfd);

    if (epdf->vnode->data)
    {
        ep = epdf->vnode->data;

        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);

        switch (op)
        {
        case EPOLL_CTL_ADD:
            ret = epoll_ctl_add(epdf, fd, event);
            break;
        case EPOLL_CTL_DEL:
            ret = epoll_ctl_del(epdf, fd);
            break;
        case EPOLL_CTL_MOD:
            ret = epoll_ctl_mod(epdf, fd, event);
            break;
        default:
            rt_set_errno(EINVAL);
            break;
        }

        if (ret < 0)
        {
            rt_set_errno(-ret);
            ret = -1;
        }

        rt_mutex_release(&ep->lock);
    }

    return ret;
}
static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
{
    rt_int32_t timeout;
    struct rt_thread *thread;
    rt_base_t level;
    int ret = 0;

    thread = ep->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&spinlock);

    if (timeout != 0 && !ep->triggered)
    {
        if (rt_thread_suspend_with_flag(thread, RT_KILLABLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }

            rt_spin_unlock_irqrestore(&spinlock, level);

            rt_schedule();

            level = rt_spin_lock_irqsave(&spinlock);
        }
    }

    ret = !ep->triggered;
    rt_spin_unlock_irqrestore(&spinlock, level);

    return ret;
}
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req)
{
    struct dfs_file *df;
    int mask = 0;
    int fd = 0;

    fd = fl->fd;
    if (fd >= 0)
    {
        df = fd_get(fd);
        if (df)
        {
            if (df->vnode->fops->poll)
            {
                req->_key = fl->revents | POLLERR | POLLHUP;
                mask = df->vnode->fops->poll(df, req);
                if (mask < 0)
                    return mask;
            }

            mask &= fl->revents | EPOLLOUT | POLLERR;
        }
    }

    return mask;
}
static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int maxevents, int timeout)
{
    struct rt_ready_list *rdlist, *pre_rdlist;
    int event_num = 0;
    int istimeout = 0;
    int isn_add = 0;
    int isfree = 0;
    int mask = 0;

    while (1)
    {
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        if (ep->eventpoll_num > 0)
        {
            rdlist = ep->rdlist;
            while (rdlist->next != RT_NULL)
            {
                isfree = 0;
                isn_add = 0;
                pre_rdlist = rdlist;
                rdlist = rdlist->next;
                if (event_num < maxevents)
                {
                    rt_wqueue_remove(&rdlist->rdl_event->wqn);
                    mask = epoll_get_event(rdlist->rdl_event, &rdlist->rdl_event->req);

                    if (mask & rdlist->rdl_event->revents)
                    {
                        rdlist->rdl_event->epev.events = mask & rdlist->rdl_event->revents;
                    }
                    else
                    {
                        isfree = 1;
                        isn_add = 1;
                    }

                    if (rdlist->rdl_event->revents & EPOLLONESHOT)
                    {
                        rdlist->rdl_event->revents = 0;
                        isfree = 1;
                        rt_wqueue_remove(&rdlist->rdl_event->wqn);
                    }
                    else
                    {
                        if (rdlist->rdl_event->revents & EPOLLET)
                        {
                            isfree = 1;
                        }
                        else
                        {
                            if (rdlist->exclusive != 1)
                            {
                                rdlist->exclusive = 1;
                            }
                        }
                    }

                    if (!isn_add)
                    {
                        memcpy(&events[event_num], &rdlist->rdl_event->epev, sizeof(rdlist->rdl_event->epev));
                        event_num++;
                    }

                    if (isfree)
                    {
                        pre_rdlist->next = rdlist->next;
                        rt_free(rdlist);
                        ep->eventpoll_num--;
                        rdlist = pre_rdlist;
                    }
                }
                else
                {
                    break;
                }
            }
        }

        rt_mutex_release(&ep->lock);

        if (event_num || istimeout)
        {
            ep->triggered = 0;
            break;
        }

        if (epoll_wait_timeout(ep, timeout))
        {
            istimeout = 1;
        }
    }

    return event_num;
}
static int epoll_do_wait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    struct rt_eventpoll *ep;
    struct dfs_file *df;
    lwp_sigset_t old_sig, new_sig;
    rt_err_t ret = 0;

    if (ss)
    {
        memcpy(&new_sig, ss, sizeof(lwp_sigset_t));
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_BLOCK, &new_sig, &old_sig);
    }

    if ((maxevents > 0) && (epfd >= 0))
    {
        df = fd_get(epfd);
        if (df && df->vnode)
        {
            ep = (struct rt_eventpoll *)df->vnode->data;
            if (ep)
            {
                ret = epoll_do(ep, events, maxevents, timeout);
            }
        }
    }

    if (ss)
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &old_sig, RT_NULL);
    }

    if (ret < 0)
    {
        rt_set_errno(-ret);
        ret = -1;
    }

    return ret;
}
int epoll_create(int size)
{
    return epoll_do_create(size);
}

int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
    return epoll_do_ctl(epfd, op, fd, event);
}

int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, RT_NULL);
}

int epoll_pwait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}

int epoll_pwait2(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}
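
Below is a minimal usage sketch of the public API this file implements (epoll_create / epoll_ctl / epoll_wait). It is illustrative rather than part of the source: it assumes RT-Thread's POSIX layer provides pipe() and close() (any pollable descriptor would work equally well), and that closing the epoll descriptor reaches epoll_close() through the registered dfs_file_ops.

#include <unistd.h>
#include <sys/epoll.h>

void epoll_usage_sketch(void)
{
    int fds[2];
    struct epoll_event ev, out;

    if (pipe(fds) < 0)              /* illustrative event source */
        return;

    int epfd = epoll_create(1);     /* the size argument is only sanity-checked */

    ev.events = EPOLLIN;            /* level-triggered readability */
    ev.data.fd = fds[0];
    epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev);

    write(fds[1], "x", 1);          /* make the read end ready */

    /* blocks for at most 1000 ms; returns the number of ready events */
    int n = epoll_wait(epfd, &out, 1, 1000);
    if (n == 1 && (out.events & EPOLLIN))
    {
        char c;
        read(out.data.fd, &c, 1);
    }

    close(epfd);                    /* releases the rt_eventpoll via epoll_close() */
    close(fds[0]);
    close(fds[1]);
}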