/* eventfd.c — RT-Thread implementation of the eventfd(2) notification mechanism */
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2023-07-20     zmq810150896  first version
 */
#include <rtthread.h>
#include <fcntl.h>
#include <rtdevice.h>
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include "poll.h"
#include "eventfd.h"
  18. #define EFD_SEMAPHORE (1 << 0)
  19. #define EFD_CLOEXEC O_CLOEXEC
  20. #define EFD_NONBLOCK O_NONBLOCK
  21. #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
  22. #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
  23. #define ULLONG_MAX (~0ULL)
  24. #define EVENTFD_MUTEX_NAME "eventfd"
/* Per-eventfd instance state, stored in the vnode's private data pointer */
struct eventfd_ctx
{
    rt_wqueue_t reader_queue;   /* threads blocked in read() waiting for count > 0 */
    rt_wqueue_t writer_queue;   /* threads blocked in write() waiting for counter headroom */
    rt_uint64_t count;          /* the 64-bit event counter */
    unsigned int flags;         /* EFD_* creation flags; EFD_SEMAPHORE is checked in read() */
    struct rt_mutex lock;       /* serializes all access to count */
};
  33. #ifndef RT_USING_DFS_V2
  34. static int eventfd_close(struct dfs_file *file);
  35. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
  36. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count);
  37. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count);
  38. #else
  39. static int eventfd_close(struct dfs_file *file);
  40. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
  41. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
  42. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);
  43. #endif
/* File operations installed on every eventfd vnode */
static const struct dfs_file_ops eventfd_fops =
{
    .close = eventfd_close,
    .poll  = eventfd_poll,
    .read  = eventfd_read,
    .write = eventfd_write,
};
  51. static int eventfd_close(struct dfs_file *file)
  52. {
  53. struct eventfd_ctx *ctx = file->vnode->data;
  54. rt_mutex_detach(&ctx->lock);
  55. rt_free(ctx);
  56. return 0;
  57. }
  58. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req)
  59. {
  60. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  61. int events = 0;
  62. rt_uint64_t count;
  63. count = ctx->count;
  64. rt_poll_add(&ctx->reader_queue, req);
  65. if (count > 0)
  66. events |= POLLIN;
  67. if (count == ULLONG_MAX)
  68. events |= POLLERR;
  69. if ((ULLONG_MAX - 1) > count)
  70. events |= POLLOUT;
  71. return events;
  72. }
  73. #ifndef RT_USING_DFS_V2
  74. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count)
  75. #else
  76. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
  77. #endif
  78. {
  79. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  80. rt_uint64_t counter_num = 0;
  81. rt_uint64_t *buffer;
  82. if (count < sizeof(counter_num))
  83. return -EINVAL;
  84. buffer = (rt_uint64_t *)buf;
  85. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  86. if (ctx->count <= 0)
  87. {
  88. if (file->flags & O_NONBLOCK)
  89. {
  90. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  91. rt_mutex_release(&ctx->lock);
  92. return -EAGAIN;
  93. }
  94. else
  95. {
  96. /* In this case, when the data is read in blocked mode, when ctx->count is 0, the mutex needs to be released and wait for writing */
  97. rt_mutex_release(&ctx->lock);
  98. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  99. rt_wqueue_wait(&ctx->reader_queue, 0, RT_WAITING_FOREVER);
  100. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  101. }
  102. }
  103. if (ctx->flags & EFD_SEMAPHORE)
  104. {
  105. counter_num = 1;
  106. }
  107. else
  108. {
  109. counter_num = ctx->count;
  110. }
  111. ctx->count -= counter_num;
  112. (*buffer) = counter_num;
  113. rt_mutex_release(&ctx->lock);
  114. return sizeof(counter_num);
  115. }
  116. #ifndef RT_USING_DFS_V2
  117. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count)
  118. #else
  119. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
  120. #endif
  121. {
  122. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  123. rt_ssize_t ret = 0;
  124. rt_uint64_t counter_num;
  125. if (count < sizeof(counter_num))
  126. return -EINVAL;
  127. counter_num = *(rt_uint64_t *)buf;
  128. if (counter_num == ULLONG_MAX)
  129. return -EINVAL;
  130. ret = -EAGAIN;
  131. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  132. if ((ULLONG_MAX - ctx->count) > counter_num)
  133. {
  134. ret = sizeof(counter_num);
  135. }
  136. else if (!(file->flags & O_NONBLOCK))
  137. {
  138. for (;;)
  139. {
  140. if ((ULLONG_MAX - ctx->count) >= counter_num)
  141. {
  142. ret = sizeof(counter_num);
  143. break;
  144. }
  145. /* Release the mutex to avoid a deadlock */
  146. rt_mutex_release(&ctx->lock);
  147. rt_wqueue_wait(&ctx->writer_queue, 0, RT_WAITING_FOREVER);
  148. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  149. }
  150. }
  151. if (ret > 0)
  152. {
  153. ctx->count += counter_num;
  154. rt_wqueue_wakeup(&ctx->reader_queue, (void *)POLLIN);
  155. }
  156. rt_mutex_release(&ctx->lock);
  157. return ret;
  158. }
  159. static int rt_eventfd_create(struct dfs_file *df, unsigned int count, int flags)
  160. {
  161. struct eventfd_ctx *ctx = RT_NULL;
  162. rt_err_t ret = 0;
  163. ctx = (struct eventfd_ctx *)rt_malloc(sizeof(struct eventfd_ctx));
  164. if (ctx == RT_NULL)
  165. {
  166. ret = -ENOMEM;
  167. }
  168. else
  169. {
  170. ctx->count = count;
  171. ctx->flags = flags;
  172. flags &= EFD_SHARED_FCNTL_FLAGS;
  173. flags |= O_RDWR;
  174. rt_mutex_init(&ctx->lock, EVENTFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
  175. rt_wqueue_init(&ctx->reader_queue);
  176. rt_wqueue_init(&ctx->writer_queue);
  177. df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
  178. if (df->vnode)
  179. {
  180. dfs_vnode_init(df->vnode, FT_REGULAR, &eventfd_fops);
  181. df->vnode->data = ctx;
  182. df->flags = flags;
  183. }
  184. else
  185. {
  186. rt_mutex_detach(&ctx->lock);
  187. rt_free(ctx);
  188. ret = -ENOMEM;
  189. }
  190. #ifdef RT_USING_DFS_V2
  191. df->fops = &eventfd_fops;
  192. #endif
  193. }
  194. return ret;
  195. }
  196. static int do_eventfd(unsigned int count, int flags)
  197. {
  198. struct dfs_file *file;
  199. int fd;
  200. int status;
  201. rt_ssize_t ret = 0;
  202. if (flags & ~EFD_FLAGS_SET)
  203. {
  204. rt_set_errno(EINVAL);
  205. return -1;
  206. }
  207. fd = fd_new();
  208. if (fd >= 0)
  209. {
  210. ret = fd;
  211. file = fd_get(fd);
  212. status = rt_eventfd_create(file, count, flags);
  213. if (status < 0)
  214. {
  215. fd_release(fd);
  216. rt_set_errno(-status);
  217. ret = -1;
  218. }
  219. }
  220. else
  221. {
  222. rt_set_errno(-fd);
  223. ret = -1;
  224. }
  225. return ret;
  226. }
  227. int eventfd(unsigned int count, int flags)
  228. {
  229. return do_eventfd(count, flags);
  230. }