/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2023-07-20     zmq810150896  first version
 */
  10. #include <rtthread.h>
  11. #include <fcntl.h>
  12. #include <rtdevice.h>
  13. #include <stdint.h>
  14. #include <unistd.h>
  15. #include <dfs_file.h>
  16. #include "poll.h"
  17. #include "eventfd.h"
  18. #define EFD_SEMAPHORE (1 << 0)
  19. #define EFD_CLOEXEC O_CLOEXEC
  20. #define EFD_NONBLOCK O_NONBLOCK
  21. #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
  22. #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
  23. #define ULLONG_MAX (~0ULL)
  24. #define EVENTFD_MUTEX_NAME "eventfd"
  25. struct eventfd_ctx
  26. {
  27. rt_wqueue_t reader_queue;
  28. rt_wqueue_t writer_queue;
  29. rt_uint64_t count;
  30. unsigned int flags;
  31. struct rt_mutex lock;
  32. };
  33. #ifndef RT_USING_DFS_V2
  34. static int eventfd_close(struct dfs_file *file);
  35. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
  36. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count);
  37. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count);
  38. #else
  39. static int eventfd_close(struct dfs_file *file);
  40. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
  41. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
  42. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);
  43. #endif
  44. static const struct dfs_file_ops eventfd_fops =
  45. {
  46. .close = eventfd_close,
  47. .poll = eventfd_poll,
  48. .read = eventfd_read,
  49. .write = eventfd_write,
  50. };
  51. static int eventfd_close(struct dfs_file *file)
  52. {
  53. struct eventfd_ctx *ctx = file->vnode->data;
  54. if (file->vnode->ref_count == 1)
  55. {
  56. rt_mutex_detach(&ctx->lock);
  57. rt_free(ctx);
  58. }
  59. return 0;
  60. }
  61. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req)
  62. {
  63. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  64. int events = 0;
  65. rt_uint64_t count;
  66. count = ctx->count;
  67. rt_poll_add(&ctx->reader_queue, req);
  68. if (count > 0)
  69. events |= POLLIN;
  70. if (count == ULLONG_MAX)
  71. events |= POLLERR;
  72. if ((ULLONG_MAX - 1) > count)
  73. events |= POLLOUT;
  74. return events;
  75. }
  76. #ifndef RT_USING_DFS_V2
  77. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count)
  78. #else
  79. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
  80. #endif
  81. {
  82. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  83. rt_uint64_t counter_num = 0;
  84. rt_uint64_t *buffer;
  85. if (count < sizeof(counter_num))
  86. return -EINVAL;
  87. buffer = (rt_uint64_t *)buf;
  88. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  89. if (ctx->count <= 0)
  90. {
  91. if (file->flags & O_NONBLOCK)
  92. {
  93. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  94. rt_mutex_release(&ctx->lock);
  95. return -EAGAIN;
  96. }
  97. else
  98. {
  99. /* In this case, when the data is read in blocked mode, when ctx->count is 0, the mutex needs to be released and wait for writing */
  100. rt_mutex_release(&ctx->lock);
  101. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  102. rt_wqueue_wait(&ctx->reader_queue, 0, RT_WAITING_FOREVER);
  103. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  104. }
  105. }
  106. if (ctx->flags & EFD_SEMAPHORE)
  107. {
  108. counter_num = 1;
  109. }
  110. else
  111. {
  112. counter_num = ctx->count;
  113. }
  114. ctx->count -= counter_num;
  115. (*buffer) = counter_num;
  116. rt_mutex_release(&ctx->lock);
  117. return sizeof(counter_num);
  118. }
  119. #ifndef RT_USING_DFS_V2
  120. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count)
  121. #else
  122. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
  123. #endif
  124. {
  125. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  126. rt_ssize_t ret = 0;
  127. rt_uint64_t counter_num;
  128. if (count < sizeof(counter_num))
  129. return -EINVAL;
  130. counter_num = *(rt_uint64_t *)buf;
  131. if (counter_num == ULLONG_MAX)
  132. return -EINVAL;
  133. ret = -EAGAIN;
  134. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  135. if ((ULLONG_MAX - ctx->count) > counter_num)
  136. {
  137. ret = sizeof(counter_num);
  138. }
  139. else if (!(file->flags & O_NONBLOCK))
  140. {
  141. for (;;)
  142. {
  143. if ((ULLONG_MAX - ctx->count) >= counter_num)
  144. {
  145. ret = sizeof(counter_num);
  146. break;
  147. }
  148. /* Release the mutex to avoid a deadlock */
  149. rt_mutex_release(&ctx->lock);
  150. rt_wqueue_wait(&ctx->writer_queue, 0, RT_WAITING_FOREVER);
  151. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  152. }
  153. }
  154. if (ret > 0)
  155. {
  156. ctx->count += counter_num;
  157. rt_wqueue_wakeup(&ctx->reader_queue, (void *)POLLIN);
  158. }
  159. rt_mutex_release(&ctx->lock);
  160. return ret;
  161. }
  162. static int rt_eventfd_create(struct dfs_file *df, unsigned int count, int flags)
  163. {
  164. struct eventfd_ctx *ctx = RT_NULL;
  165. rt_err_t ret = 0;
  166. ctx = (struct eventfd_ctx *)rt_malloc(sizeof(struct eventfd_ctx));
  167. if (ctx == RT_NULL)
  168. {
  169. ret = -ENOMEM;
  170. }
  171. else
  172. {
  173. ctx->count = count;
  174. ctx->flags = flags;
  175. flags &= EFD_SHARED_FCNTL_FLAGS;
  176. flags |= O_RDWR;
  177. rt_mutex_init(&ctx->lock, EVENTFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
  178. rt_wqueue_init(&ctx->reader_queue);
  179. rt_wqueue_init(&ctx->writer_queue);
  180. df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
  181. if (df->vnode)
  182. {
  183. dfs_vnode_init(df->vnode, FT_NONLOCK, &eventfd_fops);
  184. df->vnode->data = ctx;
  185. df->flags = flags;
  186. }
  187. else
  188. {
  189. rt_mutex_detach(&ctx->lock);
  190. rt_free(ctx);
  191. ret = -ENOMEM;
  192. }
  193. #ifdef RT_USING_DFS_V2
  194. df->fops = &eventfd_fops;
  195. #endif
  196. }
  197. return ret;
  198. }
  199. static int do_eventfd(unsigned int count, int flags)
  200. {
  201. struct dfs_file *file;
  202. int fd;
  203. int status;
  204. rt_ssize_t ret = 0;
  205. if (flags & ~EFD_FLAGS_SET)
  206. {
  207. rt_set_errno(EINVAL);
  208. return -1;
  209. }
  210. fd = fd_new();
  211. if (fd >= 0)
  212. {
  213. ret = fd;
  214. file = fd_get(fd);
  215. status = rt_eventfd_create(file, count, flags);
  216. if (status < 0)
  217. {
  218. fd_release(fd);
  219. rt_set_errno(-status);
  220. ret = -1;
  221. }
  222. }
  223. else
  224. {
  225. rt_set_errno(-fd);
  226. ret = -1;
  227. }
  228. return ret;
  229. }
  230. int eventfd(unsigned int count, int flags)
  231. {
  232. return do_eventfd(count, flags);
  233. }