eventfd.c 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-07-20 zmq810150896 first version
  9. */
  10. #include <rtthread.h>
  11. #include <fcntl.h>
  12. #include <rtdevice.h>
  13. #include <stdint.h>
  14. #include <unistd.h>
  15. #include <dfs_file.h>
  16. #include <dfs.h>
  17. #include "poll.h"
  18. #include "eventfd.h"
  19. #define EFD_SEMAPHORE (1 << 0)
  20. #define EFD_CLOEXEC O_CLOEXEC
  21. #define EFD_NONBLOCK O_NONBLOCK
  22. #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
  23. #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
  24. #define EFD_ULLONG_MAX (~0ULL)
  25. #define EVENTFD_MUTEX_NAME "eventfd"
/* Per-eventfd object state, shared by every descriptor referring to it. */
struct eventfd_ctx
{
    rt_wqueue_t reader_queue;   /* readers blocked waiting for count > 0 */
    rt_wqueue_t writer_queue;   /* writers blocked waiting for counter headroom */
    rt_uint64_t count;          /* the 64-bit eventfd counter */
    unsigned int flags;         /* EFD_* creation flags (EFD_SEMAPHORE changes read semantics) */
    struct rt_mutex lock;       /* serializes all access to count */
};
/* DFS v1 and v2 use different read/write prototypes (v2 adds an offset
 * argument), so the forward declarations are split accordingly. */
#ifndef RT_USING_DFS_V2
static int eventfd_close(struct dfs_file *file);
static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count);
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count);
#else
static int eventfd_close(struct dfs_file *file);
static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req);
static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);
#endif

/* File operations installed on every eventfd vnode. */
static const struct dfs_file_ops eventfd_fops =
{
    .close = eventfd_close,
    .poll  = eventfd_poll,
    .read  = eventfd_read,
    .write = eventfd_write,
};
  52. static int eventfd_close(struct dfs_file *file)
  53. {
  54. struct eventfd_ctx *ctx = file->vnode->data;
  55. if (file->vnode->ref_count == 1)
  56. {
  57. rt_mutex_detach(&ctx->lock);
  58. rt_free(ctx);
  59. }
  60. return 0;
  61. }
  62. static int eventfd_poll(struct dfs_file *file, struct rt_pollreq *req)
  63. {
  64. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  65. int events = 0;
  66. rt_uint64_t count;
  67. count = ctx->count;
  68. rt_poll_add(&ctx->reader_queue, req);
  69. if (count > 0)
  70. events |= POLLIN;
  71. if (count == EFD_ULLONG_MAX)
  72. events |= POLLERR;
  73. if ((EFD_ULLONG_MAX - 1) > count)
  74. events |= POLLOUT;
  75. return events;
  76. }
  77. #ifndef RT_USING_DFS_V2
  78. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count)
  79. #else
  80. static ssize_t eventfd_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
  81. #endif
  82. {
  83. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  84. rt_uint64_t counter_num = 0;
  85. rt_uint64_t *buffer;
  86. if (count < sizeof(counter_num))
  87. return -EINVAL;
  88. buffer = (rt_uint64_t *)buf;
  89. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  90. if (ctx->count <= 0)
  91. {
  92. if (file->flags & O_NONBLOCK)
  93. {
  94. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  95. rt_mutex_release(&ctx->lock);
  96. return -EAGAIN;
  97. }
  98. else
  99. {
  100. /* In this case, when the data is read in blocked mode, when ctx->count is 0, the mutex needs to be released and wait for writing */
  101. rt_mutex_release(&ctx->lock);
  102. rt_wqueue_wakeup(&ctx->writer_queue, (void*)POLLOUT);
  103. rt_wqueue_wait(&ctx->reader_queue, 0, RT_WAITING_FOREVER);
  104. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  105. }
  106. }
  107. if (ctx->flags & EFD_SEMAPHORE)
  108. {
  109. counter_num = 1;
  110. }
  111. else
  112. {
  113. counter_num = ctx->count;
  114. }
  115. ctx->count -= counter_num;
  116. (*buffer) = counter_num;
  117. rt_mutex_release(&ctx->lock);
  118. return sizeof(counter_num);
  119. }
  120. #ifndef RT_USING_DFS_V2
  121. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count)
  122. #else
  123. static ssize_t eventfd_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
  124. #endif
  125. {
  126. struct eventfd_ctx *ctx = (struct eventfd_ctx *)file->vnode->data;
  127. rt_ssize_t ret = 0;
  128. rt_uint64_t counter_num;
  129. if (count < sizeof(counter_num))
  130. return -EINVAL;
  131. counter_num = *(rt_uint64_t *)buf;
  132. if (counter_num == EFD_ULLONG_MAX)
  133. return -EINVAL;
  134. ret = -EAGAIN;
  135. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  136. if ((EFD_ULLONG_MAX - ctx->count) > counter_num)
  137. {
  138. ret = sizeof(counter_num);
  139. }
  140. else if (!(file->flags & O_NONBLOCK))
  141. {
  142. for (;;)
  143. {
  144. if ((EFD_ULLONG_MAX - ctx->count) >= counter_num)
  145. {
  146. ret = sizeof(counter_num);
  147. break;
  148. }
  149. /* Release the mutex to avoid a deadlock */
  150. rt_mutex_release(&ctx->lock);
  151. rt_wqueue_wait(&ctx->writer_queue, 0, RT_WAITING_FOREVER);
  152. rt_mutex_take(&ctx->lock, RT_WAITING_FOREVER);
  153. }
  154. }
  155. if (ret > 0)
  156. {
  157. ctx->count += counter_num;
  158. rt_wqueue_wakeup(&ctx->reader_queue, (void *)POLLIN);
  159. }
  160. rt_mutex_release(&ctx->lock);
  161. return ret;
  162. }
  163. static int rt_eventfd_create(struct dfs_file *df, unsigned int count, int flags)
  164. {
  165. struct eventfd_ctx *ctx = RT_NULL;
  166. rt_err_t ret = 0;
  167. ctx = (struct eventfd_ctx *)rt_malloc(sizeof(struct eventfd_ctx));
  168. if (ctx == RT_NULL)
  169. {
  170. ret = -ENOMEM;
  171. }
  172. else
  173. {
  174. ctx->count = count;
  175. ctx->flags = flags;
  176. flags &= EFD_SHARED_FCNTL_FLAGS;
  177. flags |= O_RDWR;
  178. rt_mutex_init(&ctx->lock, EVENTFD_MUTEX_NAME, RT_IPC_FLAG_FIFO);
  179. rt_wqueue_init(&ctx->reader_queue);
  180. rt_wqueue_init(&ctx->writer_queue);
  181. df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
  182. if (df->vnode)
  183. {
  184. dfs_vnode_init(df->vnode, FT_NONLOCK, &eventfd_fops);
  185. df->vnode->data = ctx;
  186. df->flags = flags;
  187. }
  188. else
  189. {
  190. rt_mutex_detach(&ctx->lock);
  191. rt_free(ctx);
  192. ret = -ENOMEM;
  193. }
  194. #ifdef RT_USING_DFS_V2
  195. df->fops = &eventfd_fops;
  196. #endif
  197. }
  198. return ret;
  199. }
  200. static int do_eventfd(unsigned int count, int flags)
  201. {
  202. struct dfs_file *file;
  203. int fd;
  204. int status;
  205. rt_ssize_t ret = 0;
  206. if (flags & ~EFD_FLAGS_SET)
  207. {
  208. rt_set_errno(EINVAL);
  209. return -1;
  210. }
  211. fd = fd_new();
  212. if (fd >= 0)
  213. {
  214. ret = fd;
  215. file = fd_get(fd);
  216. status = rt_eventfd_create(file, count, flags);
  217. if (status < 0)
  218. {
  219. fd_release(fd);
  220. rt_set_errno(-status);
  221. ret = -1;
  222. }
  223. }
  224. else
  225. {
  226. rt_set_errno(-fd);
  227. ret = -1;
  228. }
  229. return ret;
  230. }
/**
 * eventfd - create a file descriptor for event notification (Linux-style API).
 *
 * @param count initial value of the internal 64-bit counter
 * @param flags bitwise OR of EFD_SEMAPHORE, EFD_CLOEXEC, EFD_NONBLOCK
 *
 * @return the new file descriptor on success, -1 on failure with errno set
 */
int eventfd(unsigned int count, int flags)
{
    return do_eventfd(count, flags);
}