dataqueue.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-09-30     Bernard      first version.
 * 2016-10-31     armink       fix some resume push and pop thread bugs
 */
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. #include <rthw.h>
/* magic value stamped into an initialized queue; asserted on every operation
 * to catch use of an uninitialized or already-deinitialized queue */
#define DATAQUEUE_MAGIC 0xbead0e0e

/* one slot of the queue: the queue stores only a pointer to caller-owned
 * data plus its length — it never copies the payload */
struct rt_data_item
{
    const void *data_ptr;  /* caller-owned payload pointer */
    rt_size_t data_size;   /* payload size in bytes */
};
  20. rt_err_t
  21. rt_data_queue_init(struct rt_data_queue *queue,
  22. rt_uint16_t size,
  23. rt_uint16_t lwm,
  24. void (*evt_notify)(struct rt_data_queue *queue, rt_uint32_t event))
  25. {
  26. RT_ASSERT(queue != RT_NULL);
  27. queue->evt_notify = evt_notify;
  28. queue->magic = DATAQUEUE_MAGIC;
  29. queue->size = size;
  30. queue->lwm = lwm;
  31. queue->get_index = 0;
  32. queue->put_index = 0;
  33. rt_list_init(&(queue->suspended_push_list));
  34. rt_list_init(&(queue->suspended_pop_list));
  35. queue->queue = (struct rt_data_item *)rt_malloc(sizeof(struct rt_data_item) * size);
  36. if (queue->queue == RT_NULL)
  37. {
  38. return -RT_ENOMEM;
  39. }
  40. return RT_EOK;
  41. }
  42. RTM_EXPORT(rt_data_queue_init);
  43. rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
  44. const void *data_ptr,
  45. rt_size_t data_size,
  46. rt_int32_t timeout)
  47. {
  48. rt_ubase_t level;
  49. rt_thread_t thread;
  50. rt_err_t result;
  51. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  52. RT_ASSERT(queue != RT_NULL);
  53. result = RT_EOK;
  54. thread = rt_thread_self();
  55. level = rt_hw_interrupt_disable();
  56. while (queue->put_index - queue->get_index == queue->size)
  57. {
  58. /* queue is full */
  59. if (timeout == 0)
  60. {
  61. result = -RT_ETIMEOUT;
  62. goto __exit;
  63. }
  64. /* current context checking */
  65. RT_DEBUG_NOT_IN_INTERRUPT;
  66. /* reset thread error number */
  67. thread->error = RT_EOK;
  68. /* suspend thread on the push list */
  69. rt_thread_suspend(thread);
  70. rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
  71. /* start timer */
  72. if (timeout > 0)
  73. {
  74. /* reset the timeout of thread timer and start it */
  75. rt_timer_control(&(thread->thread_timer),
  76. RT_TIMER_CTRL_SET_TIME,
  77. &timeout);
  78. rt_timer_start(&(thread->thread_timer));
  79. }
  80. /* enable interrupt */
  81. rt_hw_interrupt_enable(level);
  82. /* do schedule */
  83. rt_schedule();
  84. /* thread is waked up */
  85. result = thread->error;
  86. level = rt_hw_interrupt_disable();
  87. if (result != RT_EOK) goto __exit;
  88. }
  89. queue->queue[queue->put_index % queue->size].data_ptr = data_ptr;
  90. queue->queue[queue->put_index % queue->size].data_size = data_size;
  91. queue->put_index += 1;
  92. /* there is at least one thread in suspended list */
  93. if (!rt_list_isempty(&(queue->suspended_pop_list)))
  94. {
  95. /* get thread entry */
  96. thread = rt_list_entry(queue->suspended_pop_list.next,
  97. struct rt_thread,
  98. tlist);
  99. /* resume it */
  100. rt_thread_resume(thread);
  101. rt_hw_interrupt_enable(level);
  102. /* perform a schedule */
  103. rt_schedule();
  104. return result;
  105. }
  106. __exit:
  107. rt_hw_interrupt_enable(level);
  108. if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
  109. {
  110. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
  111. }
  112. return result;
  113. }
  114. RTM_EXPORT(rt_data_queue_push);
  115. rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
  116. const void** data_ptr,
  117. rt_size_t *size,
  118. rt_int32_t timeout)
  119. {
  120. rt_ubase_t level;
  121. rt_thread_t thread;
  122. rt_err_t result;
  123. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  124. RT_ASSERT(queue != RT_NULL);
  125. RT_ASSERT(data_ptr != RT_NULL);
  126. RT_ASSERT(size != RT_NULL);
  127. result = RT_EOK;
  128. thread = rt_thread_self();
  129. level = rt_hw_interrupt_disable();
  130. while (queue->get_index == queue->put_index)
  131. {
  132. /* queue is empty */
  133. if (timeout == 0)
  134. {
  135. result = -RT_ETIMEOUT;
  136. goto __exit;
  137. }
  138. /* current context checking */
  139. RT_DEBUG_NOT_IN_INTERRUPT;
  140. /* reset thread error number */
  141. thread->error = RT_EOK;
  142. /* suspend thread on the pop list */
  143. rt_thread_suspend(thread);
  144. rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
  145. /* start timer */
  146. if (timeout > 0)
  147. {
  148. /* reset the timeout of thread timer and start it */
  149. rt_timer_control(&(thread->thread_timer),
  150. RT_TIMER_CTRL_SET_TIME,
  151. &timeout);
  152. rt_timer_start(&(thread->thread_timer));
  153. }
  154. /* enable interrupt */
  155. rt_hw_interrupt_enable(level);
  156. /* do schedule */
  157. rt_schedule();
  158. /* thread is waked up */
  159. result = thread->error;
  160. level = rt_hw_interrupt_disable();
  161. if (result != RT_EOK)
  162. goto __exit;
  163. }
  164. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  165. *size = queue->queue[queue->get_index % queue->size].data_size;
  166. queue->get_index += 1;
  167. if ((queue->put_index - queue->get_index) <= queue->lwm)
  168. {
  169. /* there is at least one thread in suspended list */
  170. if (!rt_list_isempty(&(queue->suspended_push_list)))
  171. {
  172. /* get thread entry */
  173. thread = rt_list_entry(queue->suspended_push_list.next,
  174. struct rt_thread,
  175. tlist);
  176. /* resume it */
  177. rt_thread_resume(thread);
  178. rt_hw_interrupt_enable(level);
  179. /* perform a schedule */
  180. rt_schedule();
  181. }
  182. else
  183. {
  184. rt_hw_interrupt_enable(level);
  185. }
  186. if (queue->evt_notify != RT_NULL)
  187. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_LWM);
  188. return result;
  189. }
  190. __exit:
  191. rt_hw_interrupt_enable(level);
  192. if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
  193. {
  194. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
  195. }
  196. return result;
  197. }
  198. RTM_EXPORT(rt_data_queue_pop);
  199. rt_err_t rt_data_queue_peak(struct rt_data_queue *queue,
  200. const void** data_ptr,
  201. rt_size_t *size)
  202. {
  203. rt_ubase_t level;
  204. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  205. RT_ASSERT(queue != RT_NULL);
  206. level = rt_hw_interrupt_disable();
  207. if (queue->get_index == queue->put_index)
  208. {
  209. rt_hw_interrupt_enable(level);
  210. return -RT_EEMPTY;
  211. }
  212. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  213. *size = queue->queue[queue->get_index % queue->size].data_size;
  214. rt_hw_interrupt_enable(level);
  215. return RT_EOK;
  216. }
  217. RTM_EXPORT(rt_data_queue_peak);
  218. void rt_data_queue_reset(struct rt_data_queue *queue)
  219. {
  220. struct rt_thread *thread;
  221. register rt_ubase_t temp;
  222. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  223. rt_enter_critical();
  224. /* wakeup all suspend threads */
  225. /* resume on pop list */
  226. while (!rt_list_isempty(&(queue->suspended_pop_list)))
  227. {
  228. /* disable interrupt */
  229. temp = rt_hw_interrupt_disable();
  230. /* get next suspend thread */
  231. thread = rt_list_entry(queue->suspended_pop_list.next,
  232. struct rt_thread,
  233. tlist);
  234. /* set error code to RT_ERROR */
  235. thread->error = -RT_ERROR;
  236. /*
  237. * resume thread
  238. * In rt_thread_resume function, it will remove current thread from
  239. * suspend list
  240. */
  241. rt_thread_resume(thread);
  242. /* enable interrupt */
  243. rt_hw_interrupt_enable(temp);
  244. }
  245. /* resume on push list */
  246. while (!rt_list_isempty(&(queue->suspended_push_list)))
  247. {
  248. /* disable interrupt */
  249. temp = rt_hw_interrupt_disable();
  250. /* get next suspend thread */
  251. thread = rt_list_entry(queue->suspended_push_list.next,
  252. struct rt_thread,
  253. tlist);
  254. /* set error code to RT_ERROR */
  255. thread->error = -RT_ERROR;
  256. /*
  257. * resume thread
  258. * In rt_thread_resume function, it will remove current thread from
  259. * suspend list
  260. */
  261. rt_thread_resume(thread);
  262. /* enable interrupt */
  263. rt_hw_interrupt_enable(temp);
  264. }
  265. rt_exit_critical();
  266. rt_schedule();
  267. }
  268. RTM_EXPORT(rt_data_queue_reset);
  269. rt_err_t rt_data_queue_deinit(struct rt_data_queue *queue)
  270. {
  271. rt_ubase_t level;
  272. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  273. RT_ASSERT(queue != RT_NULL);
  274. level = rt_hw_interrupt_disable();
  275. /* wakeup all suspend threads */
  276. rt_data_queue_reset(queue);
  277. queue->magic = 0;
  278. rt_free(queue->queue);
  279. rt_hw_interrupt_enable(level);
  280. return RT_EOK;
  281. }
  282. RTM_EXPORT(rt_data_queue_deinit);