  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2018-08-25 armink the first version
  9. */
  10. #include <rthw.h>
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. /**
  14. * ring block buffer object initialization
  15. *
  16. * @param rbb ring block buffer object
  17. * @param buf buffer
  18. * @param buf_size buffer size
  19. * @param block_set block set
  20. * @param blk_max_num max block number
  21. *
  22. * @note When your application need align access, please make the buffer address is aligned.
  23. */
  24. void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
  25. {
  26. rt_size_t i;
  27. RT_ASSERT(rbb);
  28. RT_ASSERT(buf);
  29. RT_ASSERT(block_set);
  30. rbb->buf = buf;
  31. rbb->buf_size = buf_size;
  32. rbb->blk_set = block_set;
  33. rbb->blk_max_num = blk_max_num;
  34. rbb->tail = &rbb->blk_list;
  35. rt_slist_init(&rbb->blk_list);
  36. rt_slist_init(&rbb->free_list);
  37. /* initialize block status */
  38. for (i = 0; i < blk_max_num; i++)
  39. {
  40. block_set[i].status = RT_RBB_BLK_UNUSED;
  41. rt_slist_init(&block_set[i].list);
  42. rt_slist_insert(&rbb->free_list, &block_set[i].list);
  43. }
  44. }
  45. RTM_EXPORT(rt_rbb_init);
  46. #ifdef RT_USING_HEAP
  47. /**
  48. * ring block buffer object create
  49. *
  50. * @param buf_size buffer size
  51. * @param blk_max_num max block number
  52. *
  53. * @return != RT_NULL: ring block buffer object
  54. * RT_NULL: create failed
  55. */
  56. rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
  57. {
  58. rt_rbb_t rbb = RT_NULL;
  59. rt_uint8_t *buf;
  60. rt_rbb_blk_t blk_set;
  61. rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
  62. if (!rbb)
  63. {
  64. return RT_NULL;
  65. }
  66. buf = (rt_uint8_t *)rt_malloc(buf_size);
  67. if (!buf)
  68. {
  69. rt_free(rbb);
  70. return RT_NULL;
  71. }
  72. blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
  73. if (!blk_set)
  74. {
  75. rt_free(buf);
  76. rt_free(rbb);
  77. return RT_NULL;
  78. }
  79. rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);
  80. return rbb;
  81. }
  82. RTM_EXPORT(rt_rbb_create);
  83. /**
  84. * ring block buffer object destroy
  85. *
  86. * @param rbb ring block buffer object
  87. */
  88. void rt_rbb_destroy(rt_rbb_t rbb)
  89. {
  90. RT_ASSERT(rbb);
  91. rt_free(rbb->buf);
  92. rt_free(rbb->blk_set);
  93. rt_free(rbb);
  94. }
  95. RTM_EXPORT(rt_rbb_destroy);
  96. #endif
  97. static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
  98. {
  99. struct rt_rbb_blk *blk;
  100. RT_ASSERT(rbb);
  101. if (rt_slist_isempty(&rbb->free_list))
  102. {
  103. return RT_NULL;
  104. }
  105. blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
  106. rt_slist_remove(&rbb->free_list, &blk->list);
  107. RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
  108. return blk;
  109. }
  110. rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
  111. {
  112. /* append the node to the tail */
  113. rbb->tail->next = n;
  114. n->next = RT_NULL;
  115. /* save tail node */
  116. rbb->tail = n;
  117. }
  118. rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
  119. {
  120. rt_slist_t *l = &rbb->blk_list;
  121. struct rt_slist_node *node = l;
  122. /* remove slist head */
  123. while (node->next && node->next != n) node = node->next;
  124. /* remove node */
  125. if (node->next != (rt_slist_t *)0)
  126. {
  127. node->next = node->next->next;
  128. n->next = RT_NULL;
  129. /* update tail node */
  130. if (rbb->tail == n)
  131. rbb->tail = node;
  132. }
  133. return l;
  134. }
  135. /**
  136. * Allocate a block by given size. The block will add to blk_list when allocate success.
  137. *
  138. * @param rbb ring block buffer object
  139. * @param blk_size block size
  140. *
  141. * @note When your application need align access, please make the blk_szie is aligned.
  142. *
  143. * @return != RT_NULL: allocated block
  144. * RT_NULL: allocate failed
  145. */
  146. rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
  147. {
  148. rt_base_t level;
  149. rt_size_t empty1 = 0, empty2 = 0;
  150. rt_rbb_blk_t head, tail, new_rbb = RT_NULL;
  151. RT_ASSERT(rbb);
  152. RT_ASSERT(blk_size < (1L << 24));
  153. level = rt_hw_interrupt_disable();
  154. new_rbb = find_empty_blk_in_set(rbb);
  155. if (new_rbb)
  156. {
  157. if (rt_slist_isempty(&rbb->blk_list) == 0)
  158. {
  159. head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
  160. /* get tail rbb blk object */
  161. tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
  162. if (head->buf <= tail->buf)
  163. {
  164. /**
  165. * head tail
  166. * +--------------------------------------+-----------------+------------------+
  167. * | empty2 | block1 | block2 | block3 | empty1 |
  168. * +--------------------------------------+-----------------+------------------+
  169. * rbb->buf
  170. */
  171. empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
  172. empty2 = head->buf - rbb->buf;
  173. if (empty1 >= blk_size)
  174. {
  175. list_append(rbb, &new_rbb->list);
  176. new_rbb->status = RT_RBB_BLK_INITED;
  177. new_rbb->buf = tail->buf + tail->size;
  178. new_rbb->size = blk_size;
  179. }
  180. else if (empty2 >= blk_size)
  181. {
  182. list_append(rbb, &new_rbb->list);
  183. new_rbb->status = RT_RBB_BLK_INITED;
  184. new_rbb->buf = rbb->buf;
  185. new_rbb->size = blk_size;
  186. }
  187. else
  188. {
  189. /* no space */
  190. new_rbb = RT_NULL;
  191. }
  192. }
  193. else
  194. {
  195. /**
  196. * tail head
  197. * +----------------+-------------------------------------+--------+-----------+
  198. * | block3 | empty1 | block1 | block2 |
  199. * +----------------+-------------------------------------+--------+-----------+
  200. * rbb->buf
  201. */
  202. empty1 = head->buf - (tail->buf + tail->size);
  203. if (empty1 >= blk_size)
  204. {
  205. list_append(rbb, &new_rbb->list);
  206. new_rbb->status = RT_RBB_BLK_INITED;
  207. new_rbb->buf = tail->buf + tail->size;
  208. new_rbb->size = blk_size;
  209. }
  210. else
  211. {
  212. /* no space */
  213. new_rbb = RT_NULL;
  214. }
  215. }
  216. }
  217. else
  218. {
  219. /* the list is empty */
  220. list_append(rbb, &new_rbb->list);
  221. new_rbb->status = RT_RBB_BLK_INITED;
  222. new_rbb->buf = rbb->buf;
  223. new_rbb->size = blk_size;
  224. }
  225. }
  226. else
  227. {
  228. new_rbb = RT_NULL;
  229. }
  230. rt_hw_interrupt_enable(level);
  231. return new_rbb;
  232. }
  233. RTM_EXPORT(rt_rbb_blk_alloc);
  234. /**
  235. * put a block to ring block buffer object
  236. *
  237. * @param block the block
  238. */
  239. void rt_rbb_blk_put(rt_rbb_blk_t block)
  240. {
  241. RT_ASSERT(block);
  242. RT_ASSERT(block->status == RT_RBB_BLK_INITED);
  243. block->status = RT_RBB_BLK_PUT;
  244. }
  245. RTM_EXPORT(rt_rbb_blk_put);
  246. /**
  247. * get a block from the ring block buffer object
  248. *
  249. * @param rbb ring block buffer object
  250. *
  251. * @return != RT_NULL: block
  252. * RT_NULL: get failed
  253. */
  254. rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
  255. {
  256. rt_base_t level;
  257. rt_rbb_blk_t block = RT_NULL;
  258. rt_slist_t *node;
  259. RT_ASSERT(rbb);
  260. if (rt_slist_isempty(&rbb->blk_list))
  261. return 0;
  262. level = rt_hw_interrupt_disable();
  263. for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
  264. {
  265. block = rt_slist_entry(node, struct rt_rbb_blk, list);
  266. if (block->status == RT_RBB_BLK_PUT)
  267. {
  268. block->status = RT_RBB_BLK_GET;
  269. goto __exit;
  270. }
  271. }
  272. /* not found */
  273. block = RT_NULL;
  274. __exit:
  275. rt_hw_interrupt_enable(level);
  276. return block;
  277. }
  278. RTM_EXPORT(rt_rbb_blk_get);
  279. /**
  280. * return the block size
  281. *
  282. * @param block the block
  283. *
  284. * @return block size
  285. */
  286. rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
  287. {
  288. RT_ASSERT(block);
  289. return block->size;
  290. }
  291. RTM_EXPORT(rt_rbb_blk_size);
  292. /**
  293. * return the block buffer
  294. *
  295. * @param block the block
  296. *
  297. * @return block buffer
  298. */
  299. rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
  300. {
  301. RT_ASSERT(block);
  302. return block->buf;
  303. }
  304. RTM_EXPORT(rt_rbb_blk_buf);
  305. /**
  306. * free the block
  307. *
  308. * @param rbb ring block buffer object
  309. * @param block the block
  310. */
  311. void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
  312. {
  313. rt_base_t level;
  314. RT_ASSERT(rbb);
  315. RT_ASSERT(block);
  316. RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);
  317. level = rt_hw_interrupt_disable();
  318. /* remove it on rbb block list */
  319. list_remove(rbb, &block->list);
  320. block->status = RT_RBB_BLK_UNUSED;
  321. rt_slist_insert(&rbb->free_list, &block->list);
  322. rt_hw_interrupt_enable(level);
  323. }
  324. RTM_EXPORT(rt_rbb_blk_free);
  325. /**
  326. * get a continuous block to queue by given size
  327. *
  328. * tail head
  329. * +------------------+---------------+--------+----------+--------+
  330. * | block3 | empty1 | block1 | block2 |fragment|
  331. * +------------------+------------------------+----------+--------+
  332. * |<-- return_size -->| |
  333. * |<--- queue_data_len --->|
  334. *
  335. * tail head
  336. * +------------------+---------------+--------+----------+--------+
  337. * | block3 | empty1 | block1 | block2 |fragment|
  338. * +------------------+------------------------+----------+--------+
  339. * |<-- return_size -->| out of len(b1+b2+b3) |
  340. * |<-------------------- queue_data_len -------------------->|
  341. *
  342. * @param rbb ring block buffer object
  343. * @param queue_data_len The max queue data size, and the return size must less then it.
  344. * @param queue continuous block queue
  345. *
  346. * @return the block queue data total size
  347. */
  348. rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
  349. {
  350. rt_base_t level;
  351. rt_size_t data_total_size = 0;
  352. rt_slist_t *node, *tmp = RT_NULL;
  353. rt_rbb_blk_t last_block = RT_NULL, block;
  354. RT_ASSERT(rbb);
  355. RT_ASSERT(blk_queue);
  356. if (rt_slist_isempty(&rbb->blk_list))
  357. return 0;
  358. level = rt_hw_interrupt_disable();
  359. node = rt_slist_first(&rbb->blk_list);
  360. if (node != RT_NULL)
  361. {
  362. tmp = rt_slist_next(node);
  363. }
  364. for (; node; node = tmp, tmp = rt_slist_next(node))
  365. {
  366. if (!last_block)
  367. {
  368. last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
  369. if (last_block->status == RT_RBB_BLK_PUT)
  370. {
  371. /* save the first put status block to queue */
  372. blk_queue->blocks = last_block;
  373. blk_queue->blk_num = 0;
  374. }
  375. else
  376. {
  377. /* the first block must be put status */
  378. last_block = RT_NULL;
  379. continue;
  380. }
  381. }
  382. else
  383. {
  384. block = rt_slist_entry(node, struct rt_rbb_blk, list);
  385. /*
  386. * these following conditions will break the loop:
  387. * 1. the current block is not put status
  388. * 2. the last block and current block is not continuous
  389. * 3. the data_total_size will out of range
  390. */
  391. if (block->status != RT_RBB_BLK_PUT ||
  392. last_block->buf > block->buf ||
  393. data_total_size + block->size > queue_data_len)
  394. {
  395. break;
  396. }
  397. /* backup last block */
  398. last_block = block;
  399. }
  400. /* remove current block */
  401. data_total_size += last_block->size;
  402. last_block->status = RT_RBB_BLK_GET;
  403. blk_queue->blk_num++;
  404. }
  405. rt_hw_interrupt_enable(level);
  406. return data_total_size;
  407. }
  408. RTM_EXPORT(rt_rbb_blk_queue_get);
  409. /**
  410. * get all block length on block queue
  411. *
  412. * @param blk_queue the block queue
  413. *
  414. * @return total length
  415. */
  416. rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
  417. {
  418. rt_size_t i = 0, data_total_size = 0;
  419. rt_rbb_blk_t blk;
  420. RT_ASSERT(blk_queue);
  421. for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
  422. {
  423. data_total_size += blk->size;
  424. blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
  425. }
  426. return data_total_size;
  427. }
  428. RTM_EXPORT(rt_rbb_blk_queue_len);
  429. /**
  430. * return the block queue buffer
  431. *
  432. * @param blk_queue the block queue
  433. *
  434. * @return block queue buffer
  435. */
  436. rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
  437. {
  438. RT_ASSERT(blk_queue);
  439. return blk_queue->blocks[0].buf;
  440. }
  441. RTM_EXPORT(rt_rbb_blk_queue_buf);
  442. /**
  443. * free the block queue
  444. *
  445. * @param rbb ring block buffer object
  446. * @param blk_queue the block queue
  447. */
  448. void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
  449. {
  450. rt_size_t i = 0;
  451. rt_rbb_blk_t blk, next_blk;
  452. RT_ASSERT(rbb);
  453. RT_ASSERT(blk_queue);
  454. for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
  455. {
  456. next_blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
  457. rt_rbb_blk_free(rbb, blk);
  458. blk = next_blk;
  459. }
  460. }
  461. RTM_EXPORT(rt_rbb_blk_queue_free);
/**
 * The PUT-status and buffer-continuous blocks can be made into a block queue.
 * This function returns the length of the next block queue that could be made.
 *
 * Scans blk_list for the first PUT-status block, then sums the sizes of the
 * following blocks while they stay in PUT status and remain contiguous
 * (non-wrapping) in the data buffer. Does not change any block's status.
 *
 * @param rbb ring block buffer object
 *
 * @return the length of the next possible block queue, 0 when no PUT block exists
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    /* walk blk_list with interrupts disabled so the list cannot change */
    level = rt_hw_interrupt_disable();
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            /* still looking for the start of the queue */
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * these following conditions will break the loop:
             * 1. the current block is not put status
             * 2. the last block and current block is not continuous
             *    (a smaller buf address means the buffer wrapped around)
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        /* accumulate the block just accepted into the queue length */
        data_len += last_block->size;
    }
    rt_hw_interrupt_enable(level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
  513. /**
  514. * get the ring block buffer object buffer size
  515. *
  516. * @param rbb ring block buffer object
  517. *
  518. * @return buffer size
  519. */
  520. rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
  521. {
  522. RT_ASSERT(rbb);
  523. return rbb->buf_size;
  524. }
  525. RTM_EXPORT(rt_rbb_get_buf_size);