/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-08-25     armink       the first version
 */

#include <rthw.h>
#include <rtdevice.h>

/**
 * ring block buffer object initialization
 *
 * @param rbb ring block buffer object
 * @param buf buffer
 * @param buf_size buffer size
 * @param block_set block set
 * @param blk_max_num max block number
 *
 * @note When your application needs aligned access, please make sure the buffer address is aligned.
 */
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rbb->tail = &rbb->blk_list;
    rt_slist_init(&rbb->blk_list);
    rt_slist_init(&rbb->free_list);
    /* initialize block status */
    for (i = 0; i < blk_max_num; i++)
    {
        block_set[i].status = RT_RBB_BLK_UNUSED;
        rt_slist_init(&block_set[i].list);
        rt_slist_insert(&rbb->free_list, &block_set[i].list);
    }
}
RTM_EXPORT(rt_rbb_init);
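
/*
 * Usage sketch (illustrative only, not part of the original file): one way to
 * initialize an rbb with statically allocated storage. The names and sizes
 * below (sample_*, 256 bytes, 8 blocks) are assumptions for the example.
 */
#if 0
#define SAMPLE_BUF_SIZE     256
#define SAMPLE_BLK_MAX_NUM  8

static struct rt_rbb sample_rbb;
/* keep the buffer aligned if the application needs aligned access */
static rt_uint8_t sample_buf[SAMPLE_BUF_SIZE];
static struct rt_rbb_blk sample_blk_set[SAMPLE_BLK_MAX_NUM];

static void sample_rbb_setup(void)
{
    rt_rbb_init(&sample_rbb, sample_buf, SAMPLE_BUF_SIZE, sample_blk_set, SAMPLE_BLK_MAX_NUM);
}
#endif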

#ifdef RT_USING_HEAP
/**
 * ring block buffer object create
 *
 * @param buf_size buffer size
 * @param blk_max_num max block number
 *
 * @return != RT_NULL: ring block buffer object
 *            RT_NULL: create failed
 */
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb = RT_NULL;
    rt_uint8_t *buf;
    rt_rbb_blk_t blk_set;

    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (!rbb)
    {
        return RT_NULL;
    }

    buf = (rt_uint8_t *)rt_malloc(buf_size);
    if (!buf)
    {
        rt_free(rbb);
        return RT_NULL;
    }

    blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (!blk_set)
    {
        rt_free(buf);
        rt_free(rbb);
        return RT_NULL;
    }

    rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);
    return rbb;
}
RTM_EXPORT(rt_rbb_create);

/**
 * ring block buffer object destroy
 *
 * @param rbb ring block buffer object
 */
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);
#endif /* RT_USING_HEAP */
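
/*
 * Usage sketch (illustrative only, requires RT_USING_HEAP): the heap-based
 * lifecycle pairs rt_rbb_create() with rt_rbb_destroy(). The 1 KB / 16-block
 * sizing is an assumption for the example.
 */
#if 0
static void sample_rbb_heap_lifecycle(void)
{
    rt_rbb_t rbb = rt_rbb_create(1024, 16);
    if (rbb == RT_NULL)
    {
        return;
    }
    /* ... allocate, put, get and free blocks here ... */
    rt_rbb_destroy(rbb);
}
#endif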

static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    struct rt_rbb_blk *blk;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->free_list))
    {
        return RT_NULL;
    }

    blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
    rt_slist_remove(&rbb->free_list, &blk->list);
    RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
    return blk;
}

rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
{
    /* append the node to the tail */
    rbb->tail->next = n;
    n->next = RT_NULL;
    /* save tail node */
    rbb->tail = n;
}

rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
{
    rt_slist_t *l = &rbb->blk_list;
    struct rt_slist_node *node = l;

    /* find the node previous to n, starting from the list head */
    while (node->next && node->next != n) node = node->next;
    /* remove node */
    if (node->next != (rt_slist_t *)0)
    {
        node->next = node->next->next;
        n->next = RT_NULL;
        /* update tail node */
        if (rbb->tail == n)
            rbb->tail = node;
    }
    return l;
}

/**
 * Allocate a block by the given size. The block will be appended to blk_list when allocation succeeds.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note When your application needs aligned access, please make sure blk_size is aligned.
 *
 * @return != RT_NULL: allocated block
 *            RT_NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_rbb = RT_NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_hw_interrupt_disable();

    new_rbb = find_empty_blk_in_set(rbb);

    if (new_rbb)
    {
        if (rt_slist_isempty(&rbb->blk_list) == 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            /* get tail rbb blk object */
            tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *                     head              tail
                 * +-----------------+--------+--------+--------+-----------------+
                 * |      empty2     | block1 | block2 | block3 |      empty1     |
                 * +-----------------+--------+--------+--------+-----------------+
                 * rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = rbb->buf;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space: return the block to the free list so it is not leaked */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
            else
            {
                /**
                 *        tail                                head
                 * +-----------------+--------------------------+--------+--------+
                 * |      block3     |          empty1          | block1 | block2 |
                 * +-----------------+--------------------------+--------+--------+
                 * rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space: return the block to the free list so it is not leaked */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            list_append(rbb, &new_rbb->list);
            new_rbb->status = RT_RBB_BLK_INITED;
            new_rbb->buf = rbb->buf;
            new_rbb->size = blk_size;
        }
    }
    else
    {
        new_rbb = RT_NULL;
    }

    rt_hw_interrupt_enable(level);
    return new_rbb;
}
RTM_EXPORT(rt_rbb_blk_alloc);
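
/*
 * Usage sketch (illustrative only): a typical producer allocates a block of
 * the needed size, fills its buffer, then publishes it with rt_rbb_blk_put().
 * The data source is an assumption for the example.
 */
#if 0
static void sample_producer(rt_rbb_t rbb, const rt_uint8_t *data, rt_size_t len)
{
    rt_rbb_blk_t blk = rt_rbb_blk_alloc(rbb, len);
    if (blk == RT_NULL)
    {
        return; /* no continuous space is available right now */
    }
    rt_memcpy(rt_rbb_blk_buf(blk), data, len);
    /* mark the block as put so consumers can see it */
    rt_rbb_blk_put(blk);
}
#endif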

/**
 * put a block to the ring block buffer object
 *
 * @param block the block
 */
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);

/**
 * get a block from the ring block buffer object
 *
 * @param rbb ring block buffer object
 *
 * @return != RT_NULL: block
 *            RT_NULL: get failed
 */
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t block = RT_NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return RT_NULL;

    level = rt_hw_interrupt_disable();
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        block = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (block->status == RT_RBB_BLK_PUT)
        {
            block->status = RT_RBB_BLK_GET;
            goto __exit;
        }
    }
    /* not found */
    block = RT_NULL;

__exit:
    rt_hw_interrupt_enable(level);
    return block;
}
RTM_EXPORT(rt_rbb_blk_get);
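
/*
 * Usage sketch (illustrative only): the matching consumer drains put blocks in
 * order and releases each one back to the free list. sample_process() is a
 * hypothetical handler, not part of this file.
 */
#if 0
extern void sample_process(const rt_uint8_t *data, rt_size_t len); /* hypothetical */

static void sample_consumer(rt_rbb_t rbb)
{
    rt_rbb_blk_t blk;

    while ((blk = rt_rbb_blk_get(rbb)) != RT_NULL)
    {
        sample_process(rt_rbb_blk_buf(blk), rt_rbb_blk_size(blk));
        /* freeing returns the block and its buffer space to the rbb */
        rt_rbb_blk_free(rbb, blk);
    }
}
#endif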

/**
 * return the block size
 *
 * @param block the block
 *
 * @return block size
 */
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);

/**
 * return the block buffer
 *
 * @param block the block
 *
 * @return block buffer
 */
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);

/**
 * free the block
 *
 * @param rbb ring block buffer object
 * @param block the block
 */
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_hw_interrupt_disable();
    /* remove it from the rbb block list */
    list_remove(rbb, &block->list);
    block->status = RT_RBB_BLK_UNUSED;
    rt_slist_insert(&rbb->free_list, &block->list);
    rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_rbb_blk_free);

/**
 * get a continuous block queue by the given size
 *
 *        tail                 head
 * +-----------------+----------+---------+---------+--------+
 * |      block3     |  empty1  | block1  | block2  |fragment|
 * +-----------------+----------+---------+---------+--------+
 *                              |<-- return_size -->|
 *                              |<--- queue_data_len --->|
 *
 *        tail                 head
 * +-----------------+----------+---------+---------+--------+
 * |      block3     |  empty1  | block1  | block2  |fragment|
 * +-----------------+----------+---------+---------+--------+
 *                              |<-- return_size -->|   out of len(b1+b2+b3)
 *                              |<-------------------- queue_data_len -------------------->|
 *
 * @param rbb ring block buffer object
 * @param queue_data_len the maximum queue data size; the returned size will not exceed it
 * @param blk_queue continuous block queue
 *
 * @return the block queue data total size
 */
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node, *tmp = RT_NULL;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();
    for (node = rt_slist_first(&rbb->blk_list); node; node = tmp)
    {
        /* save the next node first, so stepping is safe even on the last node */
        tmp = rt_slist_next(node);
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to the queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             * 3. data_total_size would go out of range
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        /* take the current block into the queue */
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }
    rt_hw_interrupt_enable(level);
    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);
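
/*
 * Usage sketch (illustrative only): draining several continuous put blocks in
 * one call, e.g. to hand a single linear buffer to a driver. sample_write()
 * and the 512-byte limit are assumptions, not part of this file.
 */
#if 0
extern void sample_write(const rt_uint8_t *buf, rt_size_t len); /* hypothetical */

static void sample_batch_consumer(rt_rbb_t rbb)
{
    struct rt_rbb_blk_queue blk_queue;
    rt_size_t len = rt_rbb_blk_queue_get(rbb, 512, &blk_queue);

    if (len > 0)
    {
        /* the queued blocks are continuous, so one base pointer covers them all */
        sample_write(rt_rbb_blk_queue_buf(&blk_queue), len);
        rt_rbb_blk_queue_free(rbb, &blk_queue);
    }
}
#endif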

/**
 * get the total length of all blocks on the block queue
 *
 * @param blk_queue the block queue
 *
 * @return total length
 */
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0, data_total_size = 0;
    rt_rbb_blk_t blk;

    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        data_total_size += blk->size;
        blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
    }
    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_len);

/**
 * return the block queue buffer
 *
 * @param blk_queue the block queue
 *
 * @return block queue buffer
 */
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);

/**
 * free the block queue
 *
 * @param rbb ring block buffer object
 * @param blk_queue the block queue
 */
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0;
    rt_rbb_blk_t blk, next_blk;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        /* save the next block before freeing, since freeing unlinks the node */
        next_blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
        rt_rbb_blk_free(rbb, blk);
        blk = next_blk;
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);

/**
 * Blocks that are in put status and have continuous buffers can form a block queue.
 * This function returns the length of the block queue that could be formed next.
 *
 * @param rbb ring block buffer object
 *
 * @return the length of the next possible block queue
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        data_len += last_block->size;
    }
    rt_hw_interrupt_enable(level);
    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
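
/*
 * Usage sketch (illustrative only): peeking at the next continuous run before
 * draining it, useful when a transfer (e.g. DMA) is only worthwhile above a
 * threshold. The 128-byte threshold is an assumption for the example.
 */
#if 0
static rt_size_t sample_drain_if_worthwhile(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t next_len = rt_rbb_next_blk_queue_len(rbb);

    if (next_len < 128)
    {
        return 0; /* wait for more data to accumulate */
    }
    return rt_rbb_blk_queue_get(rbb, next_len, blk_queue);
}
#endif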

/**
 * get the ring block buffer object buffer size
 *
 * @param rbb ring block buffer object
 *
 * @return buffer size
 */
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);