ringblk_buf.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-08-25     armink       the first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 */

#include <rthw.h>
#include <rtdevice.h>

/**
 * ring block buffer object initialization
 *
 * @param rbb ring block buffer object
 * @param buf buffer
 * @param buf_size buffer size
 * @param block_set block set
 * @param blk_max_num max block number
 *
 * @note If your application needs aligned access, please make sure the buffer address is aligned.
 */
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rbb->tail = &rbb->blk_list;
    rt_slist_init(&rbb->blk_list);
    rt_slist_init(&rbb->free_list);
    /* initialize block status */
    for (i = 0; i < blk_max_num; i++)
    {
        block_set[i].status = RT_RBB_BLK_UNUSED;
        rt_slist_init(&block_set[i].list);
        rt_slist_insert(&rbb->free_list, &block_set[i].list);
    }
    rt_spin_lock_init(&(rbb->spinlock));
}
RTM_EXPORT(rt_rbb_init);
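
/*
 * Usage sketch for rt_rbb_init() with fully static storage (illustrative
 * only; the ex_* names and the sizes below are hypothetical, not part of
 * this file). Per the note above, ex_buf should be suitably aligned if the
 * application needs aligned access:
 *
 *     #define EX_BUF_SIZE     256
 *     #define EX_BLK_MAX_NUM  8
 *
 *     static struct rt_rbb     ex_rbb;
 *     static rt_uint8_t        ex_buf[EX_BUF_SIZE];
 *     static struct rt_rbb_blk ex_blk_set[EX_BLK_MAX_NUM];
 *
 *     rt_rbb_init(&ex_rbb, ex_buf, EX_BUF_SIZE, ex_blk_set, EX_BLK_MAX_NUM);
 */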

#ifdef RT_USING_HEAP
/**
 * ring block buffer object create
 *
 * @param buf_size buffer size
 * @param blk_max_num max block number
 *
 * @return != RT_NULL: ring block buffer object
 *            RT_NULL: create failed
 */
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb = RT_NULL;
    rt_uint8_t *buf;
    rt_rbb_blk_t blk_set;

    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (!rbb)
    {
        return RT_NULL;
    }

    buf = (rt_uint8_t *)rt_malloc(buf_size);
    if (!buf)
    {
        rt_free(rbb);
        return RT_NULL;
    }

    blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (!blk_set)
    {
        rt_free(buf);
        rt_free(rbb);
        return RT_NULL;
    }

    rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);
    return rbb;
}
RTM_EXPORT(rt_rbb_create);

/**
 * ring block buffer object destroy
 *
 * @param rbb ring block buffer object
 */
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);
#endif
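
/*
 * Usage sketch for the heap-based pair (illustrative; requires
 * RT_USING_HEAP, and the sizes are hypothetical):
 *
 *     rt_rbb_t rbb = rt_rbb_create(256, 8);
 *     if (rbb != RT_NULL)
 *     {
 *         // ... produce and consume blocks ...
 *         rt_rbb_destroy(rbb);
 *     }
 */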

static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    struct rt_rbb_blk *blk;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->free_list))
    {
        return RT_NULL;
    }
    blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
    rt_slist_remove(&rbb->free_list, &blk->list);
    RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
    return blk;
}

rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
{
    /* append the node to the tail */
    rbb->tail->next = n;
    n->next = RT_NULL;
    /* save tail node */
    rbb->tail = n;
}

rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
{
    rt_slist_t *l = &rbb->blk_list;
    struct rt_slist_node *node = l;

    /* find the node previous to n */
    while (node->next && node->next != n) node = node->next;
    /* remove node */
    if (node->next != (rt_slist_t *)0)
    {
        node->next = node->next->next;
        n->next = RT_NULL;
        /* update tail node */
        if (rbb->tail == n)
            rbb->tail = node;
    }
    return l;
}

/**
 * Allocate a block of the given size. On success, the block is appended to blk_list.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note If your application needs aligned access, please make sure blk_size is aligned.
 *
 * @return != RT_NULL: allocated block
 *            RT_NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_blk = RT_NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    new_blk = find_empty_blk_in_set(rbb);

    if (new_blk)
    {
        if (rt_slist_isempty(&rbb->blk_list) == 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            /* get the tail rbb blk object */
            tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *            head                        tail
                 * +--------+--------+--------+--------+--------+
                 * | empty2 | block1 | block2 | block3 | empty1 |
                 * +--------+--------+--------+--------+--------+
                 * rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_blk->list);
                    new_blk->status = RT_RBB_BLK_INITED;
                    new_blk->buf = tail->buf + tail->size;
                    new_blk->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    list_append(rbb, &new_blk->list);
                    new_blk->status = RT_RBB_BLK_INITED;
                    new_blk->buf = rbb->buf;
                    new_blk->size = blk_size;
                }
                else
                {
                    /* no space, return the block to the free list */
                    rt_slist_insert(&rbb->free_list, &new_blk->list);
                    new_blk = RT_NULL;
                }
            }
            else
            {
                /**
                 *     tail                      head
                 * +--------+-----------------+--------+--------+
                 * | block3 |      empty1     | block1 | block2 |
                 * +--------+-----------------+--------+--------+
                 * rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_blk->list);
                    new_blk->status = RT_RBB_BLK_INITED;
                    new_blk->buf = tail->buf + tail->size;
                    new_blk->size = blk_size;
                }
                else
                {
                    /* no space, return the block to the free list */
                    rt_slist_insert(&rbb->free_list, &new_blk->list);
                    new_blk = RT_NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            list_append(rbb, &new_blk->list);
            new_blk->status = RT_RBB_BLK_INITED;
            new_blk->buf = rbb->buf;
            new_blk->size = blk_size;
        }
    }

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
    return new_blk;
}
RTM_EXPORT(rt_rbb_blk_alloc);
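
/*
 * Producer-side sketch (illustrative; ex_rbb, ex_data and the 16-byte
 * length are hypothetical):
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_alloc(ex_rbb, 16);
 *     if (blk != RT_NULL)
 *     {
 *         rt_memcpy(rt_rbb_blk_buf(blk), ex_data, 16);
 *         rt_rbb_blk_put(blk);   // hand the filled block to the consumer
 *     }
 */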

/**
 * put a block to the ring block buffer object
 *
 * @param block the block
 */
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);

/**
 * get a block from the ring block buffer object
 *
 * @param rbb ring block buffer object
 *
 * @return != RT_NULL: block
 *            RT_NULL: get failed
 */
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t block = RT_NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return RT_NULL;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        block = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (block->status == RT_RBB_BLK_PUT)
        {
            block->status = RT_RBB_BLK_GET;
            goto __exit;
        }
    }
    /* not found */
    block = RT_NULL;

__exit:
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
    return block;
}
RTM_EXPORT(rt_rbb_blk_get);

/**
 * return the block size
 *
 * @param block the block
 *
 * @return block size
 */
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);

/**
 * return the block buffer
 *
 * @param block the block
 *
 * @return block buffer
 */
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);

/**
 * free the block
 *
 * @param rbb ring block buffer object
 * @param block the block
 */
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    /* remove it from the rbb block list */
    list_remove(rbb, &block->list);
    block->status = RT_RBB_BLK_UNUSED;
    rt_slist_insert(&rbb->free_list, &block->list);
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
}
RTM_EXPORT(rt_rbb_blk_free);
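
/*
 * Consumer-side sketch (illustrative; ex_rbb and ex_process() are
 * hypothetical):
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_get(ex_rbb);
 *     if (blk != RT_NULL)
 *     {
 *         ex_process(rt_rbb_blk_buf(blk), rt_rbb_blk_size(blk));
 *         rt_rbb_blk_free(ex_rbb, blk);   // recycle the block
 *     }
 */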

/**
 * get continuous blocks into a block queue, limited by the given size
 *
 *     tail                      head
 * +--------+--------+--------+--------+----------+
 * | block3 | empty1 | block1 | block2 | fragment |
 * +--------+--------+--------+--------+----------+
 *                   |<-- return_size -->|
 *                   |<---- queue_data_len ---->|
 *
 *     tail                      head
 * +--------+--------+--------+--------+----------+
 * | block3 | empty1 | block1 | block2 | fragment |
 * +--------+--------+--------+--------+----------+
 *                   |<-- return_size -->|   out of len(b1+b2+b3)
 *                   |<----------- queue_data_len ----------->|
 *
 * @param rbb ring block buffer object
 * @param queue_data_len the maximum queue data size; the returned size will not exceed it
 * @param blk_queue continuous block queue
 *
 * @return the total data size of the block queue
 */
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node, *tmp = RT_NULL;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    /* save the successor before handling each node, so the walk stays safe
     * and never dereferences a RT_NULL node at the end of the list */
    for (node = rt_slist_first(&rbb->blk_list); node; node = tmp)
    {
        tmp = rt_slist_next(node);
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             * 3. data_total_size would be out of range
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        /* take the current block */
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);

/**
 * get the total length of all blocks on the block queue
 *
 * @param blk_queue the block queue
 *
 * @return total length
 */
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0, data_total_size = 0;
    rt_rbb_blk_t blk;

    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        data_total_size += blk->size;
        blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
    }
    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_len);

/**
 * return the block queue buffer
 *
 * @param blk_queue the block queue
 *
 * @return block queue buffer
 */
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);

/**
 * free the block queue
 *
 * @param rbb ring block buffer object
 * @param blk_queue the block queue
 */
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0;
    rt_rbb_blk_t blk, next_blk;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        /* save the successor before freeing the current block */
        next_blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
        rt_rbb_blk_free(rbb, blk);
        blk = next_blk;
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);
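
/*
 * Block-queue consumer sketch (illustrative; ex_rbb, ex_write() and the
 * 64-byte limit are hypothetical). Up to 64 bytes of continuous put-status
 * blocks are taken, handled as one flat buffer, then recycled in one call:
 *
 *     struct rt_rbb_blk_queue bq;
 *     rt_size_t len = rt_rbb_blk_queue_get(ex_rbb, 64, &bq);
 *     if (len > 0)
 *     {
 *         ex_write(rt_rbb_blk_queue_buf(&bq), len);
 *         rt_rbb_blk_queue_free(ex_rbb, &bq);
 *     }
 */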

/**
 * Blocks in put status whose buffers are continuous can be made into a block queue.
 * This function returns the length of the next block queue that could be made.
 *
 * @param rbb ring block buffer object
 *
 * @return the length of the next possible block queue
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        data_len += last_block->size;
    }
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
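
/*
 * Sketch of pairing rt_rbb_next_blk_queue_len() with rt_rbb_blk_queue_get()
 * (illustrative; ex_rbb is hypothetical). The probe tells the caller how
 * large the next continuous run is, so the whole run can be taken at once:
 *
 *     rt_size_t next_len = rt_rbb_next_blk_queue_len(ex_rbb);
 *     if (next_len > 0)
 *     {
 *         struct rt_rbb_blk_queue bq;
 *         rt_rbb_blk_queue_get(ex_rbb, next_len, &bq);
 *         // ... use and free the queue as above ...
 *     }
 */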

/**
 * get the ring block buffer object buffer size
 *
 * @param rbb ring block buffer object
 *
 * @return buffer size
 */
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);