ringblk_buf.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-08-25     armink       the first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 */

#include <rthw.h>
#include <rtdevice.h>

/**
 * ring block buffer object initialization
 *
 * @param rbb ring block buffer object
 * @param buf buffer
 * @param buf_size buffer size
 * @param block_set block set
 * @param blk_max_num max block number
 *
 * @note If your application needs aligned access, make sure the buffer address is aligned.
 */
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rbb->tail = &rbb->blk_list;
    rt_slist_init(&rbb->blk_list);
    rt_slist_init(&rbb->free_list);
    /* initialize block status */
    for (i = 0; i < blk_max_num; i++)
    {
        block_set[i].status = RT_RBB_BLK_UNUSED;
        rt_slist_init(&block_set[i].list);
        rt_slist_insert(&rbb->free_list, &block_set[i].list);
    }
    rt_spin_lock_init(&(rbb->spinlock));
}
RTM_EXPORT(rt_rbb_init);
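
/*
 * Usage sketch (illustrative only, not part of this file): initializing a
 * ring block buffer from statically allocated storage. The names `log_rbb`,
 * `log_buf`, and `log_blk_set` are hypothetical.
 *
 *     #define LOG_BUF_SIZE    1024
 *     #define LOG_BLK_MAX_NUM 8
 *
 *     static struct rt_rbb log_rbb;
 *     static rt_uint8_t log_buf[LOG_BUF_SIZE];
 *     static struct rt_rbb_blk log_blk_set[LOG_BLK_MAX_NUM];
 *
 *     rt_rbb_init(&log_rbb, log_buf, LOG_BUF_SIZE, log_blk_set, LOG_BLK_MAX_NUM);
 */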

#ifdef RT_USING_HEAP
/**
 * ring block buffer object create
 *
 * @param buf_size buffer size
 * @param blk_max_num max block number
 *
 * @return != RT_NULL: ring block buffer object
 *            RT_NULL: create failed
 */
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb = RT_NULL;
    rt_uint8_t *buf;
    rt_rbb_blk_t blk_set;

    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (!rbb)
    {
        return RT_NULL;
    }

    buf = (rt_uint8_t *)rt_malloc(buf_size);
    if (!buf)
    {
        rt_free(rbb);
        return RT_NULL;
    }

    blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (!blk_set)
    {
        rt_free(buf);
        rt_free(rbb);
        return RT_NULL;
    }

    rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);
    return rbb;
}
RTM_EXPORT(rt_rbb_create);

/**
 * ring block buffer object destroy
 *
 * @param rbb ring block buffer object
 */
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);
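
/*
 * Heap-based sketch (illustrative only): creating and destroying a ring
 * block buffer when RT_USING_HEAP is enabled. The sizes are hypothetical.
 *
 *     rt_rbb_t rbb = rt_rbb_create(1024, 8);
 *     if (rbb != RT_NULL)
 *     {
 *         // produce and consume blocks here
 *         rt_rbb_destroy(rbb);
 *     }
 */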
#endif /* RT_USING_HEAP */

/* take an unused block descriptor from the free list, or return RT_NULL if the set is exhausted */
static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    struct rt_rbb_blk *blk;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->free_list))
    {
        return RT_NULL;
    }

    blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
    rt_slist_remove(&rbb->free_list, &blk->list);
    RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
    return blk;
}

rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
{
    /* append the node to the tail */
    rbb->tail->next = n;
    n->next = RT_NULL;
    /* save tail node */
    rbb->tail = n;
}

rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
{
    rt_slist_t *l = &rbb->blk_list;
    struct rt_slist_node *node = l;

    /* find the node which precedes n */
    while (node->next && node->next != n) node = node->next;
    /* unlink the node */
    if (node->next != (rt_slist_t *)0)
    {
        node->next = node->next->next;
        n->next = RT_NULL;
        /* update tail node */
        if (rbb->tail == n)
            rbb->tail = node;
    }
    return l;
}

/**
 * Allocate a block by the given size. The block will be appended to blk_list when allocation succeeds.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note If your application needs aligned access, make sure blk_size is aligned.
 *
 * @return != RT_NULL: allocated block
 *            RT_NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_rbb = RT_NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    new_rbb = find_empty_blk_in_set(rbb);
    if (new_rbb)
    {
        if (rt_slist_isempty(&rbb->blk_list) == 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            /* get tail rbb blk object */
            tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *            head                      tail
                 * +--------+--------+--------+--------+--------+
                 * | empty2 | block1 | block2 | block3 | empty1 |
                 * +--------+--------+--------+--------+--------+
                 *                    rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = rbb->buf;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
            else
            {
                /**
                 *    tail                      head
                 * +--------+-----------------+--------+--------+
                 * | block3 |     empty1      | block1 | block2 |
                 * +--------+-----------------+--------+--------+
                 *                    rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            if (blk_size <= rbb->buf_size)
            {
                list_append(rbb, &new_rbb->list);
                new_rbb->status = RT_RBB_BLK_INITED;
                new_rbb->buf = rbb->buf;
                new_rbb->size = blk_size;
            }
            else
            {
                /* no space */
                rt_slist_insert(&rbb->free_list, &new_rbb->list);
                new_rbb = RT_NULL;
            }
        }
    }

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
    return new_rbb;
}
RTM_EXPORT(rt_rbb_blk_alloc);

/**
 * put a block to the ring block buffer object
 *
 * @param block the block
 */
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);
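
/*
 * Producer-side sketch (illustrative only): allocate a block, fill its
 * buffer, then publish it with rt_rbb_blk_put(). The names `rbb`, `data`,
 * and `len` are hypothetical.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_alloc(rbb, len);
 *     if (blk != RT_NULL)
 *     {
 *         rt_memcpy(rt_rbb_blk_buf(blk), data, len);
 *         rt_rbb_blk_put(blk);
 *     }
 */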

/**
 * get a block from the ring block buffer object
 *
 * @param rbb ring block buffer object
 *
 * @return != RT_NULL: block
 *            RT_NULL: get failed
 */
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t block = RT_NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return RT_NULL;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        block = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (block->status == RT_RBB_BLK_PUT)
        {
            block->status = RT_RBB_BLK_GET;
            goto __exit;
        }
    }
    /* not found */
    block = RT_NULL;

__exit:
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
    return block;
}
RTM_EXPORT(rt_rbb_blk_get);

/**
 * return the block size
 *
 * @param block the block
 *
 * @return block size
 */
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);

/**
 * return the block buffer
 *
 * @param block the block
 *
 * @return block buffer
 */
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);

/**
 * free the block
 *
 * @param rbb ring block buffer object
 * @param block the block
 */
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    /* remove it from the rbb block list */
    list_remove(rbb, &block->list);
    block->status = RT_RBB_BLK_UNUSED;
    rt_slist_insert(&rbb->free_list, &block->list);
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
}
RTM_EXPORT(rt_rbb_blk_free);
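
/*
 * Consumer-side sketch (illustrative only): take the oldest put block,
 * process its data, then return it to the free list. The names `rbb` and
 * `process()` are hypothetical.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_get(rbb);
 *     if (blk != RT_NULL)
 *     {
 *         process(rt_rbb_blk_buf(blk), rt_rbb_blk_size(blk));
 *         rt_rbb_blk_free(rbb, blk);
 *     }
 */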

/**
 * get continuous blocks into a queue by the given size
 *
 *    tail                      head
 * +--------+-----------------+--------+--------+----------+
 * | block3 |     empty1      | block1 | block2 | fragment |
 * +--------+-----------------+--------+--------+----------+
 *                            |<- return_size ->|
 *                            |<- queue_data_len ->|
 *
 *    tail                      head
 * +--------+-----------------+--------+--------+----------+
 * | block3 |     empty1      | block1 | block2 | fragment |
 * +--------+-----------------+--------+--------+----------+
 *                            |<- return_size ->|   out of len(b1+b2+b3)
 * |<----------------- queue_data_len ----------------->|
 *
 * @param rbb ring block buffer object
 * @param queue_data_len the max queue data size; the returned size will be no more than it
 * @param blk_queue continuous block queue
 *
 * @return the block queue data total size
 */
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node, *tmp = RT_NULL;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    node = rt_slist_first(&rbb->blk_list);
    if (node != RT_NULL)
    {
        tmp = rt_slist_next(node);
    }
    /* fetch the next node before the body runs; guard against walking past the list end */
    for (; node; node = tmp, tmp = node ? rt_slist_next(node) : RT_NULL)
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to the queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             * 3. data_total_size would exceed queue_data_len
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        /* take the current block into the queue */
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);

/**
 * get the total data length of all blocks on the block queue
 *
 * @param blk_queue the block queue
 *
 * @return total length
 */
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0, data_total_size = 0;
    rt_rbb_blk_t blk;

    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        data_total_size += blk->size;
        blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
    }

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_len);

/**
 * return the block queue buffer
 *
 * @param blk_queue the block queue
 *
 * @return block queue buffer
 */
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);

/**
 * free the block queue
 *
 * @param rbb ring block buffer object
 * @param blk_queue the block queue
 */
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0;
    rt_rbb_blk_t blk, next_blk;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        /* save the next block before freeing, because freeing unlinks the node */
        next_blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
        rt_rbb_blk_free(rbb, blk);
        blk = next_blk;
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);

/**
 * Blocks that are in put status and whose buffers are continuous can be made into a block queue.
 * This function returns the data length of the next block queue that could be made.
 *
 * @param rbb ring block buffer object
 *
 * @return the length of the next possible block queue
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        data_len += last_block->size;
    }
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
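
/*
 * Block queue sketch (illustrative only): drain the longest run of
 * continuous put blocks in one shot, e.g. to feed a DMA transfer. The
 * names `rbb` and `dma_send()` are hypothetical.
 *
 *     struct rt_rbb_blk_queue q;
 *     rt_size_t len = rt_rbb_next_blk_queue_len(rbb);
 *     if (len > 0 && rt_rbb_blk_queue_get(rbb, len, &q) > 0)
 *     {
 *         dma_send(rt_rbb_blk_queue_buf(&q), rt_rbb_blk_queue_len(&q));
 *         rt_rbb_blk_queue_free(rbb, &q);
 *     }
 */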

/**
 * get the ring block buffer object buffer size
 *
 * @param rbb ring block buffer object
 *
 * @return buffer size
 */
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);