/* virtio.c */
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-11-11     GuEe-GUI     the first version
 */
  10. #include <rtthread.h>
  11. #include <cpuport.h>
  12. #include <virtio.h>
/* Sanity-check a virtio device handle: both the handle itself and its
 * MMIO config-space pointer must be non-NULL before any register access.
 * The two asserts are ordered: the second dereferences dev. */
rt_inline void _virtio_dev_check(struct virtio_device *dev)
{
    RT_ASSERT(dev != RT_NULL);
    RT_ASSERT(dev->mmio_config != RT_NULL);
}
  18. void virtio_reset_device(struct virtio_device *dev)
  19. {
  20. _virtio_dev_check(dev);
  21. dev->mmio_config->status = 0;
  22. }
  23. void virtio_status_acknowledge_driver(struct virtio_device *dev)
  24. {
  25. _virtio_dev_check(dev);
  26. dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
  27. }
  28. void virtio_status_driver_ok(struct virtio_device *dev)
  29. {
  30. _virtio_dev_check(dev);
  31. dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK;
  32. }
  33. void virtio_interrupt_ack(struct virtio_device *dev)
  34. {
  35. rt_uint32_t status;
  36. _virtio_dev_check(dev);
  37. status = dev->mmio_config->interrupt_status;
  38. if (status != 0)
  39. {
  40. dev->mmio_config->interrupt_ack = status;
  41. }
  42. }
  43. rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit)
  44. {
  45. _virtio_dev_check(dev);
  46. return !!(dev->mmio_config->device_features & (1UL << feature_bit));
  47. }
  48. rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
  49. {
  50. _virtio_dev_check(dev);
  51. dev->queues = rt_malloc(sizeof(struct virtq) * queues_num);
  52. if (dev->queues != RT_NULL)
  53. {
  54. dev->queues_num = queues_num;
  55. return RT_EOK;
  56. }
  57. return -RT_ENOMEM;
  58. }
  59. void virtio_queues_free(struct virtio_device *dev)
  60. {
  61. if (dev->queues != RT_NULL)
  62. {
  63. dev->queues_num = 0;
  64. rt_free(dev->queues);
  65. }
  66. }
/*
 * Allocate and register one virtqueue with the device (legacy MMIO layout:
 * desc table, avail ring, and used ring live in a single contiguous,
 * page-aligned allocation whose page frame number is written to queue_pfn).
 *
 * @param queue_index index of the queue to set up; must be below queue_num_max
 * @param ring_size   number of descriptors; must be a non-zero power of two
 * @return RT_EOK on success, -RT_ENOMEM if either allocation fails
 *
 * NOTE(review): the MMIO writes below (queue_sel, queue_num, queue_align,
 * queue_pfn) follow the legacy virtio-mmio programming order — do not reorder.
 */
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size)
{
    int i;
    void *pages;
    rt_size_t pages_total_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);
    /* ring_size is power of 2 */
    RT_ASSERT(ring_size > 0);
    RT_ASSERT(((ring_size - 1) & ring_size) == 0);

    queue = &dev->queues[queue_index];

    /* desc table + avail ring share the first page-aligned region;
     * the used ring follows, starting on its own page boundary. */
    pages_total_size = VIRTIO_PAGE_ALIGN(
            VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size);

    pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE);

    if (pages == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    /* Per-descriptor free/in-use bookkeeping for the driver side. */
    queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size);

    if (queue->free == RT_NULL)
    {
        rt_free_align(pages);
        return -RT_ENOMEM;
    }

    /* Rings must start out zeroed before the device sees them. */
    rt_memset(pages, 0, pages_total_size);

    /* Legacy MMIO programming sequence: select queue, then size, alignment,
     * and finally the physical page frame number of the ring memory. */
    dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_sel = queue_index;
    dev->mmio_config->queue_num = ring_size;
    dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT;

    queue->num = ring_size;
    queue->desc = (struct virtq_desc *)((rt_ubase_t)pages);
    queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size));
    /* used ring sits at the next page boundary after avail (+ its reserved tail) */
    queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN(
            (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE);
    queue->used_idx = 0;

    /* All descriptors start out unused */
    for (i = 0; i < ring_size; ++i)
    {
        queue->free[i] = RT_TRUE;
    }
    queue->free_count = ring_size;

    return RT_EOK;
}
/*
 * Tear down a virtqueue set up by virtio_queue_init(): free the driver-side
 * bookkeeping and ring memory, then deregister the queue from the device by
 * writing 0 to queue_pfn (legacy MMIO convention for "no queue").
 *
 * NOTE(review): the ring memory is freed before queue_pfn is cleared; this
 * assumes the device is already quiesced for this queue — confirm at call sites.
 */
void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index)
{
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);

    queue = &dev->queues[queue_index];

    RT_ASSERT(queue->num > 0);

    rt_free(queue->free);
    /* queue->desc is the base of the whole page-aligned ring allocation */
    rt_free_align((void *)queue->desc);

    /* Select the queue, then clear its PFN so the device forgets it. */
    dev->mmio_config->queue_sel = queue_index;
    dev->mmio_config->queue_pfn = RT_NULL;

    queue->num = 0;
    queue->desc = RT_NULL;
    queue->avail = RT_NULL;
    queue->used = RT_NULL;
}
  130. void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index)
  131. {
  132. _virtio_dev_check(dev);
  133. dev->mmio_config->queue_notify = queue_index;
  134. }
/*
 * Publish a descriptor chain to the device: place the chain head into the
 * next avail ring slot, then bump avail->idx.
 *
 * The two rt_hw_dsb() barriers are ordering-critical: the first makes the
 * ring entry visible before the index update, the second makes the index
 * update visible before any subsequent notify — do not remove or reorder.
 */
void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
    rt_size_t ring_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    queue = &dev->queues[queue_index];
    ring_size = queue->num;

    /* Tell the device the first index in our chain of descriptors */
    queue->avail->ring[queue->avail->idx % ring_size] = desc_index;

    rt_hw_dsb();

    /* Tell the device another avail ring entry is available */
    queue->avail->idx++;

    rt_hw_dsb();
}
  149. rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index)
  150. {
  151. int i;
  152. struct virtq *queue;
  153. _virtio_dev_check(dev);
  154. RT_ASSERT(queue_index < dev->queues_num);
  155. queue = &dev->queues[queue_index];
  156. if (queue->free_count > 0)
  157. {
  158. rt_size_t ring_size = queue->num;
  159. for (i = 0; i < ring_size; ++i)
  160. {
  161. if (queue->free[i])
  162. {
  163. queue->free[i] = RT_FALSE;
  164. queue->free_count--;
  165. return (rt_uint16_t)i;
  166. }
  167. }
  168. }
  169. return VIRTQ_INVALID_DESC_ID;
  170. }
  171. void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  172. {
  173. struct virtq *queue;
  174. _virtio_dev_check(dev);
  175. queue = &dev->queues[queue_index];
  176. RT_ASSERT(queue_index < dev->queues_num);
  177. RT_ASSERT(!queue->free[desc_index]);
  178. queue->desc[desc_index].addr = 0;
  179. queue->desc[desc_index].len = 0;
  180. queue->desc[desc_index].flags = 0;
  181. queue->desc[desc_index].next = 0;
  182. queue->free[desc_index] = RT_TRUE;
  183. queue->free_count++;
  184. }
  185. rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
  186. rt_uint16_t *indexs)
  187. {
  188. int i, j;
  189. _virtio_dev_check(dev);
  190. RT_ASSERT(indexs != RT_NULL);
  191. if (dev->queues[queue_index].free_count < count)
  192. {
  193. return -RT_ERROR;
  194. }
  195. for (i = 0; i < count; ++i)
  196. {
  197. indexs[i] = virtio_alloc_desc(dev, queue_index);
  198. if (indexs[i] == VIRTQ_INVALID_DESC_ID)
  199. {
  200. for (j = 0; j < i; ++j)
  201. {
  202. virtio_free_desc(dev, queue_index, indexs[j]);
  203. }
  204. return -RT_ERROR;
  205. }
  206. }
  207. return RT_EOK;
  208. }
  209. void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  210. {
  211. rt_uint16_t flags, next;
  212. struct virtq_desc *desc;
  213. _virtio_dev_check(dev);
  214. desc = &dev->queues[queue_index].desc[0];
  215. for (;;)
  216. {
  217. flags = desc[desc_index].flags;
  218. next = desc[desc_index].next;
  219. virtio_free_desc(dev, queue_index, desc_index);
  220. if (flags & VIRTQ_DESC_F_NEXT)
  221. {
  222. desc_index = next;
  223. }
  224. else
  225. {
  226. break;
  227. }
  228. }
  229. }
  230. void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
  231. rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next)
  232. {
  233. struct virtq_desc *desc;
  234. _virtio_dev_check(dev);
  235. desc = &dev->queues[queue_index].desc[desc_index];
  236. desc->addr = addr;
  237. desc->len = len;
  238. desc->flags = flags;
  239. desc->next = next;
  240. }