virtio_blk.c 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2021-9-16 GuEe-GUI the first version
  9. * 2021-11-11 GuEe-GUI using virtio common interface
  10. */
  11. #include <rthw.h>
  12. #include <rtthread.h>
  13. #include <cpuport.h>
  14. #ifdef RT_USING_VIRTIO_BLK
  15. #include <virtio_blk.h>
  16. static void virtio_blk_rw(struct virtio_blk_device *virtio_blk_dev, rt_off_t pos, void *buffer, rt_size_t count,
  17. int flags)
  18. {
  19. rt_uint16_t idx[3];
  20. rt_size_t size = count * virtio_blk_dev->config->blk_size;
  21. struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
  22. #ifdef RT_USING_SMP
  23. rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
  24. #endif
  25. /* Allocate 3 descriptors */
  26. while (virtio_alloc_desc_chain(virtio_dev, 0, 3, idx))
  27. {
  28. #ifdef RT_USING_SMP
  29. rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
  30. #endif
  31. rt_thread_yield();
  32. #ifdef RT_USING_SMP
  33. level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
  34. #endif
  35. }
  36. virtio_blk_dev->info[idx[0]].status = 0xff;
  37. virtio_blk_dev->info[idx[0]].valid = RT_TRUE;
  38. virtio_blk_dev->info[idx[0]].req.type = flags;
  39. virtio_blk_dev->info[idx[0]].req.ioprio = 0;
  40. virtio_blk_dev->info[idx[0]].req.sector = pos * (virtio_blk_dev->config->blk_size / 512);
  41. flags = flags == VIRTIO_BLK_T_OUT ? 0 : VIRTQ_DESC_F_WRITE;
  42. virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[0],
  43. VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].req), sizeof(struct virtio_blk_req), VIRTQ_DESC_F_NEXT, idx[1]);
  44. virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[1],
  45. VIRTIO_VA2PA(buffer), size, flags | VIRTQ_DESC_F_NEXT, idx[2]);
  46. virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[2],
  47. VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].status), sizeof(rt_uint8_t), VIRTQ_DESC_F_WRITE, 0);
  48. virtio_submit_chain(virtio_dev, VIRTIO_BLK_QUEUE, idx[0]);
  49. virtio_queue_notify(virtio_dev, VIRTIO_BLK_QUEUE);
  50. /* Wait for virtio_blk_isr() to done */
  51. while (virtio_blk_dev->info[idx[0]].valid)
  52. {
  53. #ifdef RT_USING_SMP
  54. rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
  55. #endif
  56. rt_thread_yield();
  57. #ifdef RT_USING_SMP
  58. level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
  59. #endif
  60. }
  61. virtio_free_desc_chain(virtio_dev, VIRTIO_BLK_QUEUE, idx[0]);
  62. #ifdef RT_USING_SMP
  63. rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
  64. #endif
  65. }
  66. static rt_ssize_t virtio_blk_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t count)
  67. {
  68. virtio_blk_rw((struct virtio_blk_device *)dev, pos, buffer, count, VIRTIO_BLK_T_IN);
  69. return count;
  70. }
  71. static rt_ssize_t virtio_blk_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t count)
  72. {
  73. virtio_blk_rw((struct virtio_blk_device *)dev, pos, (void *)buffer, count, VIRTIO_BLK_T_OUT);
  74. return count;
  75. }
  76. static rt_err_t virtio_blk_control(rt_device_t dev, int cmd, void *args)
  77. {
  78. rt_err_t status = RT_EOK;
  79. struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)dev;
  80. switch (cmd)
  81. {
  82. case RT_DEVICE_CTRL_BLK_GETGEOME:
  83. {
  84. struct rt_device_blk_geometry *geometry = (struct rt_device_blk_geometry *)args;
  85. if (geometry == RT_NULL)
  86. {
  87. status = -RT_ERROR;
  88. break;
  89. }
  90. geometry->bytes_per_sector = VIRTIO_BLK_BYTES_PER_SECTOR;
  91. geometry->block_size = virtio_blk_dev->config->blk_size;
  92. geometry->sector_count = virtio_blk_dev->config->capacity;
  93. }
  94. break;
  95. default:
  96. status = -RT_EINVAL;
  97. break;
  98. }
  99. return status;
  100. }
  101. #ifdef RT_USING_DEVICE_OPS
  102. const static struct rt_device_ops virtio_blk_ops =
  103. {
  104. RT_NULL,
  105. RT_NULL,
  106. RT_NULL,
  107. virtio_blk_read,
  108. virtio_blk_write,
  109. virtio_blk_control
  110. };
  111. #endif
/**
 * Interrupt handler for the virtio-blk device.
 *
 * Acknowledges the interrupt, then drains the used ring: for every entry
 * the device has completed, clears the matching info[].valid flag so the
 * thread spinning in virtio_blk_rw() can proceed.
 *
 * @param irqno the interrupt number (unused here)
 * @param param the struct virtio_blk_device registered at install time
 */
static void virtio_blk_isr(int irqno, void *param)
{
    rt_uint32_t id;
    struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)param;
    struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
    struct virtq *queue = &virtio_dev->queues[VIRTIO_BLK_QUEUE];
#ifdef RT_USING_SMP
    rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
    virtio_interrupt_ack(virtio_dev);
    /* Barrier: make sure the device's used-ring writes are visible before we read idx. */
    rt_hw_dsb();
    /* The device increments disk.used->idx when it adds an entry to the used ring */
    while (queue->used_idx != queue->used->idx)
    {
        /* Barrier before reading the ring entry published under the new idx. */
        rt_hw_dsb();
        /* id is the head descriptor of the completed chain (same index used by info[]). */
        id = queue->used->ring[queue->used_idx % queue->num].id;
        /* Status byte must be 0 (VIRTIO_BLK_S_OK); device reported an error otherwise. */
        RT_ASSERT(virtio_blk_dev->info[id].status == 0);
        /* Done with buffer */
        virtio_blk_dev->info[id].valid = RT_FALSE;
        queue->used_idx++;
    }
#ifdef RT_USING_SMP
    rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
/**
 * Probe and register one virtio-blk device.
 *
 * Allocates the device instance, performs the virtio MMIO handshake
 * (reset, acknowledge/driver status, feature negotiation), sets up
 * queue 0, installs the interrupt handler and registers the device as
 * "virtio-blkN" with the RT-Thread device framework.
 *
 * @param mmio_base base address of the device's virtio MMIO window
 * @param irq       interrupt number wired to this device
 *
 * @return RT_EOK (or rt_device_register's result) on success,
 *         -RT_ENOMEM on allocation/queue-setup failure
 */
rt_err_t rt_virtio_blk_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
    /* Monotonic suffix for device names: virtio-blk0, virtio-blk1, ... */
    static int dev_no = 0;
    char dev_name[RT_NAME_MAX];
    struct virtio_device *virtio_dev;
    struct virtio_blk_device *virtio_blk_dev;
    virtio_blk_dev = rt_malloc(sizeof(struct virtio_blk_device));
    if (virtio_blk_dev == RT_NULL)
    {
        return -RT_ENOMEM;
    }
    virtio_dev = &virtio_blk_dev->virtio_dev;
    virtio_dev->irq = irq;
    virtio_dev->mmio_base = mmio_base;
    /* Device-specific config (capacity, blk_size) lives after the common MMIO registers. */
    virtio_blk_dev->config = (struct virtio_blk_config *)virtio_dev->mmio_config->config;
#ifdef RT_USING_SMP
    rt_spin_lock_init(&virtio_dev->spinlock);
#endif
    virtio_reset_device(virtio_dev);
    virtio_status_acknowledge_driver(virtio_dev);
    /* Negotiate features: accept everything the device offers except the masked bits. */
    virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
            (1 << VIRTIO_BLK_F_RO) |
            (1 << VIRTIO_BLK_F_MQ) |
            (1 << VIRTIO_BLK_F_SCSI) |
            (1 << VIRTIO_BLK_F_CONFIG_WCE) |
            (1 << VIRTIO_F_ANY_LAYOUT) |
            (1 << VIRTIO_F_RING_EVENT_IDX) |
            (1 << VIRTIO_F_RING_INDIRECT_DESC));
    /* Tell device that feature negotiation is complete and we're completely ready */
    /* NOTE(review): DRIVER_OK is set before the queues are configured below; the
     * virtio spec orders queue setup before DRIVER_OK — confirm against the
     * common-interface implementation whether this is intentional. */
    virtio_status_driver_ok(virtio_dev);
    if (virtio_queues_alloc(virtio_dev, 1) != RT_EOK)
    {
        goto _alloc_fail;
    }
    /* Initialize queue 0 */
    if (virtio_queue_init(virtio_dev, 0, VIRTIO_BLK_QUEUE_RING_SIZE) != RT_EOK)
    {
        goto _alloc_fail;
    }
    virtio_blk_dev->parent.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
    virtio_blk_dev->parent.ops = &virtio_blk_ops;
#else
    virtio_blk_dev->parent.init = RT_NULL;
    virtio_blk_dev->parent.open = RT_NULL;
    virtio_blk_dev->parent.close = RT_NULL;
    virtio_blk_dev->parent.read = virtio_blk_read;
    virtio_blk_dev->parent.write = virtio_blk_write;
    virtio_blk_dev->parent.control = virtio_blk_control;
#endif
    rt_snprintf(dev_name, RT_NAME_MAX, "virtio-blk%d", dev_no++);
    rt_hw_interrupt_install(irq, virtio_blk_isr, virtio_blk_dev, dev_name);
    rt_hw_interrupt_umask(irq);
    return rt_device_register((rt_device_t)virtio_blk_dev, dev_name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE);
_alloc_fail:
    /* NOTE(review): reached both when queue allocation and when queue init fail;
     * presumably virtio_queues_free() tolerates a partially set-up state — verify. */
    if (virtio_blk_dev != RT_NULL)
    {
        virtio_queues_free(virtio_dev);
        rt_free(virtio_blk_dev);
    }
    return -RT_ENOMEM;
}
  200. #endif /* RT_USING_VIRTIO_BLK */