virtio_blk.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-9-16      GuEe-GUI     the first version
 * 2021-11-11     GuEe-GUI     using virtio common interface
 */

#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>

#include <virtio_blk.h>

#ifdef BSP_USING_VIRTIO_BLK

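/*
 * Perform one synchronous block transfer on virtqueue 0. The request uses a
 * chain of three descriptors:
 *   idx[0] - request header (type/ioprio/sector), read by the device
 *   idx[1] - data buffer of VIRTIO_BLK_BUF_DATA_SIZE bytes, written by the
 *            device for VIRTIO_BLK_T_IN, read by it for VIRTIO_BLK_T_OUT
 *   idx[2] - one-byte status field, written by the device
 * The calling thread yields until virtio_blk_isr() marks the request finished.
 */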
static void virtio_blk_rw(struct virtio_blk_device *virtio_blk_dev, rt_off_t pos, void *buffer, int flags)
{
    rt_uint16_t idx[3];
    struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
#ifdef RT_USING_SMP
    rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif

    /* Allocate 3 descriptors */
    while (virtio_alloc_desc_chain(virtio_dev, 0, 3, idx))
    {
    }

    virtio_blk_dev->info[idx[0]].status = 0xff;
    virtio_blk_dev->info[idx[0]].valid = RT_TRUE;

    virtio_blk_dev->info[idx[0]].req.type = flags;
    virtio_blk_dev->info[idx[0]].req.ioprio = 0;
    virtio_blk_dev->info[idx[0]].req.sector = pos * (VIRTIO_BLK_BUF_DATA_SIZE / 512);

    flags = flags == VIRTIO_BLK_T_OUT ? 0 : VIRTQ_DESC_F_WRITE;

    virtio_fill_desc(virtio_dev, 0, idx[0],
            VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].req), sizeof(struct virtio_blk_req), VIRTQ_DESC_F_NEXT, idx[1]);

    virtio_fill_desc(virtio_dev, 0, idx[1],
            VIRTIO_VA2PA(buffer), VIRTIO_BLK_BUF_DATA_SIZE, flags | VIRTQ_DESC_F_NEXT, idx[2]);

    virtio_fill_desc(virtio_dev, 0, idx[2],
            VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].status), 1, VIRTQ_DESC_F_WRITE, 0);

    virtio_submit_chain(virtio_dev, 0, idx[0]);

    virtio_queue_notify(virtio_dev, 0);

    /* Wait for virtio_blk_isr() to finish the request */
    while (virtio_blk_dev->info[idx[0]].valid)
    {
#ifdef RT_USING_SMP
        rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
        rt_thread_yield();
#ifdef RT_USING_SMP
        level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
    }

    virtio_free_desc_chain(virtio_dev, 0, idx[0]);

#ifdef RT_USING_SMP
    rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}

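/*
 * Block device read/write entry points. `pos` is a block index (one block is
 * VIRTIO_BLK_BUF_DATA_SIZE bytes); each call transfers a single block and
 * returns the requested size unchanged.
 */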
static rt_size_t virtio_blk_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    virtio_blk_rw((struct virtio_blk_device *)dev, pos, buffer, VIRTIO_BLK_T_IN);

    return size;
}

static rt_size_t virtio_blk_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    virtio_blk_rw((struct virtio_blk_device *)dev, pos, (void *)buffer, VIRTIO_BLK_T_OUT);

    return size;
}

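/* RT_DEVICE_CTRL_BLK_GETGEOME reports the fixed sector/block sizes and the
 * capacity read from the device's MMIO config space. */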
static rt_err_t virtio_blk_control(rt_device_t dev, int cmd, void *args)
{
    rt_err_t status = RT_EOK;
    struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)dev;

    switch (cmd)
    {
    case RT_DEVICE_CTRL_BLK_GETGEOME:
        {
            struct rt_device_blk_geometry *geometry = (struct rt_device_blk_geometry *)args;

            if (geometry == RT_NULL)
            {
                status = -RT_ERROR;
                break;
            }

            geometry->bytes_per_sector = VIRTIO_BLK_BYTES_PER_SECTOR;
            geometry->block_size = VIRTIO_BLK_BLOCK_SIZE;
            geometry->sector_count = virtio_blk_dev->virtio_dev.mmio_config->config[0];
        }
        break;
    default:
        break;
    }

    return status;
}

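/* Device operations table: the init/open/close slots are unused (RT_NULL);
 * only read, write and control are provided. */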
const static struct rt_device_ops virtio_blk_ops =
{
    RT_NULL,
    RT_NULL,
    RT_NULL,
    virtio_blk_read,
    virtio_blk_write,
    virtio_blk_control
};

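/*
 * Interrupt handler for the virtio-blk device: acknowledge the interrupt,
 * then walk the used ring from the last processed index up to the device's
 * current index, clearing the `valid` flag of each finished request so that
 * virtio_blk_rw() stops waiting on it.
 */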
static void virtio_blk_isr(int irqno, void *param)
{
    rt_uint32_t id;
    struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)param;
    struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
#ifdef RT_USING_SMP
    rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif

    virtio_interrupt_ack(virtio_dev, virtio_dev->mmio_config->interrupt_status & 0x3);
    rt_hw_dsb();

    /* The device increments used->idx when it adds an entry to the used ring */
    while (virtio_dev->queues[0].used_idx != virtio_dev->queues[0].used->idx)
    {
        rt_hw_dsb();

        id = virtio_dev->queues[0].used->ring[virtio_dev->queues[0].used_idx % VIRTIO_BLK_QUEUE_RING_SIZE].id;

        RT_ASSERT(virtio_blk_dev->info[id].status == 0);

        /* Done with buffer */
        virtio_blk_dev->info[id].valid = RT_FALSE;

        rt_thread_yield();

        virtio_dev->queues[0].used_idx++;
    }

#ifdef RT_USING_SMP
    rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}

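/*
 * Set up one virtio-blk device at `mmio_base` / `irq`: reset it, negotiate
 * features, create virtqueue 0, install the interrupt handler and register
 * the result as a removable block device named "virtio-blk<N>".
 */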
rt_err_t rt_virtio_blk_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
    static int dev_no = 0;
    char dev_name[RT_NAME_MAX];
    struct virtio_device *virtio_dev;
    struct virtio_blk_device *virtio_blk_dev;

    virtio_blk_dev = rt_malloc(sizeof(struct virtio_blk_device));

    if (virtio_blk_dev == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    virtio_dev = &virtio_blk_dev->virtio_dev;
    virtio_dev->irq = irq;
    virtio_dev->mmio_base = mmio_base;

#ifdef RT_USING_SMP
    rt_spin_lock_init(&virtio_dev->spinlock);
#endif

    virtio_reset_device(virtio_dev);
    virtio_status_acknowledge_driver(virtio_dev);

    /* Negotiate features: accept what the device offers, except read-only,
     * multi-queue, SCSI passthrough, writeback cache control and the ring
     * features this driver does not use */
    virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
            (1 << VIRTIO_BLK_F_RO) |
            (1 << VIRTIO_BLK_F_MQ) |
            (1 << VIRTIO_BLK_F_SCSI) |
            (1 << VIRTIO_BLK_F_CONFIG_WCE) |
            (1 << VIRTIO_F_ANY_LAYOUT) |
            (1 << VIRTIO_F_RING_EVENT_IDX) |
            (1 << VIRTIO_F_RING_INDIRECT_DESC));

    /* Tell the device that feature negotiation is complete and the driver is ready */
    virtio_status_driver_ok(virtio_dev);

    /* Initialize queue 0 */
    if (virtio_queue_init(virtio_dev, 0, VIRTIO_BLK_QUEUE_RING_SIZE) != RT_EOK)
    {
        rt_free(virtio_blk_dev);

        return -RT_ENOMEM;
    }

    virtio_blk_dev->parent.type = RT_Device_Class_Block;
    virtio_blk_dev->parent.ops = &virtio_blk_ops;

    rt_snprintf(dev_name, RT_NAME_MAX, "%s%d", "virtio-blk", dev_no++);

    rt_hw_interrupt_install(irq, virtio_blk_isr, virtio_blk_dev, dev_name);
    rt_hw_interrupt_umask(irq);

    return rt_device_register((rt_device_t)virtio_blk_dev, dev_name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE);
}

#endif /* BSP_USING_VIRTIO_BLK */