/* drv_virtio_blk.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2021-9-16 GuEe-GUI the first version
  9. */
  10. #include <rtconfig.h>
  11. #include <rtthread.h>
  12. #include <rthw.h>
  13. #include <cpuport.h>
  14. #include <board.h>
  15. #include "virtio.h"
  16. #include "virtio_mmio.h"
  17. #include "drv_virtio_blk.h"
  18. #ifdef BSP_USING_VIRTIO_BLK
  19. #ifdef RT_USING_VIRTIO_BLK0
  20. static struct virtio_blk blk0;
  21. static struct virtio_blk_device virtio_blk_dev0;
  22. #endif /* RT_USING_VIRTIO_BLK0 */
  23. static int alloc_desc(struct virtio_blk *blk)
  24. {
  25. int i;
  26. for(i = 0; i < QUEUE_SIZE; i++)
  27. {
  28. if (blk->free[i])
  29. {
  30. blk->free[i] = 0;
  31. return i;
  32. }
  33. }
  34. return -RT_ERROR;
  35. }
  36. static void free_desc(struct virtio_blk *blk, int i)
  37. {
  38. if (i >= QUEUE_SIZE)
  39. {
  40. rt_kprintf("Out of queue number");
  41. RT_ASSERT(0);
  42. }
  43. if (blk0.free[i])
  44. {
  45. rt_kprintf("Already freed");
  46. RT_ASSERT(0);
  47. }
  48. blk->desc[i].addr = 0;
  49. blk->desc[i].len = 0;
  50. blk->desc[i].flags = 0;
  51. blk->desc[i].next = 0;
  52. blk->free[i] = 1;
  53. }
  54. static void free_chain(struct virtio_blk *blk, int i)
  55. {
  56. int flag, nxt;
  57. for (;;)
  58. {
  59. flag = blk->desc[i].flags;
  60. nxt = blk->desc[i].next;
  61. free_desc(blk, i);
  62. if (flag & VRING_DESC_F_NEXT)
  63. {
  64. i = nxt;
  65. }
  66. else
  67. {
  68. break;
  69. }
  70. }
  71. }
  72. static int alloc3_desc(struct virtio_blk *blk, int *idx)
  73. {
  74. for (int i = 0; i < 3; ++i)
  75. {
  76. idx[i] = alloc_desc(blk);
  77. if (idx[i] < 0)
  78. {
  79. for (int j = 0; j < i; ++j)
  80. {
  81. free_desc(blk, idx[j]);
  82. }
  83. return -RT_ERROR;
  84. }
  85. }
  86. return 0;
  87. }
  88. static int virtio_blk_device_init(struct virtio_blk_device *virtio_blk_dev)
  89. {
  90. uint32_t status = 0;
  91. uint32_t max;
  92. uint64_t features;
  93. int i;
  94. #ifdef RT_USING_SMP
  95. rt_spin_lock_init(&virtio_blk_dev->spinlock);
  96. #endif
  97. if (virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_MAGIC_VALUE) != VIRTIO_MMIO_MAGIC ||
  98. virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_VERSION) != 1 ||
  99. virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_DEVICE_ID) != 2 ||
  100. virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_VENDOR_ID) != VIRTIO_MMIO_VENDOR)
  101. {
  102. rt_kprintf("Could not find virtio disk");
  103. return -RT_ERROR;
  104. }
  105. status |= VIRTIO_STAT_ACKNOWLEDGE;
  106. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_STATUS, status);
  107. status |= VIRTIO_STAT_DRIVER;
  108. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_STATUS, status);
  109. /* negotiate features */
  110. features = virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_DEVICE_FEATURES);
  111. features &= ~(1 << VIRTIO_BLK_F_RO);
  112. features &= ~(1 << VIRTIO_BLK_F_SCSI);
  113. features &= ~(1 << VIRTIO_BLK_F_CONFIG_WCE);
  114. features &= ~(1 << VIRTIO_BLK_F_MQ);
  115. features &= ~(1 << VIRTIO_F_ANY_LAYOUT);
  116. features &= ~(1 << VIRTIO_RING_F_EVENT_IDX);
  117. features &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
  118. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_DRIVER_FEATURES, features);
  119. /* tell device that feature negotiation is complete */
  120. status |= VIRTIO_STAT_FEATURES_OK;
  121. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_STATUS, status);
  122. /* tell device we're completely ready */
  123. status |= VIRTIO_STAT_DRIVER_OK;
  124. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_STATUS, status);
  125. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_GUEST_PAGE_SIZE, PAGE_SIZE);
  126. /* initialize queue 0 */
  127. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_QUEUE_SEL, 0);
  128. max = virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_QUEUE_NUM_MAX);
  129. if (max == 0)
  130. {
  131. rt_kprintf("Virtio disk has no queue 0");
  132. RT_ASSERT(0);
  133. }
  134. if (max < QUEUE_SIZE)
  135. {
  136. rt_kprintf("Virtio disk max queue too short");
  137. RT_ASSERT(0);
  138. }
  139. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_QUEUE_NUM, QUEUE_SIZE);
  140. rt_memset(virtio_blk_dev->blk->pages, 0, sizeof(virtio_blk_dev->blk->pages));
  141. virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_QUEUE_PFN, ((uint64_t)blk0.pages) >> PAGE_SHIFT);
  142. virtio_blk_dev->blk->desc = (struct virtq_desc *)virtio_blk_dev->blk->pages;
  143. virtio_blk_dev->blk->avail = (struct virtq_avail *)(virtio_blk_dev->blk->pages + QUEUE_SIZE * sizeof(struct virtq_desc));
  144. virtio_blk_dev->blk->used = (struct virtq_used *)(virtio_blk_dev->blk->pages + PAGE_SIZE);
  145. /* all QUEUE_SIZE descriptors start out unused */
  146. for (i = 0; i < QUEUE_SIZE; ++i)
  147. {
  148. virtio_blk_dev->blk->free[i] = 1;
  149. }
  150. return RT_EOK;
  151. }
/*
 * Submit one request for a single VIRTIO_BLK_BUF_DATA_SIZE block and
 * busy-wait (yielding) until virtio_blk_isr() marks it complete.
 *
 * virtio_blk_dev: the device to talk to.
 * buf:            carries block_no and the data pointer; buf->valid is
 *                 used as the completion flag shared with the ISR.
 * flag:           VIRTIO_BLK_T_IN (read) or VIRTIO_BLK_T_OUT (write) —
 *                 stored directly as the request type.
 */
static void virtio_blk_rw(struct virtio_blk_device *virtio_blk_dev, struct virtio_blk_buf *buf, int flag)
{
    struct virtio_blk *blk = virtio_blk_dev->blk;
    /* driver blocks are VIRTIO_BLK_BUF_DATA_SIZE bytes; the device
     * addresses 512-byte sectors */
    uint64_t sector = buf->block_no * (VIRTIO_BLK_BUF_DATA_SIZE / 512);
    int idx[3];
    struct virtio_blk_req *req;
#ifdef RT_USING_SMP
    rt_base_t level;
    level = rt_spin_lock_irqsave(&virtio_blk_dev->spinlock);
#endif
    /* allocate the three descriptors (header, data, status byte).
     * NOTE(review): this spins with the spinlock held until a chain
     * frees up — relies on the ISR releasing chains promptly. */
    for (;;)
    {
        if (alloc3_desc(blk, idx) == 0)
        {
            break;
        }
    }
    /* descriptor 0: the request header */
    req = &(blk->ops[idx[0]]);
    req->type = flag;
    req->reserved = 0;
    req->sector = sector;
    blk->desc[idx[0]].addr = (uint64_t)req;
    blk->desc[idx[0]].len = sizeof(struct virtio_blk_req);
    blk->desc[idx[0]].flags = VRING_DESC_F_NEXT;
    blk->desc[idx[0]].next = idx[1];
    /* descriptor 1: the data buffer — device-writable on a read
     * (flag == VIRTIO_BLK_T_IN == 0) */
    blk->desc[idx[1]].addr = (uint64_t)buf->data;
    blk->desc[idx[1]].len = VIRTIO_BLK_BUF_DATA_SIZE;
    blk->desc[idx[1]].flags = flag ? 0 : VRING_DESC_F_WRITE;
    blk->desc[idx[1]].flags |= VRING_DESC_F_NEXT;
    blk->desc[idx[1]].next = idx[2];
    /* descriptor 2: one status byte; device writes 0 on success */
    blk->info[idx[0]].status = 0xff;
    blk->desc[idx[2]].addr = (uint64_t)&(blk->info[idx[0]].status);
    blk->desc[idx[2]].len = 1;
    /* device writes the status */
    blk->desc[idx[2]].flags = VRING_DESC_F_WRITE;
    blk->desc[idx[2]].next = 0;
    /* record struct buf for virtio_blk_isr() */
    buf->valid = 1;
    blk->info[idx[0]].buf = buf;
    /* tell the device the first index in our chain of descriptors */
    blk->avail->ring[blk->avail->idx % QUEUE_SIZE] = idx[0];
    /* barrier: descriptors must be visible before the index is published */
    rt_hw_dsb();
    /* tell the device another avail ring entry is available */
    blk->avail->idx += 1;
    rt_hw_dsb();
    /* value is queue number */
    virtio_mmio_write32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_QUEUE_NOTIFY, 0);
    /* wait for virtio_blk_isr() to done — drop the lock around the
     * yield so the ISR can acquire it and clear buf->valid */
    while (buf->valid == 1)
    {
#ifdef RT_USING_SMP
        rt_spin_unlock_irqrestore(&virtio_blk_dev->spinlock, level);
#endif
        rt_thread_yield();
#ifdef RT_USING_SMP
        level = rt_spin_lock_irqsave(&virtio_blk_dev->spinlock);
#endif
    }
    blk->info[idx[0]].buf = 0;
    free_chain(blk, idx[0]);
#ifdef RT_USING_SMP
    rt_spin_unlock_irqrestore(&virtio_blk_dev->spinlock, level);
#endif
}
  218. static void virtio_blk_isr(int irqno, void *param)
  219. {
  220. int id;
  221. struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)param;
  222. struct virtio_blk *blk = virtio_blk_dev->blk;
  223. struct virtio_blk_buf *buf_tmp;
  224. #ifdef RT_USING_SMP
  225. rt_base_t level;
  226. level = rt_spin_lock_irqsave(&virtio_blk_dev->spinlock);
  227. #endif
  228. virtio_mmio_write32(
  229. virtio_blk_dev->mmio_base,
  230. VIRTIO_MMIO_INTERRUPT_ACK,
  231. virtio_mmio_read32(virtio_blk_dev->mmio_base, VIRTIO_MMIO_INTERRUPT_STATUS) & 0x3);
  232. rt_hw_dsb();
  233. /*
  234. * the device increments disk.used->idx
  235. * when it adds an entry to the used ring
  236. */
  237. while (blk->used_idx != blk->used->idx)
  238. {
  239. rt_hw_dsb();
  240. id = blk->used->ring[blk->used_idx % QUEUE_SIZE].id;
  241. if (blk->info[id].status != 0)
  242. {
  243. rt_kprintf("Virtio BLK Status");
  244. RT_ASSERT(0);
  245. }
  246. buf_tmp = blk->info[id].buf;
  247. /* done with buf */
  248. buf_tmp->valid = 0;
  249. rt_thread_yield();
  250. blk->used_idx += 1;
  251. }
  252. #ifdef RT_USING_SMP
  253. rt_spin_unlock_irqrestore(&virtio_blk_dev->spinlock, level);
  254. #endif
  255. }
  256. static rt_err_t virtio_blk_init(rt_device_t dev)
  257. {
  258. return RT_EOK;
  259. }
  260. static rt_err_t virtio_blk_open(rt_device_t dev, rt_uint16_t oflag)
  261. {
  262. return RT_EOK;
  263. }
  264. static rt_err_t virtio_blk_close(rt_device_t dev)
  265. {
  266. return RT_EOK;
  267. }
  268. static rt_size_t virtio_blk_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
  269. {
  270. struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)dev;
  271. struct virtio_blk_buf buf =
  272. {
  273. .block_no = (uint32_t)pos,
  274. .data = (uint8_t *)buffer
  275. };
  276. virtio_blk_rw(virtio_blk_dev, &buf, VIRTIO_BLK_T_IN);
  277. return size;
  278. }
  279. static rt_size_t virtio_blk_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
  280. {
  281. struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)dev;
  282. struct virtio_blk_buf buf =
  283. {
  284. .block_no = (uint32_t)pos,
  285. .data = (uint8_t *)buffer
  286. };
  287. virtio_blk_rw(virtio_blk_dev, &buf, VIRTIO_BLK_T_OUT);
  288. return size;
  289. }
  290. static rt_err_t virtio_blk_control(rt_device_t dev, int cmd, void *args)
  291. {
  292. if (cmd == RT_DEVICE_CTRL_BLK_GETGEOME)
  293. {
  294. struct rt_device_blk_geometry *geometry;
  295. geometry = (struct rt_device_blk_geometry *)args;
  296. if (geometry == RT_NULL)
  297. {
  298. return -RT_ERROR;
  299. }
  300. geometry->bytes_per_sector = VIRTIO_BLK_BYTES_PER_SECTOR;
  301. geometry->block_size = VIRTIO_BLK_BLOCK_SIZE;
  302. geometry->sector_count = VIRTIO_BLK_SECTOR_COUNT;
  303. }
  304. return RT_EOK;
  305. }
  306. const static struct rt_device_ops virtio_blk_ops =
  307. {
  308. virtio_blk_init,
  309. virtio_blk_open,
  310. virtio_blk_close,
  311. virtio_blk_read,
  312. virtio_blk_write,
  313. virtio_blk_control
  314. };
  315. int rt_virtio_blk_init(void)
  316. {
  317. rt_err_t status = RT_EOK;
  318. #ifdef RT_USING_VIRTIO_BLK0
  319. virtio_blk_dev0.parent.type = RT_Device_Class_Block;
  320. virtio_blk_dev0.parent.ops = &virtio_blk_ops;
  321. virtio_blk_dev0.blk = &blk0;
  322. virtio_blk_dev0.mmio_base = (uint32_t *)VIRTIO_MMIO_BLK0_BASE;
  323. status = virtio_blk_device_init(&virtio_blk_dev0);
  324. rt_device_register((rt_device_t)&virtio_blk_dev0, "virtio-blk0", RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE);
  325. rt_hw_interrupt_install(VIRTIO_MMIO_BLK0_IRQ, virtio_blk_isr, &virtio_blk_dev0, "virtio-blk0");
  326. rt_hw_interrupt_umask(VIRTIO_MMIO_BLK0_IRQ);
  327. #endif /* RT_USING_VIRTIO_BLK0 */
  328. return status;
  329. }
  330. INIT_DEVICE_EXPORT(rt_virtio_blk_init);
  331. #endif /* BSP_USING_VIRTIO_BLK */