dev_block.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-07-25 weety first version
  9. * 2023-08-08 GuEe-GUI port to the block
  10. */
  11. #include <rtthread.h>
  12. #include <drivers/blk.h>
  13. #include <drivers/misc.h>
  14. #include <drivers/dev_mmcsd_core.h>
  15. #define DBG_TAG "SDIO"
  16. #ifdef RT_SDIO_DEBUG
  17. #define DBG_LVL DBG_LOG
  18. #else
  19. #define DBG_LVL DBG_INFO
  20. #endif /* RT_SDIO_DEBUG */
  21. #include <rtdbg.h>
  22. #ifndef RT_MMCSD_MAX_PARTITION
  23. #define RT_MMCSD_MAX_PARTITION 16
  24. #endif
/* Per-card block-device instance: binds an MMC/SD card to the generic
 * rt_blk_disk layer. */
struct mmcsd_blk_device
{
    struct rt_blk_disk parent;              /* generic block-disk base object */
    struct rt_mmcsd_card *card;             /* backing MMC/SD card */
    rt_size_t max_req_size;                 /* max sectors per single transfer request */
    struct rt_device_blk_geometry geometry; /* geometry reported to upper layers */
};

/* Recover the mmcsd_blk_device from its embedded rt_blk_disk member. */
#define raw_to_mmcsd_blk(raw) rt_container_of(raw, struct mmcsd_blk_device, parent)
  33. #ifdef RT_USING_DM
  34. static struct rt_dm_ida sdio_ida = RT_DM_IDA_INIT(SDIO);
  35. #endif
  36. static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
  37. {
  38. int err;
  39. struct rt_mmcsd_cmd cmd;
  40. cmd.busy_timeout = 0;
  41. cmd.cmd_code = SEND_STATUS;
  42. cmd.arg = card->rca << 16;
  43. cmd.flags = RESP_R1 | CMD_AC;
  44. err = mmcsd_send_cmd(card->host, &cmd, retries);
  45. if (err)
  46. return err;
  47. if (status)
  48. *status = cmd.resp[0];
  49. return 0;
  50. }
/**
 * Poll the card with CMD13 until it reports READY_FOR_DATA and has left the
 * programming state, or until timeout_ms elapses.
 *
 * @param card       card to poll
 * @param timeout_ms maximum time to wait, in milliseconds
 * @param resp_errs  if non-NULL, accumulates (ORs) every status word seen so
 *                   the caller can inspect response error bits afterwards
 *
 * @return 0 when the card became ready, -RT_ETIMEOUT on timeout, or the
 *         error code from __send_status() on a command failure
 */
static int card_busy_detect(struct rt_mmcsd_card *card, unsigned int timeout_ms,
        rt_uint32_t *resp_errs)
{
    int timeout = rt_tick_from_millisecond(timeout_ms);
    int err = 0;
    rt_uint32_t status;
    rt_tick_t start;

    start = rt_tick_get();
    do
    {
        /* Sample the timeout condition BEFORE issuing the status command, so
         * that one final status read always happens after the deadline — the
         * card may have become ready exactly at the boundary. */
        rt_bool_t out = (int)(rt_tick_get() - start) > timeout;

        err = __send_status(card, &status, 5);
        if (err)
        {
            LOG_E("error %d requesting status", err);
            return err;
        }

        /* Accumulate any response error bits seen */
        if (resp_errs)
            *resp_errs |= status;

        if (out)
        {
            LOG_E("wait card busy timeout");
            return -RT_ETIMEOUT;
        }
        /*
         * Some cards mishandle the status bits,
         * so make sure to check both the busy
         * indication and the card state.
         */
    }
    while (!(status & R1_READY_FOR_DATA) ||
            (R1_CURRENT_STATE(status) == 7)); /* state 7 = programming */

    return err;
}
/**
 * Query how many blocks the card successfully wrote in the last multi-block
 * write, using ACMD22 (SD_APP_SEND_NUM_WR_BLKS).
 *
 * Sequence: CMD55 (APP_CMD) to arm the application command, then ACMD22 as a
 * 4-byte data read whose payload is the written-block count.
 *
 * @param card card to query
 *
 * @return the number of written blocks, or -RT_ERROR on any failure
 *
 * NOTE(review): the 4-byte payload is returned as-is; the SD spec transfers
 * this value MSB-first, so a byte-order conversion may be needed on
 * little-endian hosts — confirm against the host driver's data handling.
 */
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;

    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    /* Step 1: CMD55 selects application-command mode for the next command. */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    /* In native (non-SPI) mode the card must acknowledge APP_CMD via R1. */
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    /* Step 2: ACMD22 — read the written-block count as a 4-byte data block. */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    /* Use a generous timeout: 100x the card's rated access time, but capped
     * at 100 ms overall. */
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
        (card->host->io_cfg.clock / 1000);

    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));

    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
/**
 * Perform one block transfer (read or write) of `blks` 512-byte sectors.
 *
 * @param card   target card
 * @param sector starting sector number (converted to a byte address for
 *               byte-addressed, non-SDHC cards)
 * @param buf    data buffer (source for writes, destination for reads)
 * @param blks   number of sectors to transfer
 * @param dir    0 = read, non-zero = write
 *
 * @return RT_EOK on success, -RT_ERROR if the command, data or stop phase
 *         reported an error
 */
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
        rt_uint32_t sector,
        void *buf,
        rt_size_t blks,
        rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    /* Serialize access to the host controller for the whole transaction. */
    mmcsd_host_lock(host);

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        /* Standard-capacity cards are byte-addressed: sector -> byte offset. */
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        /* Multi-block transfers need an explicit STOP_TRANSMISSION, except
         * for SPI-mode writes where the host handles termination itself. */
        if (!controller_is_spi(card->host) || !dir)
        {
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    /* 0x8000 in card->flags marks "previous request was a write": the card
     * may still be programming, so wait until it reports ready. */
    if (!controller_is_spi(card->host) && (card->flags & 0x8000))
    {
        /* last request is WRITE,need check busy */
        card_busy_detect(card, 10000, RT_NULL);
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
        card->flags &= 0x7fff;  /* clear the pending-write marker */
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
        card->flags |= 0x8000;  /* mark a write so the next request waits */
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);

    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
                cmd.err, data.err, stop.err, data.flags, sector);

        return -RT_ERROR;
    }

    return RT_EOK;
}
  204. static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
  205. {
  206. struct rt_mmcsd_cmd cmd;
  207. int err;
  208. /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
  209. if (card->flags & CARD_FLAG_SDHC)
  210. return 0;
  211. mmcsd_host_lock(card->host);
  212. cmd.cmd_code = SET_BLOCKLEN;
  213. cmd.arg = 512;
  214. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  215. err = mmcsd_send_cmd(card->host, &cmd, 5);
  216. mmcsd_host_unlock(card->host);
  217. if (err)
  218. {
  219. LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
  220. return -RT_ERROR;
  221. }
  222. return 0;
  223. }
  224. static rt_ssize_t mmcsd_blk_read(struct rt_blk_disk *disk, rt_off_t sector,
  225. void *buffer, rt_size_t sector_count)
  226. {
  227. rt_err_t err;
  228. rt_size_t offset = 0;
  229. rt_size_t req_size = 0;
  230. rt_size_t remain_size = sector_count;
  231. void *rd_ptr = (void *)buffer;
  232. struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
  233. while (remain_size)
  234. {
  235. req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
  236. err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, rd_ptr, req_size, 0);
  237. if (err)
  238. {
  239. return err;
  240. }
  241. offset += req_size;
  242. rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
  243. remain_size -= req_size;
  244. }
  245. return sector_count - remain_size;
  246. }
  247. static rt_ssize_t mmcsd_blk_write(struct rt_blk_disk *disk, rt_off_t sector,
  248. const void *buffer, rt_size_t sector_count)
  249. {
  250. rt_err_t err;
  251. rt_size_t offset = 0;
  252. rt_size_t req_size = 0;
  253. rt_size_t remain_size = sector_count;
  254. void *wr_ptr = (void *)buffer;
  255. struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
  256. while (remain_size)
  257. {
  258. req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
  259. err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, wr_ptr, req_size, 1);
  260. if (err)
  261. {
  262. return err;
  263. }
  264. offset += req_size;
  265. wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
  266. remain_size -= req_size;
  267. }
  268. return sector_count - remain_size;
  269. }
  270. static rt_err_t mmcsd_blk_getgeome(struct rt_blk_disk *disk,
  271. struct rt_device_blk_geometry *geometry)
  272. {
  273. struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
  274. rt_memcpy(geometry, &blk_dev->geometry, sizeof(*geometry));
  275. return RT_EOK;
  276. }
/* Operations table hooked into the generic rt_blk_disk layer. */
static const struct rt_blk_disk_ops mmcsd_blk_ops =
{
    .read = mmcsd_blk_read,
    .write = mmcsd_blk_write,
    .getgeome = mmcsd_blk_getgeome,
};
  283. rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
  284. {
  285. rt_err_t err;
  286. struct rt_mmcsd_host *host = card->host;
  287. struct mmcsd_blk_device *blk_dev = rt_calloc(1, sizeof(*blk_dev));
  288. if (!blk_dev)
  289. {
  290. return -RT_ENOMEM;
  291. }
  292. card->blk_dev = blk_dev;
  293. #ifdef RT_USING_DM
  294. blk_dev->parent.ida = &sdio_ida;
  295. #endif
  296. blk_dev->parent.parallel_io = RT_FALSE;
  297. blk_dev->parent.removable = controller_is_removable(host);
  298. blk_dev->parent.ops = &mmcsd_blk_ops;
  299. blk_dev->parent.max_partitions = RT_MMCSD_MAX_PARTITION;
  300. blk_dev->card = card;
  301. blk_dev->max_req_size = rt_min_t(rt_size_t,
  302. host->max_dma_segs * host->max_seg_size,
  303. host->max_blk_count * host->max_blk_size) >> 9;
  304. blk_dev->geometry.bytes_per_sector = 1 << 9;
  305. blk_dev->geometry.block_size = card->card_blksize;
  306. blk_dev->geometry.sector_count = card->card_capacity * (1024 / 512);
  307. /* Set blk size before partitions probe, Why? */
  308. if ((err = mmcsd_set_blksize(card)))
  309. {
  310. goto _fail;
  311. }
  312. rt_thread_mdelay(1);
  313. #ifdef RT_USING_DM
  314. rt_dm_dev_set_name(&blk_dev->parent.parent, host->name);
  315. #else
  316. rt_strncpy(blk_dev->parent.parent.parent.name, host->name, RT_NAME_MAX);
  317. #endif
  318. if ((err = rt_hw_blk_disk_register(&blk_dev->parent)))
  319. {
  320. goto _fail;
  321. }
  322. return RT_EOK;
  323. _fail:
  324. card->blk_dev = RT_NULL;
  325. free(blk_dev);
  326. return err;
  327. }
  328. void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
  329. {
  330. struct mmcsd_blk_device *blk_dev = card->blk_dev;
  331. if (!blk_dev)
  332. {
  333. return;
  334. }
  335. if (!rt_hw_blk_disk_unregister(&blk_dev->parent))
  336. {
  337. card->blk_dev = RT_NULL;
  338. rt_free(blk_dev);
  339. }
  340. }