/* block_dev.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-07-25 weety first version
  9. */
  10. #include <rtthread.h>
  11. #include <dfs_fs.h>
  12. #include <drivers/mmcsd_core.h>
  13. #define DBG_TAG "SDIO"
  14. #ifdef RT_SDIO_DEBUG
  15. #define DBG_LVL DBG_LOG
  16. #else
  17. #define DBG_LVL DBG_INFO
  18. #endif /* RT_SDIO_DEBUG */
  19. #include <rtdbg.h>
/* All registered mmc/sd block devices, linked through mmcsd_blk_device.list. */
static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);

#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))

/* One block device instance: covers either a single partition of a card,
 * or the whole card when no partition table is found. */
struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;                 /* underlying mmc/sd card */
    rt_list_t list;                             /* node in blk_devices */
    struct rt_device dev;                       /* registered RT-Thread device */
    struct dfs_partition part;                  /* partition mapped by this device */
    struct rt_device_blk_geometry geometry;     /* geometry reported via control() */
    rt_size_t max_req_size;                     /* max sectors per single transfer (see probe) */
};

/* Upper bound on partition-table entries scanned during probe. */
#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
/**
 * Ask an SD card how many blocks were successfully written by the previous
 * write transfer (APP_CMD followed by SD_APP_SEND_NUM_WR_BLKS, returning a
 * 4-byte data block).
 *
 * @param card the target mmc/sd card
 *
 * @return the number of well-written blocks, or -RT_ERROR on any
 *         command/data failure
 */
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;

    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    /* announce that the next command is an application-specific command */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    /* native (non-SPI) mode: the card must acknowledge via R1_APP_CMD */
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    /* scale the card's nominal access time by 100 for a generous margin */
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
        (card->host->io_cfg.clock / 1000);

    if (timeout_us > 100000)
    {
        /* cap the combined timeout at 100 ms */
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    /* the card answers with a single 4-byte block holding the count */
    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
/**
 * Transfer 'blks' 512-byte blocks between 'buf' and the card, starting at
 * sector 'sector'.  The host is locked for the duration of the request.
 *
 * @param card   target mmc/sd card
 * @param sector first sector number of the transfer
 * @param buf    data buffer (destination for reads, source for writes)
 * @param blks   number of blocks to transfer
 * @param dir    0 = read from card, non-zero = write to card
 *
 * @return RT_EOK on success, -RT_ERROR if any command/data/stop phase failed
 */
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    /* serialise access to the host controller */
    mmcsd_host_lock(host);

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        /* non-SDHC cards are byte addressed: convert sector to byte offset */
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        if (!controller_is_spi(card->host) || !dir)
        {
            /* multi-block transfers end with an explicit STOP_TRANSMISSION */
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);

    if (!controller_is_spi(card->host) && dir != 0)
    {
        /* after a write, poll SEND_STATUS until the card reports it is
         * ready for data and has left card state 7 (busy programming) */
        do
        {
            rt_int32_t err;

            cmd.cmd_code = SEND_STATUS;
            cmd.arg = card->rca << 16;
            cmd.flags = RESP_R1 | CMD_AC;
            err = mmcsd_send_cmd(card->host, &cmd, 5);
            if (err)
            {
                LOG_E("error %d requesting status", err);
                break;
            }
            /*
             * Some cards mishandle the status bits,
             * so make sure to check both the busy
             * indication and the card state.
             */
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
    }

    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);

        return -RT_ERROR;
    }

    return RT_EOK;
}
  167. static rt_err_t rt_mmcsd_init(rt_device_t dev)
  168. {
  169. return RT_EOK;
  170. }
  171. static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
  172. {
  173. return RT_EOK;
  174. }
  175. static rt_err_t rt_mmcsd_close(rt_device_t dev)
  176. {
  177. return RT_EOK;
  178. }
  179. static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
  180. {
  181. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  182. switch (cmd)
  183. {
  184. case RT_DEVICE_CTRL_BLK_GETGEOME:
  185. rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
  186. break;
  187. default:
  188. break;
  189. }
  190. return RT_EOK;
  191. }
  192. static rt_size_t rt_mmcsd_read(rt_device_t dev,
  193. rt_off_t pos,
  194. void *buffer,
  195. rt_size_t size)
  196. {
  197. rt_err_t err = 0;
  198. rt_size_t offset = 0;
  199. rt_size_t req_size = 0;
  200. rt_size_t remain_size = size;
  201. void *rd_ptr = (void *)buffer;
  202. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  203. struct dfs_partition *part = &blk_dev->part;
  204. if (dev == RT_NULL)
  205. {
  206. rt_set_errno(-EINVAL);
  207. return 0;
  208. }
  209. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  210. while (remain_size)
  211. {
  212. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  213. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
  214. if (err)
  215. break;
  216. offset += req_size;
  217. rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
  218. remain_size -= req_size;
  219. }
  220. rt_sem_release(part->lock);
  221. /* the length of reading must align to SECTOR SIZE */
  222. if (err)
  223. {
  224. rt_set_errno(-EIO);
  225. return 0;
  226. }
  227. return size - remain_size;
  228. }
  229. static rt_size_t rt_mmcsd_write(rt_device_t dev,
  230. rt_off_t pos,
  231. const void *buffer,
  232. rt_size_t size)
  233. {
  234. rt_err_t err = 0;
  235. rt_size_t offset = 0;
  236. rt_size_t req_size = 0;
  237. rt_size_t remain_size = size;
  238. void *wr_ptr = (void *)buffer;
  239. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  240. struct dfs_partition *part = &blk_dev->part;
  241. if (dev == RT_NULL)
  242. {
  243. rt_set_errno(-EINVAL);
  244. return 0;
  245. }
  246. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  247. while (remain_size)
  248. {
  249. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  250. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
  251. if (err)
  252. break;
  253. offset += req_size;
  254. wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
  255. remain_size -= req_size;
  256. }
  257. rt_sem_release(part->lock);
  258. /* the length of reading must align to SECTOR SIZE */
  259. if (err)
  260. {
  261. rt_set_errno(-EIO);
  262. return 0;
  263. }
  264. return size - remain_size;
  265. }
  266. static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
  267. {
  268. struct rt_mmcsd_cmd cmd;
  269. int err;
  270. /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
  271. if (card->flags & CARD_FLAG_SDHC)
  272. return 0;
  273. mmcsd_host_lock(card->host);
  274. cmd.cmd_code = SET_BLOCKLEN;
  275. cmd.arg = 512;
  276. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  277. err = mmcsd_send_cmd(card->host, &cmd, 5);
  278. mmcsd_host_unlock(card->host);
  279. if (err)
  280. {
  281. LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
  282. return -RT_ERROR;
  283. }
  284. return 0;
  285. }
#ifdef RT_USING_DEVICE_OPS
/* Device operations table used when RT_USING_DEVICE_OPS is enabled;
 * mirrors the per-field assignments made in rt_mmcsd_blk_probe(). */
const static struct rt_device_ops mmcsd_blk_ops =
{
    rt_mmcsd_init,
    rt_mmcsd_open,
    rt_mmcsd_close,
    rt_mmcsd_read,
    rt_mmcsd_write,
    rt_mmcsd_control
};
#endif
  297. rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
  298. {
  299. rt_int32_t err = 0;
  300. rt_uint8_t i, status;
  301. rt_uint8_t *sector;
  302. char dname[4];
  303. char sname[8];
  304. struct mmcsd_blk_device *blk_dev = RT_NULL;
  305. err = mmcsd_set_blksize(card);
  306. if(err)
  307. {
  308. return err;
  309. }
  310. LOG_D("probe mmcsd block device!");
  311. /* get the first sector to read partition table */
  312. sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
  313. if (sector == RT_NULL)
  314. {
  315. LOG_E("allocate partition sector buffer failed!");
  316. return -RT_ENOMEM;
  317. }
  318. status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
  319. if (status == RT_EOK)
  320. {
  321. for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
  322. {
  323. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  324. if (!blk_dev)
  325. {
  326. LOG_E("mmcsd:malloc memory failed!");
  327. break;
  328. }
  329. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  330. card->host->max_seg_size) >> 9,
  331. (card->host->max_blk_count *
  332. card->host->max_blk_size) >> 9);
  333. /* get the first partition */
  334. status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
  335. if (status == RT_EOK)
  336. {
  337. rt_snprintf(dname, 4, "sd%d", i);
  338. rt_snprintf(sname, 8, "sem_sd%d", i);
  339. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  340. /* register mmcsd device */
  341. blk_dev->dev.type = RT_Device_Class_Block;
  342. #ifdef RT_USING_DEVICE_OPS
  343. blk_dev->dev.ops = &mmcsd_blk_ops;
  344. #else
  345. blk_dev->dev.init = rt_mmcsd_init;
  346. blk_dev->dev.open = rt_mmcsd_open;
  347. blk_dev->dev.close = rt_mmcsd_close;
  348. blk_dev->dev.read = rt_mmcsd_read;
  349. blk_dev->dev.write = rt_mmcsd_write;
  350. blk_dev->dev.control = rt_mmcsd_control;
  351. #endif
  352. blk_dev->dev.user_data = blk_dev;
  353. blk_dev->card = card;
  354. blk_dev->geometry.bytes_per_sector = 1<<9;
  355. blk_dev->geometry.block_size = card->card_blksize;
  356. blk_dev->geometry.sector_count = blk_dev->part.size;
  357. rt_device_register(&blk_dev->dev, dname,
  358. RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | RT_DEVICE_FLAG_STANDALONE);
  359. rt_list_insert_after(&blk_devices, &blk_dev->list);
  360. }
  361. else
  362. {
  363. if (i == 0)
  364. {
  365. /* there is no partition table */
  366. blk_dev->part.offset = 0;
  367. blk_dev->part.size = 0;
  368. blk_dev->part.lock = rt_sem_create("sem_sd0", 1, RT_IPC_FLAG_FIFO);
  369. /* register mmcsd device */
  370. blk_dev->dev.type = RT_Device_Class_Block;
  371. #ifdef RT_USING_DEVICE_OPS
  372. blk_dev->dev.ops = &mmcsd_blk_ops;
  373. #else
  374. blk_dev->dev.init = rt_mmcsd_init;
  375. blk_dev->dev.open = rt_mmcsd_open;
  376. blk_dev->dev.close = rt_mmcsd_close;
  377. blk_dev->dev.read = rt_mmcsd_read;
  378. blk_dev->dev.write = rt_mmcsd_write;
  379. blk_dev->dev.control = rt_mmcsd_control;
  380. #endif
  381. blk_dev->dev.user_data = blk_dev;
  382. blk_dev->card = card;
  383. blk_dev->geometry.bytes_per_sector = 1<<9;
  384. blk_dev->geometry.block_size = card->card_blksize;
  385. blk_dev->geometry.sector_count =
  386. card->card_capacity * (1024 / 512);
  387. rt_device_register(&blk_dev->dev, "sd0",
  388. RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | RT_DEVICE_FLAG_STANDALONE);
  389. rt_list_insert_after(&blk_devices, &blk_dev->list);
  390. }
  391. else
  392. {
  393. rt_free(blk_dev);
  394. blk_dev = RT_NULL;
  395. break;
  396. }
  397. }
  398. #ifdef RT_USING_DFS_MNTTABLE
  399. if (blk_dev)
  400. {
  401. LOG_I("try to mount file system!");
  402. /* try to mount file system on this block device */
  403. dfs_mount_device(&(blk_dev->dev));
  404. }
  405. #endif
  406. }
  407. }
  408. else
  409. {
  410. LOG_E("read mmcsd first sector failed");
  411. err = -RT_ERROR;
  412. }
  413. /* release sector buffer */
  414. rt_free(sector);
  415. return err;
  416. }
  417. void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
  418. {
  419. rt_list_t *l, *n;
  420. struct mmcsd_blk_device *blk_dev;
  421. for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n)
  422. {
  423. blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
  424. if (blk_dev->card == card)
  425. {
  426. /* unmount file system */
  427. const char * mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
  428. if (mounted_path)
  429. {
  430. dfs_unmount(mounted_path);
  431. LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
  432. }
  433. rt_sem_delete(blk_dev->part.lock);
  434. rt_device_unregister(&blk_dev->dev);
  435. rt_list_remove(&blk_dev->list);
  436. rt_free(blk_dev);
  437. }
  438. }
  439. }
  440. /*
  441. * This function will initialize block device on the mmc/sd.
  442. *
  443. * @deprecated since 2.1.0, this function does not need to be invoked
  444. * in the system initialization.
  445. */
  446. int rt_mmcsd_blk_init(void)
  447. {
  448. /* nothing */
  449. return 0;
  450. }