/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-07-25     weety        first version
 */

#include <rtthread.h>
#include <dfs_fs.h>
#include <drivers/mmcsd_core.h>

/* Debug log configuration: verbose when RT_SDIO_DEBUG is enabled. */
#define DBG_TAG               "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL               DBG_LOG
#else
#define DBG_LVL               DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>

/* Minimum of two values; arguments may be evaluated more than once. */
#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))
/*
 * Block device instance exposed to the RT-Thread device framework.
 * One is created per detected partition, plus one "super" device that
 * covers the whole card (see rt_mmcsd_blk_probe).
 */
struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;             /* owning SD/MMC card */
    rt_list_t list;                         /* node in card->blk_devices */
    struct rt_device dev;                   /* registered device object */
    struct dfs_partition part;              /* partition offset/size + lock */
    struct rt_device_blk_geometry geometry; /* reported via control() */
    rt_size_t max_req_size;                 /* max sectors per single request */
};

/* Upper bound on partition table entries scanned during probe. */
#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
/*
 * Ask the card how many blocks were written successfully (SD ACMD22,
 * SEND_NUM_WR_BLOCKS), typically used after a write error to find out
 * how much data actually landed on the card.
 *
 * The sequence is: CMD55 (APP_CMD) to announce an application command,
 * then ACMD22, whose result arrives as a single 4-byte data block.
 *
 * Returns the number of well-written blocks, or -RT_ERROR on any
 * command/data failure or if the card did not accept APP_CMD.
 */
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;

    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    /* Step 1: CMD55 — next command is application-specific. */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    /* In SD-bus (non-SPI) mode the card must ack APP_CMD in R1 status. */
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    /* Step 2: ACMD22 — the block count is read over the data lines. */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    /* Allow 100x the card's nominal access time, capped at 100 ms. */
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
                  (card->host->io_cfg.clock / 1000);

    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    /* NOTE(review): raw 4-byte read straight into a rt_uint32_t; assumes
     * host byte order matches the wire format — confirm no endian swap
     * is required on big-endian targets. */
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
/*
 * Transfer `blks` 512-byte sectors between `buf` and the card, starting
 * at sector number `sector`. dir == 0 reads from the card, non-zero
 * writes to it. The host is locked for the whole transaction.
 *
 * Multi-block transfers are closed with STOP_TRANSMISSION (CMD12) except
 * on SPI-mode writes, where the stop token is handled by the data layer.
 * After a non-SPI write, the card is polled with SEND_STATUS (CMD13)
 * until it leaves the programming state.
 *
 * Returns RT_EOK on success, -RT_ERROR if the command, the data phase,
 * or the stop command reported an error.
 */
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    mmcsd_host_lock(host);

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    /* Byte-addressed cards (non-SDHC) take a byte offset: sector * 512. */
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        /* CMD12 terminates open-ended multi-block transfers, except for
         * SPI-mode writes (stop handled via a data token). */
        if (!controller_is_spi(card->host) || !dir)
        {
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);

    /* After a write, poll CMD13 until the card finishes programming. */
    if (!controller_is_spi(card->host) && dir != 0)
    {
        do
        {
            rt_int32_t err;

            cmd.cmd_code = SEND_STATUS;
            cmd.arg = card->rca << 16;
            cmd.flags = RESP_R1 | CMD_AC;
            err = mmcsd_send_cmd(card->host, &cmd, 5);
            if (err)
            {
                LOG_E("error %d requesting status", err);
                break;
            }
            /*
             * Some cards mishandle the status bits,
             * so make sure to check both the busy
             * indication and the card state.
             */
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
    }

    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);

        return -RT_ERROR;
    }

    return RT_EOK;
}
  166. static rt_err_t rt_mmcsd_init(rt_device_t dev)
  167. {
  168. return RT_EOK;
  169. }
  170. static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
  171. {
  172. return RT_EOK;
  173. }
  174. static rt_err_t rt_mmcsd_close(rt_device_t dev)
  175. {
  176. return RT_EOK;
  177. }
  178. static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
  179. {
  180. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  181. switch (cmd)
  182. {
  183. case RT_DEVICE_CTRL_BLK_GETGEOME:
  184. rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
  185. break;
  186. default:
  187. break;
  188. }
  189. return RT_EOK;
  190. }
  191. static rt_size_t rt_mmcsd_read(rt_device_t dev,
  192. rt_off_t pos,
  193. void *buffer,
  194. rt_size_t size)
  195. {
  196. rt_err_t err = 0;
  197. rt_size_t offset = 0;
  198. rt_size_t req_size = 0;
  199. rt_size_t remain_size = size;
  200. void *rd_ptr = (void *)buffer;
  201. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  202. struct dfs_partition *part = &blk_dev->part;
  203. if (dev == RT_NULL)
  204. {
  205. rt_set_errno(-EINVAL);
  206. return 0;
  207. }
  208. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  209. while (remain_size)
  210. {
  211. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  212. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
  213. if (err)
  214. break;
  215. offset += req_size;
  216. rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
  217. remain_size -= req_size;
  218. }
  219. rt_sem_release(part->lock);
  220. /* the length of reading must align to SECTOR SIZE */
  221. if (err)
  222. {
  223. rt_set_errno(-EIO);
  224. return 0;
  225. }
  226. return size - remain_size;
  227. }
  228. static rt_size_t rt_mmcsd_write(rt_device_t dev,
  229. rt_off_t pos,
  230. const void *buffer,
  231. rt_size_t size)
  232. {
  233. rt_err_t err = 0;
  234. rt_size_t offset = 0;
  235. rt_size_t req_size = 0;
  236. rt_size_t remain_size = size;
  237. void *wr_ptr = (void *)buffer;
  238. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  239. struct dfs_partition *part = &blk_dev->part;
  240. if (dev == RT_NULL)
  241. {
  242. rt_set_errno(-EINVAL);
  243. return 0;
  244. }
  245. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  246. while (remain_size)
  247. {
  248. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  249. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
  250. if (err)
  251. break;
  252. offset += req_size;
  253. wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
  254. remain_size -= req_size;
  255. }
  256. rt_sem_release(part->lock);
  257. /* the length of reading must align to SECTOR SIZE */
  258. if (err)
  259. {
  260. rt_set_errno(-EIO);
  261. return 0;
  262. }
  263. return size - remain_size;
  264. }
  265. static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
  266. {
  267. struct rt_mmcsd_cmd cmd;
  268. int err;
  269. /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
  270. if (card->flags & CARD_FLAG_SDHC)
  271. return 0;
  272. mmcsd_host_lock(card->host);
  273. cmd.cmd_code = SET_BLOCKLEN;
  274. cmd.arg = 512;
  275. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  276. err = mmcsd_send_cmd(card->host, &cmd, 5);
  277. mmcsd_host_unlock(card->host);
  278. if (err)
  279. {
  280. LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
  281. return -RT_ERROR;
  282. }
  283. return 0;
  284. }
  285. #ifdef RT_USING_DEVICE_OPS
  286. const static struct rt_device_ops mmcsd_blk_ops =
  287. {
  288. rt_mmcsd_init,
  289. rt_mmcsd_open,
  290. rt_mmcsd_close,
  291. rt_mmcsd_read,
  292. rt_mmcsd_write,
  293. rt_mmcsd_control
  294. };
  295. #endif
  296. static struct mmcsd_blk_device * rt_mmcsd_create_blkdev(struct rt_mmcsd_card *card, const char* dname, struct dfs_partition* psPart)
  297. {
  298. struct mmcsd_blk_device *blk_dev;
  299. char sname[12];
  300. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  301. if (!blk_dev)
  302. {
  303. LOG_E("mmcsd:malloc memory failed!");
  304. return RT_NULL;
  305. }
  306. if (psPart != RT_NULL)
  307. {
  308. rt_memcpy(&blk_dev->part, psPart, sizeof(struct dfs_partition));
  309. blk_dev->geometry.sector_count = psPart->size;
  310. }
  311. else
  312. {
  313. blk_dev->part.offset = 0;
  314. blk_dev->part.size = 0;
  315. blk_dev->geometry.sector_count = card->card_capacity * (1024 / 512);
  316. }
  317. blk_dev->geometry.bytes_per_sector = 1<<9;
  318. blk_dev->geometry.block_size = card->card_blksize;
  319. rt_snprintf(sname, sizeof(sname), "sem_%s", dname);
  320. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  321. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  322. card->host->max_seg_size) >> 9,
  323. (card->host->max_blk_count *
  324. card->host->max_blk_size) >> 9);
  325. /* register mmcsd device */
  326. blk_dev->dev.type = RT_Device_Class_Block;
  327. #ifdef RT_USING_DEVICE_OPS
  328. blk_dev->dev.ops = &mmcsd_blk_ops;
  329. #else
  330. blk_dev->dev.init = rt_mmcsd_init;
  331. blk_dev->dev.open = rt_mmcsd_open;
  332. blk_dev->dev.close = rt_mmcsd_close;
  333. blk_dev->dev.read = rt_mmcsd_read;
  334. blk_dev->dev.write = rt_mmcsd_write;
  335. blk_dev->dev.control = rt_mmcsd_control;
  336. #endif
  337. blk_dev->dev.user_data = blk_dev;
  338. blk_dev->card = card;
  339. rt_device_register(&blk_dev->dev, dname,
  340. RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | RT_DEVICE_FLAG_STANDALONE);
  341. /* Insert to list. */
  342. rt_list_insert_after(&card->blk_devices, &blk_dev->list);
  343. #ifdef RT_USING_DFS_MNTTABLE
  344. if ( blk_dev )
  345. {
  346. LOG_I("try to mount file system!");
  347. /* try to mount file system on this block device */
  348. dfs_mount_device(&(blk_dev->dev));
  349. }
  350. #endif
  351. return blk_dev;
  352. }
/*
 * Probe a freshly detected card: set the 512-byte block length, read the
 * first sector, enumerate up to RT_MMCSD_MAX_PARTITION partition-table
 * entries, and create one block device per partition plus one "super"
 * device ("sd<host-id>") covering the whole card.
 *
 * Returns 0 on success, a negative rt error code on failure. On partial
 * failure, devices created earlier in the loop are NOT rolled back here;
 * rt_mmcsd_blk_remove handles teardown.
 */
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = 0;
    rt_err_t status;
    rt_uint8_t *sector;

    err = mmcsd_set_blksize(card);
    if(err)
    {
        return err;
    }

    LOG_D("probe mmcsd block device!");

    /* get the first sector to read partition table */
    sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
    if (sector == RT_NULL)
    {
        LOG_E("allocate partition sector buffer failed!");
        return -RT_ENOMEM;
    }

    status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
    if (status == RT_EOK)
    {
        rt_uint8_t i;
        char dname[8];
        struct dfs_partition part;
        struct mmcsd_blk_device *blk_dev = RT_NULL;
        rt_int32_t host_id = card->host->id;

        /* Initial blk_device link-list. */
        rt_list_init(&card->blk_devices);

        for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
        {
            /* Get the first partition */
            status = dfs_filesystem_get_partition(&part, sector, i);
            if (status == RT_EOK)
            {
                /* Given name is with allocated host id and its partition index. */
                rt_snprintf(dname, sizeof(dname), "sd%dp%d", host_id, i);
                blk_dev = rt_mmcsd_create_blkdev(card, (const char*)dname, &part);
                if ( blk_dev == RT_NULL )
                {
                    err = -RT_ENOMEM;
                    goto exit_rt_mmcsd_blk_probe;
                }
            }
            else
            {
                /* Stop at the first missing entry: partitions are assumed
                 * contiguous in the table. */
                break;
            }
        }

        /* Always create the super node, given name is with allocated host id. */
        rt_snprintf(dname, sizeof(dname), "sd%d", host_id);
        blk_dev = rt_mmcsd_create_blkdev(card, (const char*)dname, RT_NULL);
        if ( blk_dev == RT_NULL )
        {
            err = -RT_ENOMEM;
            goto exit_rt_mmcsd_blk_probe;
        }
    }
    else
    {
        LOG_E("read mmcsd first sector failed");
        err = -RT_ERROR;
    }

exit_rt_mmcsd_blk_probe:

    /* release sector buffer */
    rt_free(sector);

    return err;
}
/*
 * Tear down every block device created for `card`: unmount any mounted
 * file system, delete the partition lock, unregister the device, unlink
 * it from card->blk_devices, and free it.
 *
 * The list is walked with a saved `next` pointer so nodes can be freed
 * safely during iteration.
 */
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
    rt_list_t *l, *n;
    struct mmcsd_blk_device *blk_dev;

    for (l = (&card->blk_devices)->next, n = l->next; l != &card->blk_devices; l = n, n=n->next)
    {
        blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
        if (blk_dev->card == card)
        {
            /* unmount file system */
            const char * mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
            if (mounted_path)
            {
                dfs_unmount(mounted_path);
                LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
            }

            rt_sem_delete(blk_dev->part.lock);
            rt_device_unregister(&blk_dev->dev);
            rt_list_remove(&blk_dev->list);
            rt_free(blk_dev);
        }
    }
}