/* block_dev.c — MMC/SD block device layer (scraper line-number residue removed) */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-07-25 weety first version
  9. */
  10. #include <rtthread.h>
  11. #include <dfs_fs.h>
  12. #include <drivers/mmcsd_core.h>
  13. #include <drivers/gpt.h>
  14. #define DBG_TAG "SDIO"
  15. #ifdef RT_SDIO_DEBUG
  16. #define DBG_LVL DBG_LOG
  17. #else
  18. #define DBG_LVL DBG_INFO
  19. #endif /* RT_SDIO_DEBUG */
  20. #include <rtdbg.h>
/* List of every registered mmcsd block device; manipulated by the probe and
 * remove paths below. */
static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);

/* NOTE: both arguments are evaluated twice — do not pass expressions with
 * side effects. */
#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Per-device bookkeeping for one registered block device (either the whole
 * card or a single partition on it). */
struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;             /* backing SD/MMC card */
    rt_list_t list;                         /* node in blk_devices */
    struct rt_device dev;                   /* registered RT-Thread device */
    struct dfs_partition part;              /* partition offset/size + lock */
    struct rt_device_blk_geometry geometry; /* geometry reported to DFS */
    rt_size_t max_req_size;                 /* max sectors per single request */
};

/* Upper bound of MBR partitions scanned per card. */
#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
/* Upper bound of GPT partition entries scanned per card. */
#define RT_GPT_PARTITION_MAX 128
  36. static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
  37. {
  38. int err;
  39. struct rt_mmcsd_cmd cmd;
  40. cmd.busy_timeout = 0;
  41. cmd.cmd_code = SEND_STATUS;
  42. cmd.arg = card->rca << 16;
  43. cmd.flags = RESP_R1 | CMD_AC;
  44. err = mmcsd_send_cmd(card->host, &cmd, retries);
  45. if (err)
  46. return err;
  47. if (status)
  48. *status = cmd.resp[0];
  49. return 0;
  50. }
  51. static int card_busy_detect(struct rt_mmcsd_card *card, unsigned int timeout_ms,
  52. rt_uint32_t *resp_errs)
  53. {
  54. int timeout = rt_tick_from_millisecond(timeout_ms);
  55. int err = 0;
  56. rt_uint32_t status;
  57. rt_tick_t start;
  58. start = rt_tick_get();
  59. do
  60. {
  61. rt_bool_t out = (int)(rt_tick_get() - start) > timeout;
  62. err = __send_status(card, &status, 5);
  63. if (err)
  64. {
  65. LOG_E("error %d requesting status", err);
  66. return err;
  67. }
  68. /* Accumulate any response error bits seen */
  69. if (resp_errs)
  70. *resp_errs |= status;
  71. if (out)
  72. {
  73. LOG_E("wait card busy timeout");
  74. return -RT_ETIMEOUT;
  75. }
  76. /*
  77. * Some cards mishandle the status bits,
  78. * so make sure to check both the busy
  79. * indication and the card state.
  80. */
  81. }
  82. while (!(status & R1_READY_FOR_DATA) ||
  83. (R1_CURRENT_STATE(status) == 7));
  84. return err;
  85. }
  86. rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
  87. {
  88. rt_int32_t err;
  89. rt_uint32_t blocks;
  90. struct rt_mmcsd_req req;
  91. struct rt_mmcsd_cmd cmd;
  92. struct rt_mmcsd_data data;
  93. rt_uint32_t timeout_us;
  94. rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
  95. cmd.cmd_code = APP_CMD;
  96. cmd.arg = card->rca << 16;
  97. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  98. err = mmcsd_send_cmd(card->host, &cmd, 0);
  99. if (err)
  100. return -RT_ERROR;
  101. if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
  102. return -RT_ERROR;
  103. rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
  104. cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
  105. cmd.arg = 0;
  106. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
  107. rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
  108. data.timeout_ns = card->tacc_ns * 100;
  109. data.timeout_clks = card->tacc_clks * 100;
  110. timeout_us = data.timeout_ns / 1000;
  111. timeout_us += data.timeout_clks * 1000 /
  112. (card->host->io_cfg.clock / 1000);
  113. if (timeout_us > 100000)
  114. {
  115. data.timeout_ns = 100000000;
  116. data.timeout_clks = 0;
  117. }
  118. data.blksize = 4;
  119. data.blks = 1;
  120. data.flags = DATA_DIR_READ;
  121. data.buf = &blocks;
  122. rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
  123. req.cmd = &cmd;
  124. req.data = &data;
  125. mmcsd_send_request(card->host, &req);
  126. if (cmd.err || data.err)
  127. return -RT_ERROR;
  128. return blocks;
  129. }
/*
 * Transfer @blks 512-byte sectors between @buf and the card, starting at
 * sector number @sector. @dir selects the direction: 0 = read, 1 = write.
 *
 * @return RT_EOK on success, -RT_ERROR if any command/data/stop stage failed.
 */
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    /* Serialize against other users of the same host controller. */
    mmcsd_host_lock(host);
    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        /* Byte-addressed (non-SDHC) card: convert sectors to a byte offset. */
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
    data.blksize = SECTOR_SIZE;
    data.blks = blks;
    if (blks > 1)
    {
        if (!controller_is_spi(card->host) || !dir)
        {
            /* Multi-block transfers need an explicit CMD12 stop command. */
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }
    /* Bit 0x8000 of card->flags is used here as a private "previous request
     * was a write" marker (set/cleared below). */
    if (!controller_is_spi(card->host) && (card->flags & 0x8000))
    {
        /* last request is WRITE,need check busy */
        card_busy_detect(card, 10000, RT_NULL);
    }
    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
        card->flags &= 0x7fff; /* clear the pending-write marker */
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
        card->flags |= 0x8000; /* remember an outstanding write */
    }
    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);
    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);
        return -RT_ERROR;
    }
    return RT_EOK;
}
  204. static rt_err_t rt_mmcsd_init(rt_device_t dev)
  205. {
  206. return RT_EOK;
  207. }
  208. static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
  209. {
  210. return RT_EOK;
  211. }
  212. static rt_err_t rt_mmcsd_close(rt_device_t dev)
  213. {
  214. return RT_EOK;
  215. }
  216. static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
  217. {
  218. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  219. switch (cmd)
  220. {
  221. case RT_DEVICE_CTRL_BLK_GETGEOME:
  222. rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
  223. break;
  224. case RT_DEVICE_CTRL_BLK_PARTITION:
  225. rt_memcpy(args, &blk_dev->part, sizeof(struct dfs_partition));
  226. default:
  227. break;
  228. }
  229. return RT_EOK;
  230. }
  231. static rt_ssize_t rt_mmcsd_read(rt_device_t dev,
  232. rt_off_t pos,
  233. void *buffer,
  234. rt_size_t size)
  235. {
  236. rt_err_t err = 0;
  237. rt_size_t offset = 0;
  238. rt_size_t req_size = 0;
  239. rt_size_t remain_size = size;
  240. void *rd_ptr = (void *)buffer;
  241. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  242. struct dfs_partition *part = &blk_dev->part;
  243. if (dev == RT_NULL)
  244. {
  245. rt_set_errno(-EINVAL);
  246. return 0;
  247. }
  248. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  249. while (remain_size)
  250. {
  251. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  252. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
  253. if (err)
  254. break;
  255. offset += req_size;
  256. rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
  257. remain_size -= req_size;
  258. }
  259. rt_sem_release(part->lock);
  260. /* the length of reading must align to SECTOR SIZE */
  261. if (err)
  262. {
  263. rt_set_errno(-EIO);
  264. return 0;
  265. }
  266. return size - remain_size;
  267. }
  268. static rt_ssize_t rt_mmcsd_write(rt_device_t dev,
  269. rt_off_t pos,
  270. const void *buffer,
  271. rt_size_t size)
  272. {
  273. rt_err_t err = 0;
  274. rt_size_t offset = 0;
  275. rt_size_t req_size = 0;
  276. rt_size_t remain_size = size;
  277. void *wr_ptr = (void *)buffer;
  278. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  279. struct dfs_partition *part = &blk_dev->part;
  280. if (dev == RT_NULL)
  281. {
  282. rt_set_errno(-EINVAL);
  283. return 0;
  284. }
  285. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  286. while (remain_size)
  287. {
  288. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  289. err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
  290. if (err)
  291. break;
  292. offset += req_size;
  293. wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
  294. remain_size -= req_size;
  295. }
  296. rt_sem_release(part->lock);
  297. /* the length of reading must align to SECTOR SIZE */
  298. if (err)
  299. {
  300. rt_set_errno(-EIO);
  301. return 0;
  302. }
  303. return size - remain_size;
  304. }
  305. static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
  306. {
  307. struct rt_mmcsd_cmd cmd;
  308. int err;
  309. /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
  310. if (card->flags & CARD_FLAG_SDHC)
  311. return 0;
  312. mmcsd_host_lock(card->host);
  313. cmd.cmd_code = SET_BLOCKLEN;
  314. cmd.arg = 512;
  315. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  316. err = mmcsd_send_cmd(card->host, &cmd, 5);
  317. mmcsd_host_unlock(card->host);
  318. if (err)
  319. {
  320. LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
  321. return -RT_ERROR;
  322. }
  323. return 0;
  324. }
  325. rt_int32_t read_lba(struct rt_mmcsd_card *card, size_t lba, uint8_t *buffer, size_t count)
  326. {
  327. rt_uint8_t status = 0;
  328. status = mmcsd_set_blksize(card);
  329. if (status)
  330. {
  331. return status;
  332. }
  333. rt_thread_mdelay(1);
  334. status = rt_mmcsd_req_blk(card, lba, buffer, count, 0);
  335. return status;
  336. }
#ifdef RT_USING_DEVICE_OPS
/* Device operation table; entry order mirrors the per-field assignments in
 * the #else branches below (init, open, close, read, write, control). */
const static struct rt_device_ops mmcsd_blk_ops =
{
    rt_mmcsd_init,
    rt_mmcsd_open,
    rt_mmcsd_close,
    rt_mmcsd_read,
    rt_mmcsd_write,
    rt_mmcsd_control
};
#endif
/*
 * Probe a GPT-partitioned card: register one whole-card block device named
 * after the host, then one device per valid GPT partition entry, named
 * "<host><index>". Each device gets its own semaphore "sem_<host><n>".
 *
 * @return RT_EOK normally; -1 if the first allocation fails. NOTE(review):
 * later allocation failures break out of the loop but still return RT_EOK.
 */
rt_int32_t gpt_device_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = RT_EOK;
    rt_uint8_t i, status;
    char dname[10];
    char sname[16];
    struct mmcsd_blk_device *blk_dev = RT_NULL;

    blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
    if (!blk_dev)
    {
        LOG_E("mmcsd:malloc memory failed!");
        return -1;
    }

    /* Largest request, in sectors, the host can take in a single transfer. */
    blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                     card->host->max_seg_size) >> 9,
                                    (card->host->max_blk_count *
                                     card->host->max_blk_size) >> 9);

    /* Whole-card device: offset/size of 0 means the entire card. */
    blk_dev->part.offset = 0;
    blk_dev->part.size = 0;
    rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
    blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

    /* register mmcsd device */
    blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
    blk_dev->dev.ops = &mmcsd_blk_ops;
#else
    blk_dev->dev.init = rt_mmcsd_init;
    blk_dev->dev.open = rt_mmcsd_open;
    blk_dev->dev.close = rt_mmcsd_close;
    blk_dev->dev.read = rt_mmcsd_read;
    blk_dev->dev.write = rt_mmcsd_write;
    blk_dev->dev.control = rt_mmcsd_control;
#endif
    blk_dev->card = card;
    blk_dev->geometry.bytes_per_sector = 1 << 9;
    blk_dev->geometry.block_size = card->card_blksize;
    /* presumably card_capacity is in KiB — the *2 converts to 512-byte
     * sectors; TODO confirm against mmcsd_core. */
    blk_dev->geometry.sector_count =
        card->card_capacity * (1024 / 512);
    blk_dev->dev.user_data = blk_dev;
    rt_device_register(&(blk_dev->dev), card->host->name,
                       RT_DEVICE_FLAG_RDWR);
    rt_list_insert_after(&blk_devices, &blk_dev->list);

    for (i = 0; i < RT_GPT_PARTITION_MAX; i++)
    {
        blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
        if (!blk_dev)
        {
            LOG_E("mmcsd:malloc memory failed!");
            break;
        }
        blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                         card->host->max_seg_size) >> 9,
                                        (card->host->max_blk_count *
                                         card->host->max_blk_size) >> 9);
        /* get the first partition */
        status = gpt_get_partition_param(card, &blk_dev->part, i);
        if (status == RT_EOK)
        {
            rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
            rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
            blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
            /* register mmcsd device */
            blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
            blk_dev->dev.ops = &mmcsd_blk_ops;
#else
            blk_dev->dev.init = rt_mmcsd_init;
            blk_dev->dev.open = rt_mmcsd_open;
            blk_dev->dev.close = rt_mmcsd_close;
            blk_dev->dev.read = rt_mmcsd_read;
            blk_dev->dev.write = rt_mmcsd_write;
            blk_dev->dev.control = rt_mmcsd_control;
#endif
            blk_dev->card = card;
            blk_dev->geometry.bytes_per_sector = 1 << 9;
            blk_dev->geometry.block_size = card->card_blksize;
            blk_dev->geometry.sector_count = blk_dev->part.size;
            blk_dev->dev.user_data = blk_dev;
            rt_device_register(&(blk_dev->dev), dname,
                               RT_DEVICE_FLAG_RDWR);
            rt_list_insert_after(&blk_devices, &blk_dev->list);
        }
        else
        {
            /* No more valid partition entries: stop scanning. */
            rt_free(blk_dev);
            blk_dev = RT_NULL;
            break;
        }
#ifdef RT_USING_DFS_MNTTABLE
        if (blk_dev)
        {
            LOG_I("try to mount file system!");
            /* try to mount file system on this block device */
            dfs_mount_device(&(blk_dev->dev));
        }
#endif
    }
    /* Release the GPT metadata cached by check_gpt()/gpt_get_partition_param(). */
    gpt_free();
    return err;
}
  448. rt_int32_t mbr_device_probe(struct rt_mmcsd_card *card)
  449. {
  450. rt_int32_t err = 0;
  451. rt_uint8_t i, status;
  452. rt_uint8_t *sector;
  453. char dname[10];
  454. char sname[16];
  455. struct mmcsd_blk_device *blk_dev = RT_NULL;
  456. err = mmcsd_set_blksize(card);
  457. if (err)
  458. {
  459. return err;
  460. }
  461. rt_thread_mdelay(1);
  462. /* get the first sector to read partition table */
  463. sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
  464. if (sector == RT_NULL)
  465. {
  466. LOG_E("allocate partition sector buffer failed!");
  467. return -RT_ENOMEM;
  468. }
  469. status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
  470. if (status == RT_EOK)
  471. {
  472. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  473. if (!blk_dev)
  474. {
  475. LOG_E("mmcsd:malloc memory failed!");
  476. return -1;
  477. }
  478. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  479. card->host->max_seg_size) >> 9,
  480. (card->host->max_blk_count *
  481. card->host->max_blk_size) >> 9);
  482. blk_dev->part.offset = 0;
  483. blk_dev->part.size = 0;
  484. rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
  485. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  486. /* register mmcsd device */
  487. blk_dev->dev.type = RT_Device_Class_Block;
  488. #ifdef RT_USING_DEVICE_OPS
  489. blk_dev->dev.ops = &mmcsd_blk_ops;
  490. #else
  491. blk_dev->dev.init = rt_mmcsd_init;
  492. blk_dev->dev.open = rt_mmcsd_open;
  493. blk_dev->dev.close = rt_mmcsd_close;
  494. blk_dev->dev.read = rt_mmcsd_read;
  495. blk_dev->dev.write = rt_mmcsd_write;
  496. blk_dev->dev.control = rt_mmcsd_control;
  497. #endif
  498. blk_dev->card = card;
  499. blk_dev->geometry.bytes_per_sector = 1 << 9;
  500. blk_dev->geometry.block_size = card->card_blksize;
  501. blk_dev->geometry.sector_count =
  502. card->card_capacity * (1024 / 512);
  503. blk_dev->dev.user_data = blk_dev;
  504. rt_device_register(&(blk_dev->dev), card->host->name,
  505. RT_DEVICE_FLAG_RDWR);
  506. rt_list_insert_after(&blk_devices, &blk_dev->list);
  507. for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
  508. {
  509. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  510. if (!blk_dev)
  511. {
  512. LOG_E("mmcsd:malloc memory failed!");
  513. break;
  514. }
  515. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  516. card->host->max_seg_size) >> 9,
  517. (card->host->max_blk_count *
  518. card->host->max_blk_size) >> 9);
  519. /* get the first partition */
  520. status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
  521. if (status == RT_EOK)
  522. {
  523. rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
  524. rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
  525. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  526. /* register mmcsd device */
  527. blk_dev->dev.type = RT_Device_Class_Block;
  528. #ifdef RT_USING_DEVICE_OPS
  529. blk_dev->dev.ops = &mmcsd_blk_ops;
  530. #else
  531. blk_dev->dev.init = rt_mmcsd_init;
  532. blk_dev->dev.open = rt_mmcsd_open;
  533. blk_dev->dev.close = rt_mmcsd_close;
  534. blk_dev->dev.read = rt_mmcsd_read;
  535. blk_dev->dev.write = rt_mmcsd_write;
  536. blk_dev->dev.control = rt_mmcsd_control;
  537. #endif
  538. blk_dev->card = card;
  539. blk_dev->geometry.bytes_per_sector = 1 << 9;
  540. blk_dev->geometry.block_size = card->card_blksize;
  541. blk_dev->geometry.sector_count = blk_dev->part.size;
  542. blk_dev->dev.user_data = blk_dev;
  543. rt_device_register(&(blk_dev->dev), dname,
  544. RT_DEVICE_FLAG_RDWR);
  545. rt_list_insert_after(&blk_devices, &blk_dev->list);
  546. }
  547. else
  548. {
  549. rt_free(blk_dev);
  550. blk_dev = RT_NULL;
  551. break;
  552. }
  553. #ifdef RT_USING_DFS_MNTTABLE
  554. if (blk_dev)
  555. {
  556. LOG_I("try to mount file system!");
  557. /* try to mount file system on this block device */
  558. dfs_mount_device(&(blk_dev->dev));
  559. }
  560. #endif
  561. }
  562. }
  563. else
  564. {
  565. LOG_E("read mmcsd first sector failed");
  566. err = -RT_ERROR;
  567. }
  568. /* release sector buffer */
  569. rt_free(sector);
  570. return err;
  571. }
  572. rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
  573. {
  574. uint32_t err = 0;
  575. LOG_D("probe mmcsd block device!");
  576. if (check_gpt(card) != 0)
  577. {
  578. err = gpt_device_probe(card);
  579. }
  580. else
  581. {
  582. err = mbr_device_probe(card);
  583. }
  584. return err;
  585. }
/*
 * Tear down every block device registered for @card: unmount any file
 * system mounted on it, delete the partition semaphore, unregister the
 * device object and free the bookkeeping structure.
 *
 * The loop caches the next node (n) before each step so removing the
 * current node during traversal is safe.
 */
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
    rt_list_t *l, *n;
    struct mmcsd_blk_device *blk_dev;

    for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n, n = n->next)
    {
        blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
        if (blk_dev->card == card)
        {
            /* unmount file system */
            const char *mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
            if (mounted_path)
            {
                dfs_unmount(mounted_path);
                LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
            }
            rt_sem_delete(blk_dev->part.lock);
            rt_device_unregister(&blk_dev->dev);
            rt_list_remove(&blk_dev->list);
            rt_free(blk_dev);
        }
    }
}
  609. /*
  610. * This function will initialize block device on the mmc/sd.
  611. *
  612. * @deprecated since 2.1.0, this function does not need to be invoked
  613. * in the system initialization.
  614. */
  615. int rt_mmcsd_blk_init(void)
  616. {
  617. /* nothing */
  618. return 0;
  619. }