/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-07-25     weety        first version
 */
#include <rtthread.h>
#include <dfs_fs.h>
#include <drivers/mmcsd_core.h>
#include <drivers/gpt.h>

#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>

/* Global list of every registered mmcsd block device (whole-card and
 * per-partition); rt_mmcsd_blk_remove() walks it to tear devices down. */
static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);

#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Per-device descriptor: one instance per registered block device,
 * i.e. one for the whole card plus one per detected partition. */
struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;             /* owning card */
    rt_list_t list;                         /* node in blk_devices */
    struct rt_device dev;                   /* RT-Thread device object */
    struct dfs_partition part;              /* partition window + lock */
    struct rt_device_blk_geometry geometry; /* sector size/count */
    rt_size_t max_req_size;                 /* max sectors per host request */
};

#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
#define RT_GPT_PARTITION_MAX 128
/*
 * Ask the card how many blocks were written successfully by the last
 * multi-block write, via ACMD22 (SD_APP_SEND_NUM_WR_BLKS).
 *
 * Returns the block count on success, -RT_ERROR on any command,
 * response or data error.
 */
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    /* ACMD prefix: CMD55 switches the card to application-command mode. */
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    /* In native (non-SPI) mode the card must acknowledge APP_CMD. */
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    /* Scale the card's nominal access time by 100: some cards need far
     * longer for this command than for ordinary reads. */
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;
    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
                  (card->host->io_cfg.clock / 1000);
    /* Cap the overall timeout at 100 ms. */
    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    /* The answer is a single 4-byte data block holding the count.
     * NOTE(review): no byte-order conversion is applied to `blocks`;
     * confirm the host driver delivers it in CPU byte order. */
    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
/*
 * Issue one read or write request of `blks` 512-byte sectors to the card.
 *
 * sector : start sector; converted to a byte offset for byte-addressed
 *          (non-SDHC) cards.
 * buf    : data buffer, blks * SECTOR_SIZE bytes.
 * blks   : number of sectors to transfer.
 * dir    : 0 = read, non-zero = write.
 *
 * Returns RT_EOK on success, -RT_ERROR on any command/data/stop error.
 * The host is locked for the whole transaction, including the post-write
 * busy polling.
 */
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    mmcsd_host_lock(host);

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    /* Byte-addressed cards take a byte offset, not a sector number. */
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        /* Multi-block transfers need an explicit STOP_TRANSMISSION,
         * except for writes in SPI mode. */
        if (!controller_is_spi(card->host) || !dir)
        {
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);

    /* After a write in native mode, poll SEND_STATUS until the card
     * leaves the programming state (state 7) and reports ready-for-data. */
    if (!controller_is_spi(card->host) && dir != 0)
    {
        do
        {
            rt_int32_t err;

            cmd.cmd_code = SEND_STATUS;
            cmd.arg = card->rca << 16;
            cmd.flags = RESP_R1 | CMD_AC;
            err = mmcsd_send_cmd(card->host, &cmd, 5);
            if (err)
            {
                LOG_E("error %d requesting status", err);
                break;
            }
            /*
             * Some cards mishandle the status bits,
             * so make sure to check both the busy
             * indication and the card state.
             */
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
    }

    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);
        return -RT_ERROR;
    }

    return RT_EOK;
}
/* Device init hook: the card is initialised by the mmcsd core, so this
 * is a deliberate no-op. */
static rt_err_t rt_mmcsd_init(rt_device_t dev)
{
    return RT_EOK;
}
/* Device open hook: no per-open state is needed, so this is a no-op. */
static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}
/* Device close hook: nothing to release, so this is a no-op. */
static rt_err_t rt_mmcsd_close(rt_device_t dev)
{
    return RT_EOK;
}
  181. static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
  182. {
  183. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  184. switch (cmd)
  185. {
  186. case RT_DEVICE_CTRL_BLK_GETGEOME:
  187. rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
  188. break;
  189. case RT_DEVICE_CTRL_BLK_PARTITION:
  190. rt_memcpy(args, &blk_dev->part, sizeof(struct dfs_partition));
  191. default:
  192. break;
  193. }
  194. return RT_EOK;
  195. }
  196. static rt_size_t rt_mmcsd_read(rt_device_t dev,
  197. rt_off_t pos,
  198. void *buffer,
  199. rt_size_t size)
  200. {
  201. rt_err_t err = 0;
  202. rt_size_t offset = 0;
  203. rt_size_t req_size = 0;
  204. rt_size_t remain_size = size;
  205. void *rd_ptr = (void *)buffer;
  206. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  207. struct dfs_partition *part = &blk_dev->part;
  208. if (dev == RT_NULL)
  209. {
  210. rt_set_errno(-EINVAL);
  211. return 0;
  212. }
  213. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  214. while (remain_size)
  215. {
  216. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  217. err = rt_mmcsd_req_blk(blk_dev->card, pos + offset, rd_ptr, req_size, 0);
  218. if (err)
  219. break;
  220. offset += req_size;
  221. rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
  222. remain_size -= req_size;
  223. }
  224. rt_sem_release(part->lock);
  225. /* the length of reading must align to SECTOR SIZE */
  226. if (err)
  227. {
  228. rt_set_errno(-EIO);
  229. return 0;
  230. }
  231. return size - remain_size;
  232. }
  233. static rt_size_t rt_mmcsd_write(rt_device_t dev,
  234. rt_off_t pos,
  235. const void *buffer,
  236. rt_size_t size)
  237. {
  238. rt_err_t err = 0;
  239. rt_size_t offset = 0;
  240. rt_size_t req_size = 0;
  241. rt_size_t remain_size = size;
  242. void *wr_ptr = (void *)buffer;
  243. struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
  244. struct dfs_partition *part = &blk_dev->part;
  245. if (dev == RT_NULL)
  246. {
  247. rt_set_errno(-EINVAL);
  248. return 0;
  249. }
  250. rt_sem_take(part->lock, RT_WAITING_FOREVER);
  251. while (remain_size)
  252. {
  253. req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
  254. err = rt_mmcsd_req_blk(blk_dev->card, pos + offset, wr_ptr, req_size, 1);
  255. if (err)
  256. break;
  257. offset += req_size;
  258. wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
  259. remain_size -= req_size;
  260. }
  261. rt_sem_release(part->lock);
  262. /* the length of reading must align to SECTOR SIZE */
  263. if (err)
  264. {
  265. rt_set_errno(-EIO);
  266. return 0;
  267. }
  268. return size - remain_size;
  269. }
  270. static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
  271. {
  272. struct rt_mmcsd_cmd cmd;
  273. int err;
  274. /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
  275. if (card->flags & CARD_FLAG_SDHC)
  276. return 0;
  277. mmcsd_host_lock(card->host);
  278. cmd.cmd_code = SET_BLOCKLEN;
  279. cmd.arg = 512;
  280. cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
  281. err = mmcsd_send_cmd(card->host, &cmd, 5);
  282. mmcsd_host_unlock(card->host);
  283. if (err)
  284. {
  285. LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
  286. return -RT_ERROR;
  287. }
  288. return 0;
  289. }
  290. rt_int32_t read_lba(struct rt_mmcsd_card *card, size_t lba, uint8_t *buffer, size_t count)
  291. {
  292. rt_uint8_t status = 0;
  293. status = mmcsd_set_blksize(card);
  294. if(status)
  295. {
  296. return status;
  297. }
  298. mmcsd_delay_ms(1);
  299. status = rt_mmcsd_req_blk(card, lba, buffer, count, 0);
  300. return status;
  301. }
#ifdef RT_USING_DEVICE_OPS
/* Device operations table, used when the ops-struct device model is
 * enabled instead of per-device function pointers. */
const static struct rt_device_ops mmcsd_blk_ops =
{
    rt_mmcsd_init,
    rt_mmcsd_open,
    rt_mmcsd_close,
    rt_mmcsd_read,
    rt_mmcsd_write,
    rt_mmcsd_control
};
#endif
  313. rt_int32_t gpt_device_probe(struct rt_mmcsd_card *card)
  314. {
  315. rt_int32_t err = RT_EOK;
  316. rt_uint8_t i, status;
  317. char dname[10];
  318. char sname[16];
  319. struct mmcsd_blk_device *blk_dev = RT_NULL;
  320. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  321. if (!blk_dev)
  322. {
  323. LOG_E("mmcsd:malloc memory failed!");
  324. return -1;
  325. }
  326. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  327. card->host->max_seg_size) >> 9,
  328. (card->host->max_blk_count *
  329. card->host->max_blk_size) >> 9);
  330. blk_dev->part.offset = 0;
  331. blk_dev->part.size = 0;
  332. rt_snprintf(sname, sizeof(sname)-1, "sem_%s%d", card->host->name,0);
  333. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  334. /* register mmcsd device */
  335. blk_dev->dev.type = RT_Device_Class_Block;
  336. #ifdef RT_USING_DEVICE_OPS
  337. blk_dev->dev.ops = &mmcsd_blk_ops;
  338. #else
  339. blk_dev->dev.init = rt_mmcsd_init;
  340. blk_dev->dev.open = rt_mmcsd_open;
  341. blk_dev->dev.close = rt_mmcsd_close;
  342. blk_dev->dev.read = rt_mmcsd_read;
  343. blk_dev->dev.write = rt_mmcsd_write;
  344. blk_dev->dev.control = rt_mmcsd_control;
  345. #endif
  346. blk_dev->card = card;
  347. blk_dev->geometry.bytes_per_sector = 1<<9;
  348. blk_dev->geometry.block_size = card->card_blksize;
  349. blk_dev->geometry.sector_count =
  350. card->card_capacity * (1024 / 512);
  351. blk_dev->dev.user_data = blk_dev;
  352. rt_device_register(&(blk_dev->dev), card->host->name,
  353. RT_DEVICE_FLAG_RDWR);
  354. rt_list_insert_after(&blk_devices, &blk_dev->list);
  355. for (i = 0; i < RT_GPT_PARTITION_MAX; i++)
  356. {
  357. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  358. if (!blk_dev)
  359. {
  360. LOG_E("mmcsd:malloc memory failed!");
  361. break;
  362. }
  363. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  364. card->host->max_seg_size) >> 9,
  365. (card->host->max_blk_count *
  366. card->host->max_blk_size) >> 9);
  367. /* get the first partition */
  368. status = gpt_get_partition_param(card, &blk_dev->part, i);
  369. if (status == RT_EOK)
  370. {
  371. rt_snprintf(dname, sizeof(dname)-1, "%s%d", card->host->name,i);
  372. rt_snprintf(sname, sizeof(sname)-1, "sem_%s%d", card->host->name,i+1);
  373. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  374. /* register mmcsd device */
  375. blk_dev->dev.type = RT_Device_Class_Block;
  376. #ifdef RT_USING_DEVICE_OPS
  377. blk_dev->dev.ops = &mmcsd_blk_ops;
  378. #else
  379. blk_dev->dev.init = rt_mmcsd_init;
  380. blk_dev->dev.open = rt_mmcsd_open;
  381. blk_dev->dev.close = rt_mmcsd_close;
  382. blk_dev->dev.read = rt_mmcsd_read;
  383. blk_dev->dev.write = rt_mmcsd_write;
  384. blk_dev->dev.control = rt_mmcsd_control;
  385. #endif
  386. blk_dev->card = card;
  387. blk_dev->geometry.bytes_per_sector = 1<<9;
  388. blk_dev->geometry.block_size = card->card_blksize;
  389. blk_dev->geometry.sector_count = blk_dev->part.size;
  390. blk_dev->dev.user_data = blk_dev;
  391. rt_device_register(&(blk_dev->dev), dname,
  392. RT_DEVICE_FLAG_RDWR);
  393. rt_list_insert_after(&blk_devices, &blk_dev->list);
  394. }
  395. else
  396. {
  397. rt_free(blk_dev);
  398. blk_dev = RT_NULL;
  399. break;
  400. }
  401. #ifdef RT_USING_DFS_MNTTABLE
  402. if (blk_dev)
  403. {
  404. LOG_I("try to mount file system!");
  405. /* try to mount file system on this block device */
  406. dfs_mount_device(&(blk_dev->dev));
  407. }
  408. #endif
  409. }
  410. gpt_free();
  411. return err;
  412. }
  413. rt_int32_t mbr_device_probe(struct rt_mmcsd_card *card)
  414. {
  415. rt_int32_t err = 0;
  416. rt_uint8_t i, status;
  417. rt_uint8_t *sector;
  418. char dname[10];
  419. char sname[16];
  420. struct mmcsd_blk_device *blk_dev = RT_NULL;
  421. err = mmcsd_set_blksize(card);
  422. if(err)
  423. {
  424. return err;
  425. }
  426. mmcsd_delay_ms(1);
  427. /* get the first sector to read partition table */
  428. sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
  429. if (sector == RT_NULL)
  430. {
  431. LOG_E("allocate partition sector buffer failed!");
  432. return -RT_ENOMEM;
  433. }
  434. status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
  435. if (status == RT_EOK)
  436. {
  437. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  438. if (!blk_dev)
  439. {
  440. LOG_E("mmcsd:malloc memory failed!");
  441. return -1;
  442. }
  443. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  444. card->host->max_seg_size) >> 9,
  445. (card->host->max_blk_count *
  446. card->host->max_blk_size) >> 9);
  447. blk_dev->part.offset = 0;
  448. blk_dev->part.size = 0;
  449. rt_snprintf(sname, sizeof(sname)-1, "sem_%s%d", card->host->name,0);
  450. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  451. /* register mmcsd device */
  452. blk_dev->dev.type = RT_Device_Class_Block;
  453. #ifdef RT_USING_DEVICE_OPS
  454. blk_dev->dev.ops = &mmcsd_blk_ops;
  455. #else
  456. blk_dev->dev.init = rt_mmcsd_init;
  457. blk_dev->dev.open = rt_mmcsd_open;
  458. blk_dev->dev.close = rt_mmcsd_close;
  459. blk_dev->dev.read = rt_mmcsd_read;
  460. blk_dev->dev.write = rt_mmcsd_write;
  461. blk_dev->dev.control = rt_mmcsd_control;
  462. #endif
  463. blk_dev->card = card;
  464. blk_dev->geometry.bytes_per_sector = 1<<9;
  465. blk_dev->geometry.block_size = card->card_blksize;
  466. blk_dev->geometry.sector_count =
  467. card->card_capacity * (1024 / 512);
  468. blk_dev->dev.user_data = blk_dev;
  469. rt_device_register(&(blk_dev->dev), card->host->name,
  470. RT_DEVICE_FLAG_RDWR);
  471. rt_list_insert_after(&blk_devices, &blk_dev->list);
  472. for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
  473. {
  474. blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
  475. if (!blk_dev)
  476. {
  477. LOG_E("mmcsd:malloc memory failed!");
  478. break;
  479. }
  480. blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
  481. card->host->max_seg_size) >> 9,
  482. (card->host->max_blk_count *
  483. card->host->max_blk_size) >> 9);
  484. /* get the first partition */
  485. status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
  486. if (status == RT_EOK)
  487. {
  488. rt_snprintf(dname, sizeof(dname)-1, "%s%d", card->host->name,i);
  489. rt_snprintf(sname, sizeof(sname)-1, "sem_%s%d", card->host->name,i+1);
  490. blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);
  491. /* register mmcsd device */
  492. blk_dev->dev.type = RT_Device_Class_Block;
  493. #ifdef RT_USING_DEVICE_OPS
  494. blk_dev->dev.ops = &mmcsd_blk_ops;
  495. #else
  496. blk_dev->dev.init = rt_mmcsd_init;
  497. blk_dev->dev.open = rt_mmcsd_open;
  498. blk_dev->dev.close = rt_mmcsd_close;
  499. blk_dev->dev.read = rt_mmcsd_read;
  500. blk_dev->dev.write = rt_mmcsd_write;
  501. blk_dev->dev.control = rt_mmcsd_control;
  502. #endif
  503. blk_dev->card = card;
  504. blk_dev->geometry.bytes_per_sector = 1<<9;
  505. blk_dev->geometry.block_size = card->card_blksize;
  506. blk_dev->geometry.sector_count = blk_dev->part.size;
  507. blk_dev->dev.user_data = blk_dev;
  508. rt_device_register(&(blk_dev->dev), dname,
  509. RT_DEVICE_FLAG_RDWR);
  510. rt_list_insert_after(&blk_devices, &blk_dev->list);
  511. }
  512. else
  513. {
  514. rt_free(blk_dev);
  515. blk_dev = RT_NULL;
  516. break;
  517. }
  518. #ifdef RT_USING_DFS_MNTTABLE
  519. if (blk_dev)
  520. {
  521. LOG_I("try to mount file system!");
  522. /* try to mount file system on this block device */
  523. dfs_mount_device(&(blk_dev->dev));
  524. }
  525. #endif
  526. }
  527. }
  528. else
  529. {
  530. LOG_E("read mmcsd first sector failed");
  531. err = -RT_ERROR;
  532. }
  533. /* release sector buffer */
  534. rt_free(sector);
  535. return err;
  536. }
  537. rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
  538. {
  539. uint32_t err = 0;
  540. LOG_D("probe mmcsd block device!");
  541. if (check_gpt(card) != 0)
  542. {
  543. err = gpt_device_probe(card);
  544. }
  545. else
  546. {
  547. err = mbr_device_probe(card);
  548. }
  549. return err;
  550. }
  551. void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
  552. {
  553. rt_list_t *l, *n;
  554. struct mmcsd_blk_device *blk_dev;
  555. for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n, n = n->next)
  556. {
  557. blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
  558. if (blk_dev->card == card)
  559. {
  560. /* unmount file system */
  561. const char * mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
  562. if (mounted_path)
  563. {
  564. dfs_unmount(mounted_path);
  565. LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
  566. }
  567. rt_sem_delete(blk_dev->part.lock);
  568. rt_device_unregister(&blk_dev->dev);
  569. rt_list_remove(&blk_dev->list);
  570. rt_free(blk_dev);
  571. }
  572. }
  573. }
/*
 * This function will initialize block device on the mmc/sd.
 *
 * @deprecated since 2.1.0, this function does not need to be invoked
 * in the system initialization.
 */
int rt_mmcsd_blk_init(void)
{
    /* nothing */
    return 0;
}