/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-07-25     weety        first version
 */

#include <rtthread.h>
#include <dfs_fs.h>
#include <dfs_file.h>
#include <drivers/mmcsd_core.h>
#include <drivers/gpt.h>

#define DBG_TAG "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>

static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);

#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))

#define RT_DEVICE_CTRL_BLK_SSIZEGET     0x1268      /**< get the number of bytes per sector */
#define RT_DEVICE_CTRL_ALL_BLK_SSIZEGET 0x80081272  /**< get the device size in bytes (bytes per sector * sector count) */

struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;             /* the underlying MMC/SD card */
    rt_list_t list;                         /* node in the global blk_devices list */
    struct rt_device dev;                   /* the registered block device */
    struct dfs_partition part;              /* partition backing this device */
    struct rt_device_blk_geometry geometry; /* geometry reported to callers */
    rt_size_t max_req_size;                 /* max sectors per request, limited by the host */
};

#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif
#define RT_GPT_PARTITION_MAX 128
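
/*
 * Issue SEND_STATUS (CMD13) and, on success, place the R1 status word in
 * *status. The card's RCA goes in the upper 16 bits of the argument.
 */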
static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
{
    int err;
    struct rt_mmcsd_cmd cmd;

    cmd.busy_timeout = 0;
    cmd.cmd_code = SEND_STATUS;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_R1 | CMD_AC;
    err = mmcsd_send_cmd(card->host, &cmd, retries);
    if (err)
        return err;

    if (status)
        *status = cmd.resp[0];

    return 0;
}
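
/*
 * Poll the card with SEND_STATUS until it reports READY_FOR_DATA and has
 * left the programming state (R1 state 7), or until timeout_ms expires.
 */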
static int card_busy_detect(struct rt_mmcsd_card *card, unsigned int timeout_ms,
                            rt_uint32_t *resp_errs)
{
    int timeout = rt_tick_from_millisecond(timeout_ms);
    int err = 0;
    rt_uint32_t status;
    rt_tick_t start;

    start = rt_tick_get();
    do
    {
        rt_bool_t out = (int)(rt_tick_get() - start) > timeout;

        err = __send_status(card, &status, 5);
        if (err)
        {
            LOG_E("error %d requesting status", err);
            return err;
        }

        /* Accumulate any response error bits seen */
        if (resp_errs)
            *resp_errs |= status;

        if (out)
        {
            LOG_E("wait card busy timeout");
            return -RT_ETIMEOUT;
        }
        /*
         * Some cards mishandle the status bits, so make sure to check both
         * the busy indication and the card state.
         */
    }
    while (!(status & R1_READY_FOR_DATA) ||
           (R1_CURRENT_STATE(status) == 7));

    return err;
}
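
/*
 * Ask the card how many blocks were actually programmed by the previous
 * write, using ACMD22 (SD_APP_SEND_NUM_WR_BLKS). Returns the block count
 * on success, or -RT_ERROR.
 */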
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
                  (card->host->io_cfg.clock / 1000);
    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
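
/*
 * Transfer 'blks' sectors starting at 'sector'; dir is 0 for read and 1
 * for write. Standard-capacity cards are byte addressed, so the sector
 * number is shifted left by 9 (multiplied by 512); SDHC cards are block
 * addressed and take the sector number directly.
 */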
static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    mmcsd_host_lock(host);

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        /* standard-capacity cards are byte addressed */
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        if (!controller_is_spi(card->host) || !dir)
        {
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    /* bit 0x8000 in card->flags records that the previous request was a write */
    if (!controller_is_spi(card->host) && (card->flags & 0x8000))
    {
        /* the last request was a WRITE; wait until the card is no longer busy */
        card_busy_detect(card, 10000, RT_NULL);
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
        card->flags &= 0x7fff;
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
        card->flags |= 0x8000;
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);
    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);
        return -RT_ERROR;
    }

    return RT_EOK;
}

static rt_err_t rt_mmcsd_init(rt_device_t dev)
{
    return RT_EOK;
}

static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}

static rt_err_t rt_mmcsd_close(rt_device_t dev)
{
    return RT_EOK;
}
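
/*
 * rt_mmcsd_control() handles the block-device control commands. An
 * illustrative sketch of how a caller might query the geometry through the
 * generic device interface (the device name "sd0" is an assumption; the
 * actual name is the host name used at registration):
 *
 *     struct rt_device_blk_geometry geom;
 *     rt_device_t dev = rt_device_find("sd0");
 *     if (dev != RT_NULL &&
 *         rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geom) == RT_EOK)
 *     {
 *         rt_kprintf("%u bytes/sector\n", geom.bytes_per_sector);
 *     }
 */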
static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
{
    struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;

    switch (cmd)
    {
    case RT_DEVICE_CTRL_BLK_GETGEOME:
        rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
        break;
    case RT_DEVICE_CTRL_BLK_PARTITION:
        rt_memcpy(args, &blk_dev->part, sizeof(struct dfs_partition));
        break;
    case RT_DEVICE_CTRL_BLK_SSIZEGET:
        rt_memcpy(args, &blk_dev->geometry.bytes_per_sector, sizeof(rt_uint32_t));
        break;
    case RT_DEVICE_CTRL_ALL_BLK_SSIZEGET:
    {
        /* widen before multiplying so large cards do not overflow 32 bits */
        rt_uint64_t count_mul_per = (rt_uint64_t)blk_dev->geometry.bytes_per_sector *
                                    blk_dev->geometry.sector_count;
        rt_memcpy(args, &count_mul_per, sizeof(rt_uint64_t));
    }
    break;
    default:
        break;
    }

    return RT_EOK;
}
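
/*
 * Block read entry point. Note that pos and size are measured in 512-byte
 * sectors, not bytes, and that large requests are split into chunks of at
 * most max_req_size sectors. Returns the number of sectors read.
 */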
static rt_ssize_t rt_mmcsd_read(rt_device_t dev,
                                rt_off_t pos,
                                void *buffer,
                                rt_size_t size)
{
    rt_err_t err = 0;
    rt_size_t offset = 0;
    rt_size_t req_size = 0;
    rt_size_t remain_size = size;
    void *rd_ptr = (void *)buffer;
    struct mmcsd_blk_device *blk_dev;
    struct dfs_partition *part;

    if (dev == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }

    /* only dereference the device after the NULL check */
    blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    part = &blk_dev->part;

    rt_sem_take(part->lock, RT_WAITING_FOREVER);
    while (remain_size)
    {
        req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
        err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
        if (err)
            break;
        offset += req_size;
        rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
        remain_size -= req_size;
    }
    rt_sem_release(part->lock);

    /* the read length must be aligned to SECTOR_SIZE */
    if (err)
    {
        rt_set_errno(-EIO);
        return 0;
    }

    return size - remain_size;
}
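
/*
 * Block write entry point, the mirror of rt_mmcsd_read(): pos and size are
 * in sectors and requests are chunked by max_req_size. Returns the number
 * of sectors written.
 */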
static rt_ssize_t rt_mmcsd_write(rt_device_t dev,
                                 rt_off_t pos,
                                 const void *buffer,
                                 rt_size_t size)
{
    rt_err_t err = 0;
    rt_size_t offset = 0;
    rt_size_t req_size = 0;
    rt_size_t remain_size = size;
    void *wr_ptr = (void *)buffer;
    struct mmcsd_blk_device *blk_dev;
    struct dfs_partition *part;

    if (dev == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }

    /* only dereference the device after the NULL check */
    blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    part = &blk_dev->part;

    rt_sem_take(part->lock, RT_WAITING_FOREVER);
    while (remain_size)
    {
        req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
        err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
        if (err)
            break;
        offset += req_size;
        wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
        remain_size -= req_size;
    }
    rt_sem_release(part->lock);

    /* the write length must be aligned to SECTOR_SIZE */
    if (err)
    {
        rt_set_errno(-EIO);
        return 0;
    }

    return size - remain_size;
}
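
/*
 * Set the transfer block length to 512 bytes with SET_BLOCKLEN (CMD16).
 * Block-addressed (SDHC) cards fix the block size at 512 bytes and ignore
 * the command, so it is skipped for them.
 */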
static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
{
    struct rt_mmcsd_cmd cmd;
    int err;

    /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
    if (card->flags & CARD_FLAG_SDHC)
        return 0;

    mmcsd_host_lock(card->host);
    cmd.cmd_code = SET_BLOCKLEN;
    cmd.arg = 512;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
    err = mmcsd_send_cmd(card->host, &cmd, 5);
    mmcsd_host_unlock(card->host);

    if (err)
    {
        LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
        return -RT_ERROR;
    }

    return 0;
}

rt_int32_t read_lba(struct rt_mmcsd_card *card, size_t lba, uint8_t *buffer, size_t count)
{
    /* use a signed type so negative error codes are not truncated */
    rt_int32_t status = 0;

    status = mmcsd_set_blksize(card);
    if (status)
    {
        return status;
    }
    rt_thread_mdelay(1);

    status = rt_mmcsd_req_blk(card, lba, buffer, count, 0);

    return status;
}

#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops mmcsd_blk_ops =
{
    rt_mmcsd_init,
    rt_mmcsd_open,
    rt_mmcsd_close,
    rt_mmcsd_read,
    rt_mmcsd_write,
    rt_mmcsd_control
};
#endif

#ifdef RT_USING_DFS_V2
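
/*
 * DFS v2 file operations. The block layer below works in whole sectors,
 * so these byte-oriented wrappers go through a one-sector bounce buffer
 * for any access that is not sector aligned (with read-modify-write on
 * partial sectors in the write path).
 */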
static ssize_t rt_mmcsd_fops_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int result = 0;
    rt_device_t dev = (rt_device_t)file->vnode->data;
    struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    int bytes_per_sector = blk_dev->geometry.bytes_per_sector;
    int blk_pos = *pos / bytes_per_sector;
    int first_offs = *pos % bytes_per_sector;
    char *rbuf;
    int rsize = 0;

    rbuf = rt_malloc(bytes_per_sector);
    if (!rbuf)
    {
        return 0;
    }

    /* #1: read the first, possibly unaligned, sector through the bounce buffer */
    result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
    if (result != 1)
    {
        rt_free(rbuf);
        return 0;
    }
    if (count > bytes_per_sector - first_offs)
    {
        rsize = bytes_per_sector - first_offs;
    }
    else
    {
        rsize = count;
    }
    rt_memcpy(buf, rbuf + first_offs, rsize);
    blk_pos++;

    /* #2: read the remaining data sector by sector */
    while (rsize < count)
    {
        result = rt_mmcsd_read(dev, blk_pos++, rbuf, 1);
        if (result != 1)
        {
            break;
        }
        if (count - rsize >= bytes_per_sector)
        {
            rt_memcpy((char *)buf + rsize, rbuf, bytes_per_sector);
            rsize += bytes_per_sector;
        }
        else
        {
            rt_memcpy((char *)buf + rsize, rbuf, count - rsize);
            rsize = count;
        }
    }

    rt_free(rbuf);
    *pos += rsize;
    return rsize;
}

static int rt_mmcsd_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
    rt_device_t dev = (rt_device_t)file->vnode->data;

    return rt_mmcsd_control(dev, cmd, arg);
}

static int rt_mmcsd_fops_open(struct dfs_file *file)
{
    rt_device_t dev = (rt_device_t)file->vnode->data;

    /* expose the full device size (in bytes) through the vnode */
    rt_mmcsd_control(dev, RT_DEVICE_CTRL_ALL_BLK_SSIZEGET, &file->vnode->size);

    return RT_EOK;
}

static int rt_mmcsd_fops_close(struct dfs_file *file)
{
    return RT_EOK;
}
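
/*
 * Byte-oriented write in three phases: #1 read-modify-write of the leading
 * partial sector, #2 a direct multi-sector write of the aligned middle,
 * #3 read-modify-write of the trailing partial sector.
 */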
static ssize_t rt_mmcsd_fops_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    int result = 0;
    int nblk = 0;
    rt_device_t dev = (rt_device_t)file->vnode->data;
    struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    int bytes_per_sector = blk_dev->geometry.bytes_per_sector;
    int blk_pos = *pos / bytes_per_sector;
    int first_offs = *pos % bytes_per_sector;
    char *rbuf = 0;
    int wsize = 0;

    /* #1: write the first, possibly unaligned, part (read-modify-write) */
    if (first_offs != 0)
    {
        if (count > bytes_per_sector - first_offs)
        {
            wsize = bytes_per_sector - first_offs;
        }
        else
        {
            wsize = count;
        }
        rbuf = rt_malloc(bytes_per_sector);
        if (!rbuf)
        {
            return 0;
        }
        result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
        if (result != 1)
        {
            rt_free(rbuf);
            return 0;
        }
        rt_memcpy(rbuf + first_offs, buf, wsize);
        result = rt_mmcsd_write(dev, blk_pos, rbuf, 1);
        if (result != 1)
        {
            rt_free(rbuf);
            return 0;
        }
        rt_free(rbuf);
        blk_pos += 1;
    }

    /* #2: write the sector-aligned middle part directly */
    nblk = (count - wsize) / bytes_per_sector;
    if (nblk != 0)
    {
        result = rt_mmcsd_write(dev, blk_pos, (const char *)buf + wsize, nblk);
        wsize += result * bytes_per_sector;
        blk_pos += result;
        /* compare against the block count requested *before* wsize was
         * updated, so a short write is detected correctly */
        if (result != nblk)
        {
            *pos += wsize;
            return wsize;
        }
    }

    /* #3: write the trailing partial sector (read-modify-write) */
    if ((count - wsize) != 0)
    {
        rbuf = rt_malloc(bytes_per_sector);
        if (rbuf != RT_NULL)
        {
            result = rt_mmcsd_read(dev, blk_pos, rbuf, 1);
            if (result == 1)
            {
                rt_memcpy(rbuf, (const char *)buf + wsize, count - wsize);
                result = rt_mmcsd_write(dev, blk_pos, rbuf, 1);
                if (result == 1)
                {
                    wsize += count - wsize;
                }
            }
            rt_free(rbuf);
        }
    }

    *pos += wsize;
    return wsize;
}

static int rt_mmcsd_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    int mask = 0;

    return mask;
}

static int rt_mmcsd_fops_flush(struct dfs_file *file)
{
    return RT_EOK;
}

const static struct dfs_file_ops mmcsd_blk_fops =
{
    rt_mmcsd_fops_open,
    rt_mmcsd_fops_close,
    rt_mmcsd_fops_ioctl,
    rt_mmcsd_fops_read,
    rt_mmcsd_fops_write,
    rt_mmcsd_fops_flush,
    generic_dfs_lseek,
    RT_NULL,
    RT_NULL,
    rt_mmcsd_fops_poll
};
#endif
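
/*
 * Probe a GPT-partitioned card: register one block device spanning the
 * whole card (named after the host), then one device per valid GPT
 * partition entry, each guarded by its own partition semaphore.
 */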
rt_int32_t gpt_device_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = RT_EOK;
    rt_uint8_t i, status;
    char dname[10];
    char sname[16];
    struct mmcsd_blk_device *blk_dev = RT_NULL;

    blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
    if (!blk_dev)
    {
        LOG_E("mmcsd:malloc memory failed!");
        return -1;
    }

    blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                     card->host->max_seg_size) >> 9,
                                    (card->host->max_blk_count *
                                     card->host->max_blk_size) >> 9);
    blk_dev->part.offset = 0;
    blk_dev->part.size = 0;
    rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
    blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

    /* register the whole-card mmcsd device */
    blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
    blk_dev->dev.ops = &mmcsd_blk_ops;
#else
    blk_dev->dev.init = rt_mmcsd_init;
    blk_dev->dev.open = rt_mmcsd_open;
    blk_dev->dev.close = rt_mmcsd_close;
    blk_dev->dev.read = rt_mmcsd_read;
    blk_dev->dev.write = rt_mmcsd_write;
    blk_dev->dev.control = rt_mmcsd_control;
#endif
    blk_dev->card = card;

    blk_dev->geometry.bytes_per_sector = 1 << 9;
    blk_dev->geometry.block_size = card->card_blksize;
    blk_dev->geometry.sector_count =
        card->card_capacity * (1024 / 512);

    blk_dev->dev.user_data = blk_dev;

    rt_device_register(&(blk_dev->dev), card->host->name,
                       RT_DEVICE_FLAG_RDWR);
#ifdef RT_USING_POSIX_DEVIO
#ifdef RT_USING_DFS_V2
    blk_dev->dev.fops = &mmcsd_blk_fops;
#endif
#endif
    rt_list_insert_after(&blk_devices, &blk_dev->list);

    for (i = 0; i < RT_GPT_PARTITION_MAX; i++)
    {
        blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
        if (!blk_dev)
        {
            LOG_E("mmcsd:malloc memory failed!");
            break;
        }

        blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                         card->host->max_seg_size) >> 9,
                                        (card->host->max_blk_count *
                                         card->host->max_blk_size) >> 9);

        /* get the i-th partition parameters */
        status = gpt_get_partition_param(card, &blk_dev->part, i);
        if (status == RT_EOK)
        {
            rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
            rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
            blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

            /* register the partition as an mmcsd device */
            blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
            blk_dev->dev.ops = &mmcsd_blk_ops;
#else
            blk_dev->dev.init = rt_mmcsd_init;
            blk_dev->dev.open = rt_mmcsd_open;
            blk_dev->dev.close = rt_mmcsd_close;
            blk_dev->dev.read = rt_mmcsd_read;
            blk_dev->dev.write = rt_mmcsd_write;
            blk_dev->dev.control = rt_mmcsd_control;
#endif
            blk_dev->card = card;

            blk_dev->geometry.bytes_per_sector = 1 << 9;
            blk_dev->geometry.block_size = card->card_blksize;
            blk_dev->geometry.sector_count = blk_dev->part.size;

            blk_dev->dev.user_data = blk_dev;

            rt_device_register(&(blk_dev->dev), dname,
                               RT_DEVICE_FLAG_RDWR);
#ifdef RT_USING_POSIX_DEVIO
#ifdef RT_USING_DFS_V2
            blk_dev->dev.fops = &mmcsd_blk_fops;
#endif
#endif
            rt_list_insert_after(&blk_devices, &blk_dev->list);
        }
        else
        {
            rt_free(blk_dev);
            blk_dev = RT_NULL;
            break;
        }

#ifdef RT_USING_DFS_MNTTABLE
        if (blk_dev)
        {
            LOG_I("try to mount file system!");
            /* try to mount a file system on this block device */
            dfs_mount_device(&(blk_dev->dev));
        }
#endif
    }

    gpt_free();

    return err;
}
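
/*
 * Probe an MBR-partitioned card: read sector 0, register the whole-card
 * device, then one device per partition entry found in the MBR table.
 */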
rt_int32_t mbr_device_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = 0;
    rt_uint8_t i, status;
    rt_uint8_t *sector;
    char dname[10];
    char sname[16];
    struct mmcsd_blk_device *blk_dev = RT_NULL;

    err = mmcsd_set_blksize(card);
    if (err)
    {
        return err;
    }
    rt_thread_mdelay(1);

    /* read the first sector to get the partition table */
    sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
    if (sector == RT_NULL)
    {
        LOG_E("allocate partition sector buffer failed!");
        return -RT_ENOMEM;
    }

    status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
    if (status == RT_EOK)
    {
        blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
        if (!blk_dev)
        {
            LOG_E("mmcsd:malloc memory failed!");
            /* free the sector buffer before bailing out */
            rt_free(sector);
            return -1;
        }

        blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                         card->host->max_seg_size) >> 9,
                                        (card->host->max_blk_count *
                                         card->host->max_blk_size) >> 9);
        blk_dev->part.offset = 0;
        blk_dev->part.size = 0;
        rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, 0);
        blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

        /* register the whole-card mmcsd device */
        blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
        blk_dev->dev.ops = &mmcsd_blk_ops;
#else
        blk_dev->dev.init = rt_mmcsd_init;
        blk_dev->dev.open = rt_mmcsd_open;
        blk_dev->dev.close = rt_mmcsd_close;
        blk_dev->dev.read = rt_mmcsd_read;
        blk_dev->dev.write = rt_mmcsd_write;
        blk_dev->dev.control = rt_mmcsd_control;
#endif
        blk_dev->card = card;

        blk_dev->geometry.bytes_per_sector = 1 << 9;
        blk_dev->geometry.block_size = card->card_blksize;
        blk_dev->geometry.sector_count =
            card->card_capacity * (1024 / 512);

        blk_dev->dev.user_data = blk_dev;

        rt_device_register(&(blk_dev->dev), card->host->name,
                           RT_DEVICE_FLAG_RDWR);
        rt_list_insert_after(&blk_devices, &blk_dev->list);

        for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
        {
            blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
            if (!blk_dev)
            {
                LOG_E("mmcsd:malloc memory failed!");
                break;
            }

            blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                             card->host->max_seg_size) >> 9,
                                            (card->host->max_blk_count *
                                             card->host->max_blk_size) >> 9);

            /* get the i-th partition entry */
            status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
            if (status == RT_EOK)
            {
                rt_snprintf(dname, sizeof(dname) - 1, "%s%d", card->host->name, i);
                rt_snprintf(sname, sizeof(sname) - 1, "sem_%s%d", card->host->name, i + 1);
                blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

                /* register the partition as an mmcsd device */
                blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
                blk_dev->dev.ops = &mmcsd_blk_ops;
#else
                blk_dev->dev.init = rt_mmcsd_init;
                blk_dev->dev.open = rt_mmcsd_open;
                blk_dev->dev.close = rt_mmcsd_close;
                blk_dev->dev.read = rt_mmcsd_read;
                blk_dev->dev.write = rt_mmcsd_write;
                blk_dev->dev.control = rt_mmcsd_control;
#endif
                blk_dev->card = card;

                blk_dev->geometry.bytes_per_sector = 1 << 9;
                blk_dev->geometry.block_size = card->card_blksize;
                blk_dev->geometry.sector_count = blk_dev->part.size;

                blk_dev->dev.user_data = blk_dev;

                rt_device_register(&(blk_dev->dev), dname,
                                   RT_DEVICE_FLAG_RDWR);
                rt_list_insert_after(&blk_devices, &blk_dev->list);
            }
            else
            {
                rt_free(blk_dev);
                blk_dev = RT_NULL;
                break;
            }

#ifdef RT_USING_DFS_MNTTABLE
            if (blk_dev)
            {
                LOG_I("try to mount file system!");
                /* try to mount a file system on this block device */
                dfs_mount_device(&(blk_dev->dev));
            }
#endif
        }
    }
    else
    {
        LOG_E("read mmcsd first sector failed");
        err = -RT_ERROR;
    }

    /* release the sector buffer */
    rt_free(sector);

    return err;
}
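
/*
 * Top-level probe: prefer the GPT layout when check_gpt() detects one,
 * otherwise fall back to the classic MBR partition table.
 */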
rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = 0; /* signed, so negative probe errors are propagated */

    LOG_D("probe mmcsd block device!");
    if (check_gpt(card) != 0)
    {
        err = gpt_device_probe(card);
    }
    else
    {
        err = mbr_device_probe(card);
    }
    return err;
}
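
/*
 * Tear down every block device that belongs to 'card': unmount any mounted
 * file system, delete the partition semaphore, unregister the device, and
 * free the node.
 */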
void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
    rt_list_t *l, *n;
    struct mmcsd_blk_device *blk_dev;

    for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n, n = n->next)
    {
        blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
        if (blk_dev->card == card)
        {
            /* unmount the file system, if one is mounted */
            const char *mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
            if (mounted_path)
            {
                dfs_unmount(mounted_path);
                LOG_D("unmount file system %s for device %s.\r\n", mounted_path, blk_dev->dev.parent.name);
            }

            rt_sem_delete(blk_dev->part.lock);
            rt_device_unregister(&blk_dev->dev);
            rt_list_remove(&blk_dev->list);
            rt_free(blk_dev);
        }
    }
}

/*
 * This function will initialize the block device on the MMC/SD card.
 *
 * @deprecated since 2.1.0, this function does not need to be invoked
 * in the system initialization.
 */
int rt_mmcsd_blk_init(void)
{
    /* nothing to do */
    return 0;
}