blkpart.c

#define pr_fmt(fmt) "blkpart: " fmt

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>

#include <blkpart.h>
#include <rtthread.h>
#include <rtdevice.h>

#define MIN(a, b) ((a) > (b) ? (b) : (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
/* Head of the singly linked list of registered block devices. */
static struct blkpart *blk_head = NULL;
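
/* Remove a block device from the global list; does nothing if the list is
 * empty or the device is not in it. */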
void blkpart_del_list(struct blkpart *blk)
{
    struct blkpart *pblk, *pre;

    if (!blk_head)
    {
        return;
    }

    pblk = pre = blk_head;
    for (pblk = blk_head; pblk; pre = pblk, pblk = pblk->next)
    {
        if (pblk == blk)
        {
            if (pblk == blk_head)
            {
                /* removing the head: advance it rather than dropping the whole list */
                blk_head = pblk->next;
            }
            else
            {
                pre->next = pblk->next;
            }
            break;
        }
    }
}
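
/* Append a block device to the tail of the global list. */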
void blkpart_add_list(struct blkpart *blk)
{
    struct blkpart *pblk, *pre;

    blk->next = NULL;
    if (!blk_head)
    {
        blk_head = blk;
        return;
    }

    pblk = pre = blk_head;
    while (pblk)
    {
        pre = pblk;
        pblk = pblk->next;
    }
    pre->next = blk;
}
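
/* Unregister a block device: walk its partitions (no per-partition cleanup
 * is performed here) and remove it from the global list. */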
void del_blkpart(struct blkpart *blk)
{
    int i;

    if (!blk)
    {
        return;
    }

    for (i = 0; i < blk->n_parts; i++)
    {
        struct part *part = &blk->parts[i];
        if (!part)
        {
            continue;
        }
        /* nothing to release per partition yet */
    }

    blkpart_del_list(blk);
}
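
/* Look up a partition of the named block device by index: index 0 returns
 * the root partition, PARTINDEX_THE_LAST returns the last partition,
 * otherwise index N returns the N-th partition (1-based). */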
struct part *get_part_by_index(const char *blk_name, uint32_t index)
{
    struct blkpart *blk = blk_head;

    for (blk = blk_head; blk; blk = blk->next)
    {
        if (!strcmp(blk_name, blk->name))
        {
            if (index == 0)
            {
                return &blk->root;
            }
            else if (index == PARTINDEX_THE_LAST)
            {
                return &blk->parts[blk->n_parts - 1];
            }
            else if (blk->n_parts >= index)
            {
                return &blk->parts[index - 1];
            }
            else
            {
                return NULL;
            }
        }
    }
    return NULL;
}
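
/* Optional finsh command that prints the size and offset of every
 * registered partition. */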
#ifdef CONFIG_BLKPART_SHOW_INFO_CMD
static int part_info_main(int argc, char **argv)
{
    int i;
    struct blkpart *blk;
    struct part *part;

    for (blk = blk_head; blk; blk = blk->next)
    {
        for (i = 0; i < blk->n_parts; i++)
        {
            part = &blk->parts[i];
            printf("%s(%s): bytes 0x%llx off 0x%llx\n", part->name, part->devname,
                   part->bytes, part->off);
        }
    }
    return 0;
}
FINSH_FUNCTION_EXPORT_CMD(part_info_main, __cmd_part_info, dump nor partitions);
#endif
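
/* Find a partition by its name or device name; a leading "/dev/" prefix
 * is accepted and skipped. */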
struct part *get_part_by_name(const char *name)
{
    struct blkpart *blk;

    if (!strncmp(name, "/dev/", sizeof("/dev/") - 1))
    {
        name += sizeof("/dev/") - 1;
    }

    for (blk = blk_head; blk; blk = blk->next)
    {
        int i;

        for (i = 0; i < blk->n_parts; i++)
        {
            struct part *part = &blk->parts[i];

            if (!strcmp(part->name, name))
            {
                return part;
            }
            if (!strcmp(part->devname, name))
            {
                return part;
            }
        }
    }
    return NULL;
}
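
/* Find a registered block device by name; a NULL name returns the list head. */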
struct blkpart *get_blkpart_by_name(const char *name)
{
    struct blkpart *blk;

    if (!name)
    {
        return blk_head;
    }

    for (blk = blk_head; blk; blk = blk->next)
    {
        if (!strcmp(blk->name, name))
        {
            return blk;
        }
    }
    return NULL;
}
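
/* Read 'size' sectors starting at sector 'offset' within the partition.
 * The request is clamped to the partition and translated to a device
 * offset; unaligned head and tail pieces are bounced through a temporary
 * page buffer while the aligned middle is read directly into the caller's
 * buffer. Returns the number of sectors read. */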
rt_size_t part_read(rt_device_t dev, rt_off_t offset, void *data, rt_size_t size)
{
    if (size == 0)
    {
        return 0;
    }

    ssize_t ret, sz = 0;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    char *page_buf = NULL;

    size *= blk->blk_bytes; /* sectors to bytes */
    offset *= blk->blk_bytes;

    if (offset >= part->bytes)
    {
        printf("read offset %lu over part size %lu\n", offset, part->bytes);
        return 0;
    }
    if (offset + size > part->bytes)
    {
        printf("read %s(%s) over limit: offset %lu + size %lu over %lu\n",
               part->name, part->devname, offset, size, part->bytes);
    }
    size = MIN(part->bytes - offset, size);
    pr_debug("read %s(%s) off 0x%x size %lu\n", part->name, part->devname,
             offset, size);
    offset += part->off;

    if (offset % blk->page_bytes || size % blk->page_bytes)
    {
        page_buf = malloc(blk->page_bytes);
        if (!page_buf)
        {
            return -ENOMEM;
        }
        memset(page_buf, 0, blk->page_bytes);
    }

    /**
     * Step 1:
     * read the leading data that is not aligned to the page size
     */
    if (offset % blk->page_bytes)
    {
        uint32_t addr, poff, len;

        addr = ALIGN_DOWN(offset, blk->page_bytes);
        poff = offset - addr;
        len = MIN(blk->page_bytes - poff, size);

        pr_debug("offset %lu not align %u, fix them before align read\n",
                 offset, blk->page_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", addr);
        ret = spinor_dev->read(spinor_dev, addr / blk->page_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        if (ret != blk->page_bytes)
        {
            goto err;
        }

        pr_debug("step2: copy page data to buf with page offset 0x%x and len %u\n",
                 poff, len);
        memcpy(data, page_buf + poff, len);
        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 2:
     * read the data that is aligned to the page size
     */
    while (size >= blk->page_bytes)
    {
        uint32_t len = (size / blk->page_bytes) * blk->page_bytes;

        ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, (char *)data, len / blk->blk_bytes);
        ret *= blk->page_bytes;
        if (ret != len)
        {
            goto err;
        }
        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 3:
     * read the trailing data that is not aligned to the page size
     */
    if (size)
    {
        pr_debug("last size %u not align %u, read them\n", size, blk->page_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", offset);
        ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        if (ret != blk->page_bytes)
        {
            goto err;
        }
        pr_debug("step2: copy page data to buf with len %u\n", size);
        memcpy(data, page_buf, size);
        sz += size;
    }

#ifdef DEBUG
    pr_debug("read data:\n");
    hexdump(data, sz);
#endif
    ret = 0;
    goto out;

err:
    pr_err("read failed - %d\n", (int)ret);
out:
    if (page_buf)
    {
        free(page_buf);
    }
    return ret ? ret / blk->blk_bytes : sz / blk->blk_bytes;
}
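
/* Low-level write helpers used by _part_write(): do_write_without_erase()
 * hands the request straight to the driver, while do_erase_write_blk()
 * erases the target range first and then writes it. */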
int do_write_without_erase(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
    return dev->write(dev, addr, buf, size);
}

static int do_erase_write_blk(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
#if 0
    /* This code is prepared for elmfat mounted on spinor */
    int ret;
    uint8_t *read_buf;
    unsigned int align_addr = ALIGN_DOWN(addr, blk->blk_bytes);

    read_buf = malloc(blk->blk_bytes);
    if (!read_buf)
    {
        return -ENOMEM;
    }
    memset(read_buf, 0, blk->blk_bytes);

    ret = dev->read(dev, align_addr, read_buf, blk->blk_bytes);
    if (ret != blk->blk_bytes)
    {
        free(read_buf);
        return -EIO;
    }

    if (!(align_addr % blk->blk_bytes))
    {
        blk_dev_erase_t erase_sector;

        memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
        erase_sector.addr = align_addr;
        erase_sector.len = blk->blk_bytes;
        ret = dev->control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
        if (ret)
        {
            free(read_buf);
            return ret;
        }
    }

    memcpy(read_buf + (addr - align_addr), buf, blk->page_bytes);
    ret = dev->write(dev, align_addr, read_buf, blk->blk_bytes);
    free(read_buf);
    if (ret == blk->blk_bytes)
    {
        return blk->page_bytes;
    }
    else
    {
        return -EIO;
    }
#else
    int ret = -1;
    blk_dev_erase_t erase_sector;

    memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
    erase_sector.addr = addr;
    erase_sector.len = size;
    ret = dev->control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
    if (ret)
    {
        return -EIO;
    }

    ret = dev->write(dev, addr, buf, size);
    if (ret == size)
    {
        return size;
    }
    else
    {
        return -EIO;
    }
#endif
}
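
/* Common write path shared by the two entry points below: clamps the
 * request to the partition, translates the sector offset, and splits the
 * transfer into an unaligned head, aligned middle blocks and an unaligned
 * tail. Unaligned pieces are handled read-modify-write through a temporary
 * block buffer; erase_before_write selects which helper performs the
 * actual write. Returns the number of sectors written. */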
rt_size_t _part_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size, int erase_before_write)
{
    ssize_t ret, sz = 0;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    char *blk_buf = NULL;
    int (*pwrite)(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf);

    if (size == 0)
    {
        return 0;
    }

    size *= blk->blk_bytes; /* sectors to bytes */
    offset *= blk->blk_bytes;

    if (offset >= part->bytes)
    {
        printf("write offset %lu over part size %lu\n", offset, part->bytes);
        return 0;
    }
    if (offset + size > part->bytes)
    {
        printf("write %s(%s) over limit: offset %lu + size %lu over %lu\n",
               part->name, part->devname, offset, size, part->bytes);
    }
    size = MIN(part->bytes - offset, size);
    pr_debug("write %s(%s) off 0x%x size %lu (erase %d)\n", part->name,
             part->devname, offset, size, erase_before_write);
    offset += part->off;

    if (offset % blk->blk_bytes || size % blk->blk_bytes)
    {
        blk_buf = malloc(blk->blk_bytes);
        if (!blk_buf)
        {
            return -ENOMEM;
        }
        memset(blk_buf, 0, blk->blk_bytes);
    }

    if (erase_before_write)
    {
        pwrite = do_erase_write_blk;
    }
    else
    {
        pwrite = do_write_without_erase;
    }

    /**
     * Step 1:
     * write the leading data that is not aligned to the block size
     */
    if (offset % blk->blk_bytes)
    {
        uint32_t addr, poff, len;

        addr = ALIGN_DOWN(offset, blk->blk_bytes);
        poff = offset - addr;
        len = MIN(blk->blk_bytes - poff, size);

        pr_debug("offset %u not align %u, fix them before align write\n",
                 offset, blk->blk_bytes);
        pr_debug("step1: read block data from addr 0x%x\n", addr);
        ret = spinor_dev->read(spinor_dev, addr / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }

        /* addr must be less than or equal to offset */
        pr_debug("step2: copy buf data to block data with block offset 0x%x and len %u\n",
                 poff, len);
        memcpy(blk_buf + poff, data, len);

        pr_debug("step3: flush the fixed block data\n");
        ret = pwrite(spinor_dev, blk, addr / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }
        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 2:
     * write the data that is aligned to the block size
     */
    while (size >= blk->blk_bytes)
    {
        uint32_t len = (size / blk->blk_bytes) * blk->blk_bytes;

        ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, len / blk->blk_bytes, (char *)data);
        ret *= blk->blk_bytes;
        if (ret != len)
        {
            goto err;
        }
        offset += len;
        data += len;
        sz += len;
        size -= len;
    }

    /**
     * Step 3:
     * write the trailing data that is not aligned to the block size
     */
    if (size)
    {
        pr_debug("last size %u not align %u, write them\n", size, blk->blk_bytes);
        pr_debug("step1: read block data from addr 0x%x\n", offset);
        memset(blk_buf, 0x00, blk->blk_bytes);
        ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }
        pr_debug("step2: copy buf to block data with len %u\n", size);
        memcpy(blk_buf, data, size);
        pr_debug("step3: flush the fixed block data\n");
        ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
        ret *= blk->blk_bytes;
        if (ret != blk->blk_bytes)
        {
            goto err;
        }
        sz += size;
    }

#ifdef DEBUG
    pr_debug("write data:\n");
    hexdump(data, sz);
#endif
    ret = 0;
    goto out;

err:
    pr_err("write failed - %d\n", (int)ret);
out:
    if (blk_buf)
    {
        free(blk_buf);
    }
    return ret ? ret / blk->blk_bytes : sz / blk->blk_bytes;
}
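
/* rt_device write entry points: part_erase_before_write() erases each block
 * before writing it, part_erase_without_write() writes without erasing. */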
rt_size_t part_erase_before_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
{
    return _part_write(dev, offset, data, size, 1);
}

rt_size_t part_erase_without_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
{
    return _part_write(dev, offset, data, size, 0);
}
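
/* rt_device control entry point: maps partition-level commands (erase
 * sector, block size, total size, geometry) onto the underlying block
 * device, adjusting offsets and lengths to stay within the partition. */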
rt_err_t part_control(rt_device_t dev, int cmd, void *args)
{
    rt_err_t ret = -1;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    struct rt_device_blk_geometry *geometry = NULL;
    blk_dev_erase_t *erase_sector = (blk_dev_erase_t *)args;

    switch (cmd)
    {
    case DEVICE_PART_CMD_ERASE_SECTOR:
        erase_sector = (blk_dev_erase_t *)(args);
        if (erase_sector->addr + erase_sector->len > part->bytes)
        {
            printf("erase %s(%s) over limit: offset %u + size %u over %lu\n",
                   part->name, part->devname, erase_sector->addr, erase_sector->len, part->bytes);
        }
        erase_sector->len = MIN(part->bytes - erase_sector->addr, erase_sector->len);
        erase_sector->addr = erase_sector->addr + part->off;
        if (spinor_dev && spinor_dev->control)
        {
            ret = spinor_dev->control(spinor_dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, erase_sector);
        }
        break;
    case DEVICE_PART_CMD_GET_BLOCK_SIZE:
        if (spinor_dev && spinor_dev->control)
        {
            ret = spinor_dev->control(spinor_dev, BLOCK_DEVICE_CMD_GET_BLOCK_SIZE, args);
        }
        else
        {
            ret = -1;
        }
        break;
    case DEVICE_PART_CMD_GET_TOTAL_SIZE:
        *(unsigned int *)args = part->bytes;
        ret = 0;
        break;
    case RT_DEVICE_CTRL_BLK_GETGEOME:
        geometry = (struct rt_device_blk_geometry *)args;
        memset(geometry, 0, sizeof(struct rt_device_blk_geometry));
        if (spinor_dev && spinor_dev->control)
        {
            ret = spinor_dev->control(spinor_dev, RT_DEVICE_CTRL_BLK_GETGEOME, args);
            if (!ret)
            {
                geometry->sector_count = part->bytes / geometry->bytes_per_sector;
                ret = 0;
            }
        }
        break;
    case RT_DEVICE_CTRL_BLK_ERASE:
        ret = 0;
        break;
    default:
        break;
    }
    return ret;
}