/**************************************************************************//**
*
* @copyright (C) 2019 Nuvoton Technology Corp. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date            Author       Notes
* 2021-1-13       Wayne        First version
*
******************************************************************************/
#include <rtthread.h>

#if defined(NU_PKG_USING_SPINAND) && defined(RT_USING_MTD_NAND)

#define LOG_TAG          "drv_spinand"
#define DBG_ENABLE
#define DBG_SECTION_NAME LOG_TAG
#define DBG_LEVEL        DBG_INFO
#define DBG_COLOR
#include <rtdbg.h>

#include "spinand.h"

struct nu_spinand g_spinandflash_dev = {0};

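/*
 * Transfer a chain of QSPI messages on the underlying SPI bus.
 *
 * The bus lock is held for the whole chain, the bus is re-configured if the
 * current owner differs, and each linked rt_spi_message is handed to the bus
 * ops until the list ends or a transfer fails.
 */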
rt_size_t nu_qspi_transfer_message(struct rt_qspi_device *device, struct rt_qspi_message *message)
{
    rt_err_t result;
    struct rt_spi_message *index;

    RT_ASSERT(device != RT_NULL);
    RT_ASSERT(message != RT_NULL);

    result = rt_mutex_take(&(device->parent.bus->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(-RT_EBUSY);
        return 0;
    }

    /* reset errno */
    rt_set_errno(RT_EOK);

    /* configure SPI bus */
    if (device->parent.bus->owner != &device->parent)
    {
        /* not the same owner as current, re-configure SPI bus */
        result = device->parent.bus->ops->configure(&device->parent, &device->parent.config);
        if (result == RT_EOK)
        {
            /* set SPI bus owner */
            device->parent.bus->owner = &device->parent;
        }
        else
        {
            /* configure SPI bus failed */
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
    }

    /* transmit each SPI message */
    index = &message->parent;
    while (index)
    {
        if (device->parent.bus->ops->xfer(&device->parent, index) == 0)
        {
            result = -RT_EIO;
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
        index = index->next;
    }
    result = RT_EOK;

__exit:

    /* release bus lock */
    rt_mutex_release(&(device->parent.bus->lock));

    return result;
}

rt_err_t nu_qspi_send_then_recv(struct rt_qspi_device *device, const void *send_buf, rt_size_t send_length, void *recv_buf, rt_size_t recv_length)
{
    struct rt_qspi_message message[2] = {0};

    RT_ASSERT(send_buf);
    RT_ASSERT(recv_buf);
    RT_ASSERT(send_length != 0);

    /* Send message */
    message[0].qspi_data_lines = 1;

    /* Set send buf and send size */
    message[0].parent.recv_buf = RT_NULL;
    message[0].parent.send_buf = send_buf;
    message[0].parent.length = send_length;
    message[0].parent.cs_take = 1;
    message[0].parent.next = &message[1].parent;

    /* Receive message */
    message[1].qspi_data_lines = 1;

    /* Set recv buf and recv size */
    message[1].parent.recv_buf = recv_buf;
    message[1].parent.send_buf = RT_NULL;
    message[1].parent.length = recv_length;
    message[1].parent.cs_release = 1;

    return nu_qspi_transfer_message(device, &message[0]);
}

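/*
 * Send a raw command buffer. The first byte is peeled off into the QSPI
 * instruction phase; the remaining bytes go out as single-line data.
 */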
rt_err_t nu_qspi_send(struct rt_qspi_device *device, const void *send_buf, rt_size_t length)
{
    RT_ASSERT(send_buf);
    RT_ASSERT(length != 0);

    struct rt_qspi_message message = {0};
    char *ptr = (char *)send_buf;
    rt_size_t count = 0;

    message.instruction.content = ptr[0];
    message.instruction.qspi_lines = 1;
    count++;

    /* set send buf and send size */
    message.qspi_data_lines = 1;
    message.parent.send_buf = ptr + count;
    message.parent.recv_buf = RT_NULL;
    message.parent.length = length - count;
    message.parent.cs_take = 1;
    message.parent.cs_release = 1;

    return nu_qspi_transfer_message(device, &message);
}

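/*
 * Usage sketch (an illustration only, not part of the driver): on most SPI
 * NAND parts, the Get Feature command (0x0F) plus a feature address such as
 * 0xC0 returns one status byte, which maps naturally onto the helpers above.
 *
 *     uint8_t cmd[2] = { 0x0F, 0xC0 };   // Get Feature, status register (typical parts)
 *     uint8_t status;
 *     nu_qspi_send_then_recv(SPINAND_FLASH_QSPI, cmd, sizeof(cmd), &status, 1);
 */
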
static void spinand_dump_buffer(int page, rt_uint8_t *buf, int len, const char *title)
{
    if ((DBG_LEVEL) >= DBG_LOG)
    {
        int i;
        if (!buf)
        {
            return;
        }

        /* Print at most the first 64 bytes. */
        len = (len < 64) ? len : 64;

        LOG_I("[%s-Page-%d]", title, page);
        for (i = 0; i < len; i++)
        {
            rt_kprintf("%02X ", buf[i]);
            if (i % 32 == 31) rt_kprintf("\n");
        }
        rt_kprintf("\n");
    }
}

static rt_err_t spinand_read_id(struct rt_mtd_nand_device *device)
{
    rt_err_t result = RT_EOK;
    uint32_t id = 0;

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    SPINAND_FLASH_OPS->jedecid_get(SPINAND_FLASH_QSPI, &id);

    result = rt_mutex_release(SPINAND_FLASH_LOCK);
    RT_ASSERT(result == RT_EOK);

    return (id != 0x0) ? RT_EOK : -RT_ERROR;
}

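/*
 * Note on addressing: the flash ops take a 24-bit row (page) address split
 * into three bytes, MSB first, so every call site passes
 * (page >> 16) & 0xFF, (page >> 8) & 0xFF and page & 0xFF. Column addresses
 * inside a page are split the same way into two bytes.
 */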
static rt_err_t spinand_read_page(struct rt_mtd_nand_device *device,
                                  rt_off_t page,
                                  rt_uint8_t *data,
                                  rt_uint32_t data_len,
                                  rt_uint8_t *spare,
                                  rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK;

    LOG_D("[R-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);

    RT_ASSERT(device != RT_NULL);

    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] read page:%d", page);
        return -RT_MTD_EIO;
    }

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: read the page from flash into the on-chip cache. */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, (page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_read_page;

    if (data && data_len)
    {
        /* Read main data: columns 0 .. data_len-1 from the cache. */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, 0, 0, data, data_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

    if (spare && spare_len)
    {
        /* Read spare data: columns starting at SPINAND_FLASH_PAGE_SIZE from the cache. */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, spare, spare_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

exit_spinand_read_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    spinand_dump_buffer(page, data, data_len, "Read Data");
    spinand_dump_buffer(page, spare, spare_len, "Read Spare");

    return result;
}

static rt_err_t spinand_write_page(struct rt_mtd_nand_device *device,
                                   rt_off_t page,
                                   const rt_uint8_t *data,
                                   rt_uint32_t data_len,
                                   const rt_uint8_t *spare,
                                   rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK;

    LOG_D("[W-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);

    RT_ASSERT(device != RT_NULL);

    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] write page:%d", page);
        return -RT_MTD_EIO;
    }

    spinand_dump_buffer(page, (uint8_t *)data, data_len, "WRITE DATA");
    spinand_dump_buffer(page, (uint8_t *)spare, spare_len, "WRITE SPARE");

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    if (SPINAND_FLASH_MCP == 1)
    {
        /* Select die. */
        if ((result = SPINAND_FLASH_OPS->die_select(SPINAND_FLASH_QSPI, SPINAND_DIE_ID0)) != RT_EOK)
            goto exit_spinand_write_page;
    }

    /* Program load: write data and spare into the on-chip cache. */
    if (data && data_len)
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, 0, 0, (uint8_t *)data, data_len, (uint8_t *)spare, spare_len);
    else
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, RT_NULL, 0, (uint8_t *)spare, spare_len);
    if (result != RT_EOK)
        goto exit_spinand_write_page;

    /* Program execute: flush the cache into the flash array. */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, ((page >> 16) & 0xFF), ((page >> 8) & 0xFF), (page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_write_page;

    result = RT_EOK;

exit_spinand_write_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

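/*
 * Copy one page to another without moving the data through the host: the
 * source page is loaded into the on-chip cache, write-enable (0x06) is
 * issued, and program-execute flushes the cache to the destination row.
 */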
static rt_err_t spinand_move_page(struct rt_mtd_nand_device *device, rt_off_t src_page, rt_off_t dst_page)
{
    rt_err_t result = RT_EOK;
    uint8_t u8WECmd;

    RT_ASSERT(device != RT_NULL);

    if ((src_page / device->pages_per_block > device->block_end) ||
            (dst_page / device->pages_per_block > device->block_end))
    {
        LOG_E("EIO src:%08x, dst:%08x!", src_page, dst_page);
        return -RT_MTD_EIO;
    }

    LOG_D("src_page: %d, dst_page: %d", src_page, dst_page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: read the source page from flash into the cache. */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (src_page >> 16) & 0xFF, (src_page >> 8) & 0xFF, (src_page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    /* Enable WE before writing. */
    u8WECmd = 0x06;
    if ((result = nu_qspi_send(SPINAND_FLASH_QSPI, &u8WECmd, sizeof(u8WECmd))) != RT_EOK)
        goto exit_spinand_move_page;

    /* Program execute: flush the cache into the destination page. */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, ((dst_page >> 16) & 0xFF), ((dst_page >> 8) & 0xFF), (dst_page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    result = RT_EOK;

exit_spinand_move_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

static rt_err_t spinand_erase_block_force(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page;

    RT_ASSERT(device != RT_NULL);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("force erase block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_erase_block_force;

    result = RT_EOK;

exit_spinand_erase_block_force:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

static rt_err_t spinand_erase_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page;

    RT_ASSERT(device != RT_NULL);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("erase block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Check whether the block is bad before erasing it. */
    if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
    {
        LOG_W("Block %d is bad.\n", block);
        result = -RT_ERROR;
        goto exit_spinand_erase_block;
    }
    else
    {
        result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
        if (result != RT_EOK)
            goto exit_spinand_erase_block;
    }

    result = RT_EOK;

exit_spinand_erase_block:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

static rt_err_t spinand_check_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page = 0;
    uint8_t isbad = 0;

    RT_ASSERT(device != RT_NULL);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("check block status: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    isbad = SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page);

    result = rt_mutex_release(SPINAND_FLASH_LOCK);
    RT_ASSERT(result == RT_EOK);

    return (isbad == 0) ? RT_EOK : -RT_ERROR;
}

static rt_err_t spinand_mark_badblock(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page = 0;

    RT_ASSERT(device != RT_NULL);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("mark bad block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Skip marking if the block is already bad. */
    if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
    {
        LOG_W("Block %d is bad.\n", block);
        result = RT_EOK;
    }
    else
    {
        result = SPINAND_FLASH_OPS->block_markbad(SPINAND_FLASH_QSPI, page);
    }

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

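/* MTD NAND operation table. The positional order below must match the
   function-pointer order in struct rt_mtd_nand_driver_ops: read_id,
   read_page, write_page, move_page, erase_block, check_block, mark_badblock. */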
static struct rt_mtd_nand_driver_ops spinand_ops =
{
    spinand_read_id,
    spinand_read_page,
    spinand_write_page,
    spinand_move_page,
    spinand_erase_block,
    spinand_check_block,
    spinand_mark_badblock
};

static uint32_t u32IsInited = 0;

rt_err_t rt_hw_mtd_spinand_init(void)
{
    int i = 0;
    rt_err_t result;
    char szTmp[8];

    if (u32IsInited)
        return RT_EOK;

    result = rt_mutex_init(SPINAND_FLASH_LOCK, "spinand", RT_IPC_FLAG_PRIO);
    RT_ASSERT(result == RT_EOK);

    result = spinand_flash_init(SPINAND_FLASH_QSPI);
    if (result != RT_EOK)
        return -RT_ERROR;

    for (i = 0; i < MTD_SPINAND_PARTITION_NUM; i++)
    {
        mtd_partitions[i].page_size       = SPINAND_FLASH_PAGE_SIZE;          /* Page size in the flash */
        mtd_partitions[i].pages_per_block = SPINAND_FLASH_PAGE_PER_BLOCK_NUM; /* Number of pages in a block */
        mtd_partitions[i].oob_size        = SPINAND_FLASH_OOB_SIZE;           /* Out-of-band (spare) size */
        mtd_partitions[i].oob_free        = 32;                               /* Free area in OOB that the flash driver does not use */
        mtd_partitions[i].plane_num       = SPINAND_FLASH_MCP;                /* Number of planes in the NAND flash */
        mtd_partitions[i].ops             = &spinand_ops;

        rt_snprintf(szTmp, sizeof(szTmp), "nand%d", i);

        result = rt_mtd_nand_register_device(szTmp, &mtd_partitions[i]);
        RT_ASSERT(result == RT_EOK);
    }

    u32IsInited = 1;

    return result;
}

rt_err_t rt_hw_mtd_spinand_register(const char *device_name)
{
    rt_device_t pDev;
    rt_err_t result;

    if ((pDev = rt_device_find(device_name)) == RT_NULL)
        return -RT_ERROR;

    SPINAND_FLASH_QSPI = (struct rt_qspi_device *)pDev;
    SPINAND_FLASH_QSPI->config.parent.mode = RT_SPI_MODE_0 | RT_SPI_MSB;
    SPINAND_FLASH_QSPI->config.parent.data_width = 8;
    SPINAND_FLASH_QSPI->config.parent.max_hz = 48000000;
    SPINAND_FLASH_QSPI->config.ddr_mode = 0;
    SPINAND_FLASH_QSPI->config.qspi_dl_width = 4;

    result = rt_spi_configure(&SPINAND_FLASH_QSPI->parent, &SPINAND_FLASH_QSPI->config.parent);
    RT_ASSERT(result == RT_EOK);

    return rt_hw_mtd_spinand_init();
}

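/*
 * Usage sketch (an illustration; the QSPI bus name is board-specific and
 * "qspi0" below is only an assumed example): register the flash on an
 * already-attached QSPI device during board initialization.
 *
 *     static int board_spinand_init(void)
 *     {
 *         return rt_hw_mtd_spinand_register("qspi0");
 *     }
 *     INIT_DEVICE_EXPORT(board_spinand_init);
 */
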
#if defined(RT_USING_DFS_UFFS)
#include "dfs_uffs.h"
void uffs_setup_storage(struct uffs_StorageAttrSt *attr,
                        struct rt_mtd_nand_device *nand)
{
    RT_ASSERT(attr != RT_NULL);
    RT_ASSERT(nand != RT_NULL);

    rt_memset(attr, 0, sizeof(struct uffs_StorageAttrSt));

    attr->page_data_size = nand->page_size;           /* page data size */
    attr->pages_per_block = nand->pages_per_block;    /* pages per block */
    attr->spare_size = nand->oob_size;                /* page spare size */
    attr->ecc_opt = RT_CONFIG_UFFS_ECC_MODE;          /* ECC option */
    attr->ecc_size = nand->oob_size - nand->oob_free; /* ECC size */
    attr->block_status_offs = 0;                      /* bad/good block flag offset in spare */
    attr->layout_opt = RT_CONFIG_UFFS_LAYOUT;         /* let UFFS do the spare layout */

    /* initialize _uffs_data_layout and _uffs_ecc_layout */
    rt_memcpy(attr->_uffs_data_layout, spinand_flash_data_layout, UFFS_SPARE_LAYOUT_SIZE);
    rt_memcpy(attr->_uffs_ecc_layout, spinand_flash_ecc_layout, UFFS_SPARE_LAYOUT_SIZE);

    attr->data_layout = attr->_uffs_data_layout;
    attr->ecc_layout = attr->_uffs_ecc_layout;
}
#endif

#include <finsh.h>
static int nread(int argc, char **argv)
{
    int ret = -1;
    rt_uint8_t *spare = RT_NULL;
    rt_uint8_t *data_ptr = RT_NULL;
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, page;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
        goto exit_nread;
    }

    page = atoi(argv[2]);
    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nread;

    device = &mtd_partitions[partition];

    data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
    if (data_ptr == RT_NULL)
    {
        LOG_E("data_ptr: no memory\n");
        goto exit_nread;
    }
    spare = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_OOB_SIZE);
    if (spare == RT_NULL)
    {
        LOG_E("spare: no memory\n");
        goto exit_nread;
    }

    rt_memset(spare, 0, SPINAND_FLASH_OOB_SIZE);
    rt_memset(data_ptr, 0, SPINAND_FLASH_PAGE_SIZE);

    page = page + device->block_start * device->pages_per_block;

    if (spinand_read_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, &spare[0], SPINAND_FLASH_OOB_SIZE) != RT_EOK)
        goto exit_nread;

    LOG_I("Partition:%d page-%d", partition, page);

    ret = 0;

exit_nread:

    /* release memory */
    if (data_ptr)
        rt_free(data_ptr);
    if (spare)
        rt_free(spare);

    return ret;
}

static int nwrite(int argc, char **argv)
{
    int i, ret = -1;
    rt_uint8_t *data_ptr = RT_NULL;
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, page;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
        goto exit_nwrite;
    }

    partition = atoi(argv[1]);
    page = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nwrite;

    device = &mtd_partitions[partition];

    data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
    if (data_ptr == RT_NULL)
    {
        LOG_E("data_ptr: no memory\n");
        goto exit_nwrite;
    }

    /* Fill with pseudo-random data to exercise the ECC. */
    for (i = 0; i < SPINAND_FLASH_PAGE_SIZE; i++)
        data_ptr[i] = i / 5 - i;

    page = page + device->block_start * device->pages_per_block;

    spinand_write_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, NULL, 0);

    LOG_I("Wrote data into page %d in partition-index %d.", page, partition);

    ret = 0;

exit_nwrite:

    /* release memory */
    if (data_ptr)
        rt_free(data_ptr);

    return ret;
}

static int nmove(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, src, dst;

    if (argc != 4)
    {
        LOG_E("Usage %s: %s <partition_no> <src page> <dst page>.\n", __func__, __func__);
        goto exit_nmove;
    }

    partition = atoi(argv[1]);
    src = atoi(argv[2]);
    dst = atoi(argv[3]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        return -1;

    device = &mtd_partitions[partition];

    spinand_move_page(device,
                      src + device->block_start * device->pages_per_block,
                      dst + device->block_start * device->pages_per_block);

    LOG_I("Moved data from page %d to page %d in partition-index %d.", src, dst, partition);

    return 0;

exit_nmove:

    return -1;
}

static int nerase(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
        goto exit_nerase;
    }

    partition = atoi(argv[1]);
    block = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase;

    device = &mtd_partitions[partition];
    if (spinand_erase_block(device, block + device->block_start) != RT_EOK)
        goto exit_nerase;

    LOG_I("Erased block %d in partition-index %d.", block + device->block_start, partition);

    return 0;

exit_nerase:

    return -1;
}

static int nerase_force(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>\n", __func__, __func__);
        goto exit_nerase_force;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase_force;

    device = &mtd_partitions[partition];
    for (block = 0; block <= device->block_end; block++)
    {
        if (spinand_erase_block_force(device, block + device->block_start) != RT_EOK)
            goto exit_nerase_force;
        LOG_I("Forcibly erased block %d in partition-index %d.", block + device->block_start, partition);
    }

    return 0;

exit_nerase_force:

    return -1;
}

static int nmarkbad(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
        goto exit_nmarkbad;
    }

    partition = atoi(argv[1]);
    block = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nmarkbad;

    device = &mtd_partitions[partition];
    if (spinand_mark_badblock(device, block + device->block_start) != RT_EOK)
        goto exit_nmarkbad;

    LOG_I("Marked block %d in partition-index %d.", block + device->block_start, partition);

    return 0;

exit_nmarkbad:

    return -1;
}

static int nerase_all(int argc, char **argv)
{
    rt_uint32_t index;
    rt_uint32_t partition;
    struct rt_mtd_nand_device *device;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
        goto exit_nerase_all;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase_all;

    device = &mtd_partitions[partition];

    for (index = 0; index < device->block_total; index++)
    {
        spinand_erase_block(device, index);
    }

    LOG_I("Erased all blocks in partition-index %d.", partition);

    return 0;

exit_nerase_all:

    return -1;
}

static int ncheck_all(int argc, char **argv)
{
    rt_uint32_t index;
    rt_uint32_t partition;
    struct rt_mtd_nand_device *device;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
        return -1;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        return -1;

    device = &mtd_partitions[partition];

    for (index = 0; index < device->block_total; index++)
    {
        LOG_I("Partition:%d Block-%d is %s", partition, index, spinand_check_block(device, index) ? "bad" : "good");
    }

    return 0;
}

static int nid(int argc, char **argv)
{
    spinand_read_id(RT_NULL);
    return 0;
}

static int nlist(int argc, char **argv)
{
    rt_uint32_t index;
    struct rt_mtd_nand_device *device;

    rt_kprintf("\n");
    for (index = 0; index < MTD_SPINAND_PARTITION_NUM; index++)
    {
        device = &mtd_partitions[index];
        rt_kprintf("[Partition #%d]\n", index);
        rt_kprintf("Name: %s\n", device->parent.parent.name);
        rt_kprintf("Start block: %d\n", device->block_start);
        rt_kprintf("End block: %d\n", device->block_end);
        rt_kprintf("Block number: %d\n", device->block_total);
        rt_kprintf("Plane number: %d\n", device->plane_num);
        rt_kprintf("Pages per Block: %d\n", device->pages_per_block);
        rt_kprintf("Page size: %d bytes\n", device->page_size);
        rt_kprintf("Spare size: %d bytes\n", device->oob_size);
        rt_kprintf("Total size: %d bytes (%d KB)\n", device->block_total * device->pages_per_block * device->page_size,
                   device->block_total * device->pages_per_block * device->page_size / 1024);
        rt_kprintf("\n");
    }
    return 0;
}

#ifdef FINSH_USING_MSH
MSH_CMD_EXPORT(nid, nand id);
MSH_CMD_EXPORT(nlist, list all partition information on nand);
MSH_CMD_EXPORT(nmove, nand copy page);
MSH_CMD_EXPORT(nerase, nand erase a block of one partition);
MSH_CMD_EXPORT(nerase_force, nand erase a block of one partition forcibly);
MSH_CMD_EXPORT(nerase_all, erase all blocks of a partition);
MSH_CMD_EXPORT(ncheck_all, check all blocks of a partition);
MSH_CMD_EXPORT(nmarkbad, nand mark bad block of one partition);
MSH_CMD_EXPORT(nwrite, nand write page);
MSH_CMD_EXPORT(nread, nand read page);
#endif

#endif