1
0

drv_spinand.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912
  1. /**************************************************************************//**
  2. *
  3. * @copyright (C) 2019 Nuvoton Technology Corp. All rights reserved.
  4. *
  5. * SPDX-License-Identifier: Apache-2.0
  6. *
  7. * Change Logs:
  8. * Date Author Notes
  9. * 2021-1-13 Wayne First version
  10. *
  11. ******************************************************************************/
  12. #include <rtthread.h>
  13. #if defined(NU_PKG_USING_SPINAND) && defined(RT_USING_MTD_NAND)
  14. #define LOG_TAG "drv_spinand"
  15. #define DBG_ENABLE
  16. #define DBG_SECTION_NAME LOG_TAG
  17. #define DBG_LEVEL DBG_INFO
  18. #define DBG_COLOR
  19. #include <rtdbg.h>
  20. #include "spinand.h"
/* Driver-wide SPI NAND context — presumably the storage behind the
   SPINAND_FLASH_* accessor macros; see spinand.h (TODO confirm). */
struct nu_spinand g_spinandflash_dev = {0};
/**
 * Transmit a linked chain of (Q)SPI messages while holding the bus lock.
 *
 * @param device  QSPI device to transfer on; its parent bus is locked for
 *                the whole chain.
 * @param message Head of the message chain (walked through parent.next).
 *
 * @return RT_EOK on success; 0 with errno set to -RT_EBUSY when the bus
 *         lock cannot be taken; a negative error code on configure/transfer
 *         failure.
 *         NOTE(review): the declared return type is rt_size_t but the value
 *         returned is an rt_err_t status, not a byte count — confirm that
 *         all callers treat it as a status.
 */
rt_size_t nu_qspi_transfer_message(struct rt_qspi_device *device, struct rt_qspi_message *message)
{
    rt_err_t result;
    struct rt_spi_message *index;

    RT_ASSERT(device != RT_NULL);
    RT_ASSERT(message != RT_NULL);

    /* Serialize access to the underlying SPI bus. */
    result = rt_mutex_take(&(device->parent.bus->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(-RT_EBUSY);
        return 0;
    }

    /* reset errno */
    rt_set_errno(RT_EOK);

    /* Re-configure the SPI bus only when the previous transfer belonged to
       a different device on the same bus. */
    if (device->parent.bus->owner != &device->parent)
    {
        /* not the same owner as current, re-configure SPI bus */
        result = device->parent.bus->ops->configure(&device->parent, &device->parent.config);
        if (result == RT_EOK)
        {
            /* set SPI bus owner */
            device->parent.bus->owner = &device->parent;
        }
        else
        {
            /* configure SPI bus failed */
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
    }

    /* Transmit each SPI message in the chain; xfer() returning 0 means the
       transfer failed, so abort the rest of the chain. */
    index = &message->parent;
    while (index)
    {
        if (device->parent.bus->ops->xfer(&device->parent, index) == 0)
        {
            result = -RT_EIO;
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
        index = index->next;
    }

    result = RT_EOK;

__exit:
    /* release bus lock */
    rt_mutex_release(&(device->parent.bus->lock));

    return result;
}
  71. rt_err_t nu_qspi_send_then_recv(struct rt_qspi_device *device, const void *send_buf, rt_size_t send_length, void *recv_buf, rt_size_t recv_length)
  72. {
  73. struct rt_qspi_message message[2] = {0};
  74. RT_ASSERT(send_buf);
  75. RT_ASSERT(recv_buf);
  76. RT_ASSERT(send_length != 0);
  77. /* Send message */
  78. message[0].qspi_data_lines = 1;
  79. /* Set send buf and send size */
  80. message[0].parent.recv_buf = RT_NULL;
  81. message[0].parent.send_buf = send_buf;
  82. message[0].parent.length = send_length;
  83. message[0].parent.cs_take = 1;
  84. message[0].parent.next = &message[1].parent;
  85. /* Receive message */
  86. message[1].qspi_data_lines = 1;
  87. /* Set recv buf and recv size */
  88. message[1].parent.recv_buf = recv_buf;
  89. message[1].parent.send_buf = RT_NULL;
  90. message[1].parent.length = recv_length;
  91. message[1].parent.cs_release = 1;
  92. return nu_qspi_transfer_message(device, &message[0]);
  93. }
  94. rt_err_t nu_qspi_send(struct rt_qspi_device *device, const void *send_buf, rt_size_t length)
  95. {
  96. RT_ASSERT(send_buf);
  97. RT_ASSERT(length != 0);
  98. struct rt_qspi_message message = {0};
  99. char *ptr = (char *)send_buf;
  100. rt_size_t count = 0;
  101. message.instruction.content = ptr[0];
  102. message.instruction.qspi_lines = 1;
  103. count++;
  104. /* set send buf and send size */
  105. message.qspi_data_lines = 1;
  106. message.parent.send_buf = ptr + count;
  107. message.parent.recv_buf = RT_NULL;
  108. message.parent.length = length - count;
  109. message.parent.cs_take = 1;
  110. message.parent.cs_release = 1;
  111. return nu_qspi_transfer_message(device, &message);
  112. }
  113. static void spinand_dump_buffer(int page, rt_uint8_t *buf, int len, const char *title)
  114. {
  115. if ((DBG_LEVEL) >= DBG_LOG)
  116. {
  117. int i;
  118. if (!buf)
  119. {
  120. return;
  121. }
  122. /* Just print 64-bytes.*/
  123. len = (len < 64) ? len : 64;
  124. LOG_I("[%s-Page-%d]", title, page);
  125. for (i = 0; i < len; i ++)
  126. {
  127. rt_kprintf("%02X ", buf[i]);
  128. if (i % 32 == 31) rt_kprintf("\n");
  129. }
  130. rt_kprintf("\n");
  131. }
  132. }
  133. static rt_err_t spinand_read_id(struct rt_mtd_nand_device *device)
  134. {
  135. rt_err_t result = RT_EOK ;
  136. uint32_t id = 0;
  137. result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
  138. RT_ASSERT(result == RT_EOK);
  139. SPINAND_FLASH_OPS->jedecid_get(SPINAND_FLASH_QSPI, &id);
  140. result = rt_mutex_release(SPINAND_FLASH_LOCK);
  141. RT_ASSERT(result == RT_EOK);
  142. return (id != 0x0) ? RT_EOK : -RT_ERROR;
  143. }
/**
 * Read one page (main data and/or spare/OOB area) from the SPI NAND.
 *
 * @param device    MTD NAND device (partition) the page belongs to.
 * @param page      Absolute page number, split into a 24-bit row address.
 * @param data      Destination for main-area bytes, or RT_NULL to skip.
 * @param data_len  Number of main-area bytes to read.
 * @param spare     Destination for spare/OOB bytes, or RT_NULL to skip.
 * @param spare_len Number of spare bytes to read.
 *
 * @return RT_EOK on success, -RT_MTD_EIO for an out-of-range page, or the
 *         error propagated from the flash ops.
 */
static rt_err_t spinand_read_page(struct rt_mtd_nand_device *device,
                                  rt_off_t page,
                                  rt_uint8_t *data,
                                  rt_uint32_t data_len,
                                  rt_uint8_t *spare,
                                  rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK ;

    LOG_D("[R-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);
    RT_ASSERT(device != RT_NULL);

    /* Reject pages beyond the last block of the device. */
    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] read page:%d", page);
        return -RT_MTD_EIO;
    }

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: copy the page from the flash array into the on-chip cache.
       The three bytes are the 24-bit row address, MSB first.
       NOTE(review): the write path selects die 0 for MCP parts but this
       read path does not — confirm whether a die_select is needed here. */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, (page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_read_page;

    if (data && data_len)
    {
        /* Main area: read from cache starting at column 0. */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, 0, 0, data, data_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

    if (spare && spare_len)
    {
        /* Spare area: read from cache starting at column
           SPINAND_FLASH_PAGE_SIZE (split into two column-address bytes). */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, spare, spare_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

exit_spinand_read_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    /* The dumps are no-ops unless DBG_LEVEL >= DBG_LOG. */
    spinand_dump_buffer(page, data, data_len, "Read Data");
    spinand_dump_buffer(page, spare, spare_len, "Read Spare");

    return result;
}
/**
 * Program one page (main data and/or spare area) into the SPI NAND.
 *
 * When no main data is supplied, only the spare area is loaded into the
 * cache (starting at column SPINAND_FLASH_PAGE_SIZE) before programming.
 *
 * @param device    MTD NAND device (partition) the page belongs to.
 * @param page      Absolute page number, split into a 24-bit row address.
 * @param data      Main-area source buffer, or RT_NULL.
 * @param data_len  Main-area byte count.
 * @param spare     Spare/OOB source buffer, or RT_NULL.
 * @param spare_len Spare byte count.
 *
 * @return RT_EOK on success, -RT_MTD_EIO for an out-of-range page, or the
 *         error propagated from the flash ops.
 */
static rt_err_t spinand_write_page(struct rt_mtd_nand_device *device,
                                   rt_off_t page,
                                   const rt_uint8_t *data,
                                   rt_uint32_t data_len,
                                   const rt_uint8_t *spare,
                                   rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK ;

    LOG_D("[W-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);
    RT_ASSERT(device != RT_NULL);

    /* Reject pages beyond the last block of the device. */
    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] write page:%d", page);
        return -RT_MTD_EIO;
    }

    /* The dumps are no-ops unless DBG_LEVEL >= DBG_LOG. */
    spinand_dump_buffer(page, (uint8_t *)data, data_len, "WRITE DATA");
    spinand_dump_buffer(page, (uint8_t *)spare, spare_len, "WRITE SPARE");

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    if (SPINAND_FLASH_MCP == 1)
    {
        /* Multi-chip package: route the operation to die 0.
           NOTE(review): only this write path selects a die — confirm whether
           the read/erase paths need the same treatment. */
        if ((result = SPINAND_FLASH_OPS->die_select(SPINAND_FLASH_QSPI, SPINAND_DIE_ID0)) != RT_EOK)
            goto exit_spinand_write_page;
    }

    /* Load the on-chip cache: main (+spare) from column 0, or the spare
       area alone from column SPINAND_FLASH_PAGE_SIZE. */
    if (data && data_len)
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, 0, 0, (uint8_t *)data, data_len, (uint8_t *)spare, spare_len);
    else
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, RT_NULL, 0, (uint8_t *)spare, spare_len);

    if (result != RT_EOK)
        goto exit_spinand_write_page;

    /* Program-execute: commit the cache contents to the flash array. */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, (((page) >> 16) & 0xFF), (((page) >> 8) & 0xFF), (page) & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_write_page;

    result = RT_EOK;

exit_spinand_write_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
/**
 * Copy one page to another through the chip's internal cache: read the
 * source page into the cache, then program-execute the cache at the
 * destination row. The page data never crosses the SPI bus.
 *
 * @param device   MTD NAND device (partition).
 * @param src_page Absolute source page number.
 * @param dst_page Absolute destination page number.
 *
 * @return RT_EOK on success, -RT_MTD_EIO for an out-of-range page, or the
 *         error propagated from the flash ops.
 */
static rt_err_t spinand_move_page(struct rt_mtd_nand_device *device, rt_off_t src_page, rt_off_t dst_page)
{
    rt_err_t result = RT_EOK ;
    uint8_t u8WECmd;

    RT_ASSERT(device != RT_NULL);

    /* Both pages must lie inside the device. */
    if ((src_page / device->pages_per_block > device->block_end) ||
        (dst_page / device->pages_per_block > device->block_end))
    {
        LOG_E("EIO src:%08x, dst:%08x!", src_page, dst_page);
        return -RT_MTD_EIO;
    }

    LOG_D("src_page: %d, dst_page: %d", src_page, dst_page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: read the source page from the flash array into the cache. */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (src_page >> 16) & 0xFF, (src_page >> 8) & 0xFF, (src_page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    /* Issue Write Enable (opcode 0x06) before programming. */
    u8WECmd = 0x06;
    if ((result = nu_qspi_send(SPINAND_FLASH_QSPI, &u8WECmd, sizeof(u8WECmd))) != RT_EOK)
        goto exit_spinand_move_page;

    /* Program-execute the cache at the destination row address. */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, (((dst_page) >> 16) & 0xFF), (((dst_page) >> 8) & 0xFF), (dst_page) & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    result = RT_EOK;

exit_spinand_move_page:

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
  257. static rt_err_t spinand_erase_block_force(struct rt_mtd_nand_device *device, rt_uint32_t block)
  258. {
  259. rt_err_t result = RT_EOK ;
  260. uint32_t page;
  261. RT_ASSERT(device != RT_NULL);
  262. if (block > device->block_end)
  263. {
  264. LOG_E("[EIO] block:%d", block);
  265. return -RT_MTD_EIO;
  266. }
  267. page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;
  268. LOG_D("force erase block: %d -> page: %d", block, page);
  269. result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
  270. RT_ASSERT(result == RT_EOK);
  271. result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
  272. if (result != RT_EOK)
  273. goto exit_spinand_erase_block_force;
  274. result = RT_EOK;
  275. exit_spinand_erase_block_force:
  276. rt_mutex_release(SPINAND_FLASH_LOCK);
  277. return result;
  278. }
  279. static rt_err_t spinand_erase_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
  280. {
  281. rt_err_t result = RT_EOK ;
  282. uint32_t page;
  283. RT_ASSERT(device != RT_NULL);
  284. if (block > device->block_end)
  285. {
  286. LOG_E("[EIO] block:%d", block);
  287. return -RT_MTD_EIO;
  288. }
  289. page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;
  290. LOG_D("erase block: %d -> page: %d", block, page);
  291. result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
  292. RT_ASSERT(result == RT_EOK);
  293. /* Erase block after checking it is bad or not. */
  294. if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
  295. {
  296. LOG_W("Block %d is bad.\n", block);
  297. result = -RT_ERROR;
  298. goto exit_spinand_erase_block;
  299. }
  300. else
  301. {
  302. result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
  303. if (result != RT_EOK)
  304. goto exit_spinand_erase_block;
  305. }
  306. result = RT_EOK;
  307. exit_spinand_erase_block:
  308. rt_mutex_release(SPINAND_FLASH_LOCK);
  309. return result;
  310. }
  311. static rt_err_t spinand_check_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
  312. {
  313. rt_err_t result = RT_EOK ;
  314. uint32_t page = 0;
  315. uint8_t isbad = 0;
  316. RT_ASSERT(device != RT_NULL);
  317. if (block > device->block_end)
  318. {
  319. LOG_E("[EIO] block:%d", block);
  320. return -RT_MTD_EIO;
  321. }
  322. page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;
  323. LOG_D("check block status: %d -> page: %d", block, page);
  324. result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
  325. RT_ASSERT(result == RT_EOK);
  326. isbad = SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page);
  327. result = rt_mutex_release(SPINAND_FLASH_LOCK);
  328. RT_ASSERT(result == RT_EOK);
  329. return (isbad == 0) ? RT_EOK : -RT_ERROR ;
  330. }
  331. static rt_err_t spinand_mark_badblock(struct rt_mtd_nand_device *device, rt_uint32_t block)
  332. {
  333. rt_err_t result = RT_EOK ;
  334. uint32_t page = 0;
  335. RT_ASSERT(device != RT_NULL);
  336. if (block > device->block_end)
  337. {
  338. LOG_E("[EIO] block:%d", block);
  339. return -RT_MTD_EIO;
  340. }
  341. page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;
  342. LOG_D("mark bad block: %d -> page: %d", block, page);
  343. result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
  344. RT_ASSERT(result == RT_EOK);
  345. /* Erase block after checking it is bad or not. */
  346. if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
  347. {
  348. LOG_W("Block %d is bad.\n", block);
  349. result = RT_EOK;
  350. }
  351. else
  352. {
  353. result = SPINAND_FLASH_OPS->block_markbad(SPINAND_FLASH_QSPI, page);
  354. }
  355. rt_mutex_release(SPINAND_FLASH_LOCK);
  356. return result;
  357. }
  358. static struct rt_mtd_nand_driver_ops spinand_ops =
  359. {
  360. spinand_read_id,
  361. spinand_read_page,
  362. spinand_write_page,
  363. spinand_move_page,
  364. spinand_erase_block,
  365. spinand_check_block,
  366. spinand_mark_badblock
  367. };
/* One-shot guard: non-zero once the partitions have been registered. */
static uint32_t u32IsInited = 0;

/**
 * Initialize the SPI NAND flash and register every entry of
 * mtd_partitions[] as an MTD NAND device named "nand<N>".
 * Idempotent: subsequent calls return RT_EOK immediately.
 *
 * @return RT_EOK on success (asserts internally on failure).
 */
rt_err_t rt_hw_mtd_spinand_init(void)
{
    int i = 0;
    rt_err_t result;
    char szTmp[8]; /* "nand" + index + NUL */

    if (u32IsInited)
        return RT_EOK;

    result = rt_mutex_init(SPINAND_FLASH_LOCK, "spinand", RT_IPC_FLAG_PRIO);
    RT_ASSERT(result == RT_EOK);

    result = spinand_flash_init(SPINAND_FLASH_QSPI);
    RT_ASSERT(result == RT_EOK);

    for (i = 0; i < MTD_SPINAND_PARTITION_NUM; i++)
    {
        /* All partitions share the flash geometry probed above. */
        mtd_partitions[i].page_size = SPINAND_FLASH_PAGE_SIZE; /* The Page size in the flash */
        mtd_partitions[i].pages_per_block = SPINAND_FLASH_PAGE_PER_BLOCK_NUM; /* How many page number in a block */
        mtd_partitions[i].oob_size = SPINAND_FLASH_OOB_SIZE; /* Out of bank size */
        mtd_partitions[i].oob_free = 32; /* the free area in oob that flash driver not use */
        mtd_partitions[i].plane_num = SPINAND_FLASH_MCP ; /* the number of plane in the NAND Flash */
        mtd_partitions[i].ops = &spinand_ops;

        rt_snprintf(szTmp, sizeof(szTmp), "nand%d", i);

        result = rt_mtd_nand_register_device(szTmp, &mtd_partitions[i]);
        RT_ASSERT(result == RT_EOK);
    }

    u32IsInited = 1;

    return result;
}
  395. rt_err_t rt_hw_mtd_spinand_register(const char *device_name)
  396. {
  397. rt_device_t pDev;
  398. rt_err_t result;
  399. if ((pDev = rt_device_find(device_name)) == RT_NULL)
  400. return -RT_ERROR;
  401. SPINAND_FLASH_QSPI = (struct rt_qspi_device *)pDev;
  402. SPINAND_FLASH_QSPI->config.parent.mode = RT_SPI_MODE_0 | RT_SPI_MSB;
  403. SPINAND_FLASH_QSPI->config.parent.data_width = 8;
  404. SPINAND_FLASH_QSPI->config.parent.max_hz = 48000000;
  405. SPINAND_FLASH_QSPI->config.ddr_mode = 0;
  406. SPINAND_FLASH_QSPI->config.qspi_dl_width = 4;
  407. result = rt_spi_configure(&SPINAND_FLASH_QSPI->parent, &SPINAND_FLASH_QSPI->config.parent);
  408. RT_ASSERT(result == RT_EOK);
  409. return rt_hw_mtd_spinand_init();
  410. }
  411. #if defined(RT_USING_DFS_UFFS)
  412. #include "dfs_uffs.h"
/**
 * Translate an MTD NAND device's geometry into a UFFS storage-attribute
 * structure and install the driver-provided spare/ECC layouts.
 *
 * @param attr UFFS storage attributes to fill (zeroed first).
 * @param nand Registered MTD NAND device supplying the geometry.
 */
void uffs_setup_storage(struct uffs_StorageAttrSt *attr,
                        struct rt_mtd_nand_device *nand)
{
    RT_ASSERT(attr != RT_NULL);
    RT_ASSERT(nand != RT_NULL);

    rt_memset(attr, 0, sizeof(struct uffs_StorageAttrSt));

    attr->page_data_size = nand->page_size;           /* page data size */
    attr->pages_per_block = nand->pages_per_block;    /* pages per block */
    attr->spare_size = nand->oob_size;                /* page spare size */
    attr->ecc_opt = RT_CONFIG_UFFS_ECC_MODE;          /* ecc option */
    attr->ecc_size = nand->oob_size - nand->oob_free; /* ecc size */
    attr->block_status_offs = 0;                      /* indicate block bad or good, offset in spare */
    attr->layout_opt = RT_CONFIG_UFFS_LAYOUT;         /* let UFFS do the spare layout */

    /* initialize _uffs_data_layout and _uffs_ecc_layout */
    rt_memcpy(attr->_uffs_data_layout, spinand_flash_data_layout, UFFS_SPARE_LAYOUT_SIZE);
    rt_memcpy(attr->_uffs_ecc_layout, spinand_flash_ecc_layout, UFFS_SPARE_LAYOUT_SIZE);
    attr->data_layout = attr->_uffs_data_layout;
    attr->ecc_layout = attr->_uffs_ecc_layout;
}
  432. #endif
  433. #include <finsh.h>
  434. static int nread(int argc, char **argv)
  435. {
  436. int ret = -1;
  437. rt_uint8_t *spare = RT_NULL;
  438. rt_uint8_t *data_ptr = RT_NULL;
  439. struct rt_mtd_nand_device *device;
  440. rt_uint32_t partition, page;
  441. if (argc != 3)
  442. {
  443. LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
  444. goto exit_nread;
  445. }
  446. page = atoi(argv[2]);
  447. partition = atoi(argv[1]);
  448. if (partition >= MTD_SPINAND_PARTITION_NUM)
  449. goto exit_nread;
  450. device = &mtd_partitions[partition];
  451. data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
  452. if (data_ptr == RT_NULL)
  453. {
  454. LOG_E("data_ptr: no memory\n");
  455. goto exit_nread;
  456. }
  457. spare = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_OOB_SIZE);
  458. if (spare == RT_NULL)
  459. {
  460. LOG_E("spare: no memory\n");
  461. goto exit_nread;
  462. }
  463. rt_memset(spare, 0, SPINAND_FLASH_OOB_SIZE);
  464. rt_memset(data_ptr, 0, SPINAND_FLASH_PAGE_SIZE);
  465. page = page + device->block_start * device->pages_per_block;
  466. if (spinand_read_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, &spare[0], SPINAND_FLASH_OOB_SIZE) != RT_EOK)
  467. goto exit_nread;
  468. LOG_I("Partion:%d page-%d", partition, page);
  469. ret = 0;
  470. exit_nread:
  471. /* release memory */
  472. if (data_ptr)
  473. rt_free(data_ptr);
  474. if (spare)
  475. rt_free(spare);
  476. return ret;
  477. }
  478. static int nwrite(int argc, char **argv)
  479. {
  480. int i, ret = -1;
  481. rt_uint8_t *data_ptr = RT_NULL;
  482. struct rt_mtd_nand_device *device;
  483. rt_uint32_t partition, page;
  484. if (argc != 3)
  485. {
  486. LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
  487. goto exit_nwrite;
  488. }
  489. partition = atoi(argv[1]);
  490. page = atoi(argv[2]);
  491. if (partition >= MTD_SPINAND_PARTITION_NUM)
  492. goto exit_nwrite;
  493. device = &mtd_partitions[partition];
  494. data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
  495. if (data_ptr == RT_NULL)
  496. {
  497. LOG_E("data_ptr: no memory\n");
  498. goto exit_nwrite;
  499. }
  500. /* Need random data to test ECC */
  501. for (i = 0; i < SPINAND_FLASH_PAGE_SIZE; i ++)
  502. data_ptr[i] = i / 5 - i;
  503. page = page + device->block_start * device->pages_per_block;
  504. spinand_write_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, NULL, 0);
  505. LOG_I("Wrote data into %d in partition-index %d.", page, partition);
  506. ret = 0;
  507. exit_nwrite:
  508. /* release memory */
  509. if (data_ptr)
  510. rt_free(data_ptr);
  511. return ret;
  512. }
  513. static int nmove(int argc, char **argv)
  514. {
  515. struct rt_mtd_nand_device *device;
  516. rt_uint32_t partition, src, dst;
  517. if (argc != 4)
  518. {
  519. LOG_E("Usage %s: %s <partition_no> <src page> <dst page>.\n", __func__, __func__);
  520. goto exit_nmove;
  521. }
  522. partition = atoi(argv[1]);
  523. src = atoi(argv[2]);
  524. dst = atoi(argv[3]);
  525. if (partition >= MTD_SPINAND_PARTITION_NUM)
  526. return -1;
  527. device = &mtd_partitions[partition];
  528. spinand_move_page(device,
  529. src + device->block_start * device->pages_per_block,
  530. dst + device->block_start * device->pages_per_block);
  531. LOG_I("Move data into %d from %d in partition-index %d.", dst, src, partition);
  532. return 0;
  533. exit_nmove:
  534. return -1;
  535. }
  536. static int nerase(int argc, char **argv)
  537. {
  538. struct rt_mtd_nand_device *device;
  539. int partition, block;
  540. if (argc != 3)
  541. {
  542. LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
  543. goto exit_nerase;
  544. }
  545. partition = atoi(argv[1]);
  546. block = atoi(argv[2]);
  547. if (partition >= MTD_SPINAND_PARTITION_NUM)
  548. goto exit_nerase;
  549. device = &mtd_partitions[partition];
  550. if (spinand_erase_block(device, block + device->block_start) != RT_EOK)
  551. goto exit_nerase;
  552. LOG_I("Erased block %d in partition-index %d.", block + device->block_start, partition);
  553. return 0;
  554. exit_nerase:
  555. return -1;
  556. }
  557. static int nerase_force(int argc, char **argv)
  558. {
  559. struct rt_mtd_nand_device *device;
  560. int partition, block;
  561. if (argc != 2)
  562. {
  563. LOG_E("Usage %s: %s <partition_no>\n", __func__, __func__);
  564. goto exit_nerase_force;
  565. }
  566. partition = atoi(argv[1]);
  567. if (partition >= MTD_SPINAND_PARTITION_NUM)
  568. goto exit_nerase_force;
  569. device = &mtd_partitions[partition];
  570. for (block = 0; block <= device->block_end; block++)
  571. {
  572. if (spinand_erase_block_force(device, block + device->block_start) != RT_EOK)
  573. goto exit_nerase_force;
  574. LOG_I("Erased block %d in partition-index %d. forcely", block + device->block_start, partition);
  575. }
  576. return 0;
  577. exit_nerase_force:
  578. return -1;
  579. }
  580. static rt_err_t nmarkbad(int argc, char **argv)
  581. {
  582. struct rt_mtd_nand_device *device;
  583. int partition, block;
  584. if (argc != 3)
  585. {
  586. LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
  587. goto exit_nmarkbad;
  588. }
  589. partition = atoi(argv[1]);
  590. block = atoi(argv[2]);
  591. if (partition >= MTD_SPINAND_PARTITION_NUM)
  592. goto exit_nmarkbad;
  593. device = &mtd_partitions[partition];
  594. if (spinand_mark_badblock(device, block + device->block_start) != RT_EOK)
  595. goto exit_nmarkbad;
  596. LOG_I("Marked block %d in partition-index %d.", block + device->block_start, partition);
  597. return 0;
  598. exit_nmarkbad:
  599. return -1;
  600. }
  601. static int nerase_all(int argc, char **argv)
  602. {
  603. rt_uint32_t index;
  604. rt_uint32_t partition;
  605. struct rt_mtd_nand_device *device;
  606. if (argc != 2)
  607. {
  608. LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
  609. goto exit_nerase_all;
  610. }
  611. partition = atoi(argv[1]);
  612. if (partition >= MTD_SPINAND_PARTITION_NUM)
  613. goto exit_nerase_all;
  614. device = &mtd_partitions[partition];
  615. for (index = 0; index < device->block_total; index ++)
  616. {
  617. spinand_erase_block(device, index);
  618. }
  619. LOG_I("Erased all block in partition-index %d.", partition);
  620. return 0;
  621. exit_nerase_all:
  622. return -1;
  623. }
  624. static int ncheck_all(int argc, char **argv)
  625. {
  626. rt_uint32_t index;
  627. rt_uint32_t partition;
  628. struct rt_mtd_nand_device *device;
  629. if (argc != 2)
  630. {
  631. LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
  632. return -1;
  633. }
  634. partition = atoi(argv[1]);
  635. if (partition >= MTD_SPINAND_PARTITION_NUM)
  636. return -1;
  637. device = &mtd_partitions[partition];
  638. for (index = 0; index < device->block_total; index ++)
  639. {
  640. LOG_I("Partion:%d Block-%d is %s", partition, index, spinand_check_block(device, index) ? "bad" : "good");
  641. }
  642. return 0;
  643. }
  644. static int nid(int argc, char **argv)
  645. {
  646. spinand_read_id(RT_NULL);
  647. return 0;
  648. }
  649. static int nlist(int argc, char **argv)
  650. {
  651. rt_uint32_t index;
  652. struct rt_mtd_nand_device *device;
  653. rt_kprintf("\n");
  654. for (index = 0 ; index < MTD_SPINAND_PARTITION_NUM ; index++)
  655. {
  656. device = &mtd_partitions[index];
  657. rt_kprintf("[Partition #%d]\n", index);
  658. rt_kprintf("Name: %s\n", device->parent.parent.name);
  659. rt_kprintf("Start block: %d\n", device->block_start);
  660. rt_kprintf("End block: %d\n", device->block_end);
  661. rt_kprintf("Block number: %d\n", device->block_total);
  662. rt_kprintf("Plane number: %d\n", device->plane_num);
  663. rt_kprintf("Pages per Block: %d\n", device->pages_per_block);
  664. rt_kprintf("Page size: %d bytes\n", device->page_size);
  665. rt_kprintf("Spare size: %d bytes\n", device->oob_size);
  666. rt_kprintf("Total size: %d bytes (%d KB)\n", device->block_total * device->pages_per_block * device->page_size,
  667. device->block_total * device->pages_per_block * device->page_size / 1024);
  668. rt_kprintf("\n");
  669. }
  670. return 0;
  671. }
  672. #ifdef FINSH_USING_MSH
  673. MSH_CMD_EXPORT(nid, nand id);
  674. MSH_CMD_EXPORT(nlist, list all partition information on nand);
  675. MSH_CMD_EXPORT(nmove, nand copy page);
  676. MSH_CMD_EXPORT(nerase, nand erase a block of one partiton);
  677. MSH_CMD_EXPORT(nerase_force, nand erase a block of one partiton forcely);
  678. MSH_CMD_EXPORT(nerase_all, erase all blocks of a partition);
  679. MSH_CMD_EXPORT(ncheck_all, check all blocks of a partition);
  680. MSH_CMD_EXPORT(nmarkbad, nand mark bad block of one partition);
  681. MSH_CMD_EXPORT(nwrite, nand write page);
  682. MSH_CMD_EXPORT(nread, nand read page);
  683. #endif
  684. #endif