drv_spinand.c

/**************************************************************************//**
*
* @copyright (C) 2019 Nuvoton Technology Corp. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date            Author       Notes
* 2021-1-13       Wayne        First version
*
******************************************************************************/
#include <rtthread.h>

#if defined(NU_PKG_USING_SPINAND) && defined(RT_USING_MTD_NAND)

#define LOG_TAG          "drv_spinand"
#undef  DBG_ENABLE
#define DBG_SECTION_NAME LOG_TAG
#define DBG_LEVEL        LOG_LVL_INFO
#define DBG_COLOR
#include <rtdbg.h>

#include "spinand.h"

struct nu_spinand g_spinandflash_dev = {0};
rt_size_t nu_qspi_transfer_message(struct rt_qspi_device *device, struct rt_qspi_message *message)
{
    rt_err_t result;
    struct rt_spi_message *index;

    RT_ASSERT(device);
    RT_ASSERT(message);

    result = rt_mutex_take(&(device->parent.bus->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(-RT_EBUSY);
        return 0;
    }

    /* reset errno */
    rt_set_errno(RT_EOK);

    /* configure SPI bus */
    if (device->parent.bus->owner != &device->parent)
    {
        /* not the same owner as current, re-configure SPI bus */
        result = device->parent.bus->ops->configure(&device->parent, &device->parent.config);
        if (result == RT_EOK)
        {
            /* set SPI bus owner */
            device->parent.bus->owner = &device->parent;
        }
        else
        {
            /* configure SPI bus failed */
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
    }

    /* transmit each SPI message */
    index = &message->parent;
    while (index)
    {
        if (device->parent.bus->ops->xfer(&device->parent, index) == 0)
        {
            result = -RT_EIO;
            rt_set_errno(-RT_EIO);
            goto __exit;
        }
        index = index->next;
    }

    result = RT_EOK;

__exit:
    /* release bus lock */
    rt_mutex_release(&(device->parent.bus->lock));

    return result;
}
rt_err_t nu_qspi_send_then_recv(struct rt_qspi_device *device, const void *send_buf, rt_size_t send_length, void *recv_buf, rt_size_t recv_length)
{
    struct rt_qspi_message message[2] = {0};

    RT_ASSERT(send_buf);
    RT_ASSERT(recv_buf);
    RT_ASSERT(send_length != 0);

    /* Send message */
    message[0].qspi_data_lines = 1;

    /* Set send buf and send size */
    message[0].parent.recv_buf = RT_NULL;
    message[0].parent.send_buf = send_buf;
    message[0].parent.length = send_length;
    message[0].parent.cs_take = 1;
    message[0].parent.next = &message[1].parent;

    /* Receive message */
    message[1].qspi_data_lines = 1;

    /* Set recv buf and recv size */
    message[1].parent.recv_buf = recv_buf;
    message[1].parent.send_buf = RT_NULL;
    message[1].parent.length = recv_length;
    message[1].parent.cs_release = 1;

    return nu_qspi_transfer_message(device, &message[0]);
}
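
/*
 * Illustrative use of nu_qspi_send_then_recv(): reading a feature register
 * with the common SPI NAND Get Feature command (0x0F). The 0xC0 status
 * register address is an assumption from the generic SPI NAND command set;
 * consult the flash datasheet before relying on it.
 *
 *     uint8_t cmd[2] = { 0x0F, 0xC0 };    // opcode + feature address
 *     uint8_t status = 0;
 *     nu_qspi_send_then_recv(SPINAND_FLASH_QSPI, cmd, sizeof(cmd), &status, sizeof(status));
 */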
rt_err_t nu_qspi_send(struct rt_qspi_device *device, const void *send_buf, rt_size_t length)
{
    RT_ASSERT(send_buf);
    RT_ASSERT(length != 0);

    struct rt_qspi_message message = {0};
    char *ptr = (char *)send_buf;
    rt_size_t count = 0;

    message.instruction.content = ptr[0];
    message.instruction.qspi_lines = 1;
    count++;

    /* set send buf and send size */
    message.qspi_data_lines = 1;
    message.parent.send_buf = ptr + count;
    message.parent.recv_buf = RT_NULL;
    message.parent.length = length - count;
    message.parent.cs_take = 1;
    message.parent.cs_release = 1;

    return nu_qspi_transfer_message(device, &message);
}
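
/*
 * nu_qspi_send() treats the first byte of send_buf as the QSPI instruction
 * and ships any remaining bytes as data. A minimal sketch, using the same
 * 0x06 Write Enable opcode that spinand_move_page() issues below:
 *
 *     uint8_t u8WECmd = 0x06;
 *     nu_qspi_send(SPINAND_FLASH_QSPI, &u8WECmd, sizeof(u8WECmd));
 */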
static void spinand_dump_buffer(int page, rt_uint8_t *buf, int len, const char *title)
{
    if (!buf || len == 0)
    {
        return;
    }

    LOG_D("%s-->", title);
    LOG_HEX("spinand", 16, (void *)buf, len);
}

static rt_err_t spinand_read_id(struct rt_mtd_nand_device *device)
{
    rt_err_t result = RT_EOK;
    uint32_t id = 0;

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    SPINAND_FLASH_OPS->jedecid_get(SPINAND_FLASH_QSPI, &id);

    result = rt_mutex_release(SPINAND_FLASH_LOCK);
    RT_ASSERT(result == RT_EOK);

    LOG_I("JEDEC ID of the SPI NAND is [%08X]", id);

    return (id != 0x0) ? RT_EOK : -RT_ERROR;
}

static rt_err_t spinand_read_page(struct rt_mtd_nand_device *device,
                                  rt_off_t page,
                                  rt_uint8_t *data,
                                  rt_uint32_t data_len,
                                  rt_uint8_t *spare,
                                  rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK;

    LOG_D("[R-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);

    RT_ASSERT(device);

    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] read page:%d", page);
        return -RT_MTD_EIO;
    }

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: read data from flash to cache */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, (page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_read_page;

    if (data && data_len)
    {
        /* Read data: 0~data_len, read cache to data */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, 0, 0, data, data_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

    if (spare && spare_len)
    {
        /* Read spare: page-size offset onward, read cache to spare */
        result = SPINAND_FLASH_OPS->read_quadoutput(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, spare, spare_len);
        if (result != RT_EOK)
            goto exit_spinand_read_page;
    }

exit_spinand_read_page:
    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
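
/*
 * Note: the flash ops above take the 24-bit row (page) address as three
 * separate bytes, MSB first; e.g. page 0x00012345 is passed as
 * 0x01, 0x23, 0x45.
 */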
static rt_err_t spinand_write_page(struct rt_mtd_nand_device *device,
                                   rt_off_t page,
                                   const rt_uint8_t *data,
                                   rt_uint32_t data_len,
                                   const rt_uint8_t *spare,
                                   rt_uint32_t spare_len)
{
    rt_err_t result = RT_EOK;

    LOG_D("[W-%d]data: 0x%08x %d, spare: 0x%08x, %d", page, data, data_len, spare, spare_len);

    RT_ASSERT(device);

    if (page / device->pages_per_block > device->block_end)
    {
        LOG_E("[EIO] write page:%d", page);
        return -RT_MTD_EIO;
    }

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    if (SPINAND_FLASH_MCP == 1)
    {
        /* Select die. */
        if ((result = SPINAND_FLASH_OPS->die_select(SPINAND_FLASH_QSPI, SPINAND_DIE_ID0)) != RT_EOK)
            goto exit_spinand_write_page;
    }

    /* Load data: 0~2111, into cache */
    if (data && data_len)
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, 0, 0, (uint8_t *)data, data_len, (uint8_t *)spare, spare_len);
    else
        result = SPINAND_FLASH_OPS->program_dataload(SPINAND_FLASH_QSPI, (SPINAND_FLASH_PAGE_SIZE >> 8) & 0xff, SPINAND_FLASH_PAGE_SIZE & 0xff, RT_NULL, 0, (uint8_t *)spare, spare_len);
    if (result != RT_EOK)
        goto exit_spinand_write_page;

    /* Flush data in cache to flash */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, (((page) >> 16) & 0xFF), (((page) >> 8) & 0xFF), (page) & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_write_page;

    result = RT_EOK;

exit_spinand_write_page:
    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
static rt_err_t spinand_move_page(struct rt_mtd_nand_device *device, rt_off_t src_page, rt_off_t dst_page)
{
    rt_err_t result = RT_EOK;
    uint8_t u8WECmd;

    RT_ASSERT(device);

    if ((src_page / device->pages_per_block > device->block_end) ||
        (dst_page / device->pages_per_block > device->block_end))
    {
        LOG_E("EIO src:%08x, dst:%08x!", src_page, dst_page);
        return -RT_MTD_EIO;
    }

    LOG_D("src_page: %d, dst_page: %d", src_page, dst_page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Data load: read data from flash to cache */
    result = SPINAND_FLASH_OPS->read_dataload(SPINAND_FLASH_QSPI, (src_page >> 16) & 0xFF, (src_page >> 8) & 0xFF, (src_page & 0xFF));
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    /* Enable WE before writing. */
    u8WECmd = 0x06;
    if ((result = nu_qspi_send(SPINAND_FLASH_QSPI, &u8WECmd, sizeof(u8WECmd))) != RT_EOK)
        goto exit_spinand_move_page;

    /* Flush cache to flash */
    result = SPINAND_FLASH_OPS->program_execute(SPINAND_FLASH_QSPI, (((dst_page) >> 16) & 0xFF), (((dst_page) >> 8) & 0xFF), (dst_page) & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_move_page;

    result = RT_EOK;

exit_spinand_move_page:
    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
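
/*
 * spinand_move_page() is an on-chip copy: the source page is loaded into the
 * device cache and programmed straight to the destination page, so the data
 * never round-trips through host RAM. The explicit 0x06 Write Enable is
 * required before PROGRAM EXECUTE will commit the cache.
 */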
static rt_err_t spinand_erase_block_force(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page;

    RT_ASSERT(device);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("force erase block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
    if (result != RT_EOK)
        goto exit_spinand_erase_block_force;

    result = RT_EOK;

exit_spinand_erase_block_force:
    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}

static rt_err_t spinand_erase_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page;

    RT_ASSERT(device);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("erase block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Check whether the block is bad before erasing it. */
    if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
    {
        LOG_W("Block %d is bad.\n", block);
        result = -RT_ERROR;
        goto exit_spinand_erase_block;
    }
    else
    {
        result = SPINAND_FLASH_OPS->block_erase(SPINAND_FLASH_QSPI, (page >> 16) & 0xFF, (page >> 8) & 0xFF, page & 0xFF);
        if (result != RT_EOK)
            goto exit_spinand_erase_block;
    }

    result = RT_EOK;

exit_spinand_erase_block:
    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
static rt_err_t spinand_check_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page = 0;
    uint8_t isbad = 0;

    RT_ASSERT(device);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("check block status: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    isbad = SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page);

    result = rt_mutex_release(SPINAND_FLASH_LOCK);
    RT_ASSERT(result == RT_EOK);

    return (isbad == 0) ? RT_EOK : -RT_ERROR;
}

static rt_err_t spinand_mark_badblock(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    rt_err_t result = RT_EOK;
    uint32_t page = 0;

    RT_ASSERT(device);

    if (block > device->block_end)
    {
        LOG_E("[EIO] block:%d", block);
        return -RT_MTD_EIO;
    }

    page = block * SPINAND_FLASH_PAGE_PER_BLOCK_NUM;

    LOG_D("mark bad block: %d -> page: %d", block, page);

    result = rt_mutex_take(SPINAND_FLASH_LOCK, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    /* Check whether the block is already bad before marking it. */
    if (SPINAND_FLASH_OPS->block_isbad(SPINAND_FLASH_QSPI, page) != 0)
    {
        LOG_W("Block %d is bad.\n", block);
        result = RT_EOK;
    }
    else
    {
        result = SPINAND_FLASH_OPS->block_markbad(SPINAND_FLASH_QSPI, page);
    }

    rt_mutex_release(SPINAND_FLASH_LOCK);

    return result;
}
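
/* The entries below follow the field order of struct rt_mtd_nand_driver_ops:
   read_id, read_page, write_page, move_page, erase_block, check_block and
   mark_badblock. */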
static struct rt_mtd_nand_driver_ops spinand_ops =
{
    spinand_read_id,
    spinand_read_page,
    spinand_write_page,
    spinand_move_page,
    spinand_erase_block,
    spinand_check_block,
    spinand_mark_badblock
};
static uint32_t u32IsInited = 0;

rt_err_t rt_hw_mtd_spinand_init(void)
{
    int i = 0;
    rt_err_t result;
    char szTmp[8];

    if (u32IsInited)
        return RT_EOK;

    result = rt_mutex_init(SPINAND_FLASH_LOCK, "spinand", RT_IPC_FLAG_PRIO);
    RT_ASSERT(result == RT_EOK);

    result = spinand_flash_init(SPINAND_FLASH_QSPI);
    if (result != RT_EOK)
        return -RT_ERROR;

    for (i = 0; i < MTD_SPINAND_PARTITION_NUM; i++)
    {
        mtd_partitions[i].page_size       = SPINAND_FLASH_PAGE_SIZE;          /* Page size in the flash */
        mtd_partitions[i].pages_per_block = SPINAND_FLASH_PAGE_PER_BLOCK_NUM; /* Number of pages in a block */
        mtd_partitions[i].oob_size        = SPINAND_FLASH_OOB_SIZE;           /* Out-of-band (OOB) size */
        mtd_partitions[i].oob_free        = 32;                               /* Free area in OOB not used by the flash driver */
        mtd_partitions[i].plane_num       = SPINAND_FLASH_MCP;                /* Number of planes in the NAND flash */
        mtd_partitions[i].ops             = &spinand_ops;

        rt_snprintf(szTmp, sizeof(szTmp), "nand%d", i);

        result = rt_mtd_nand_register_device(szTmp, &mtd_partitions[i]);
        RT_ASSERT(result == RT_EOK);
    }

    u32IsInited = 1;

    return result;
}

rt_err_t rt_hw_mtd_spinand_register(const char *device_name)
{
    rt_device_t pDev;
    rt_err_t result;

    if ((pDev = rt_device_find(device_name)) == RT_NULL)
        return -RT_ERROR;

    SPINAND_FLASH_QSPI = (struct rt_qspi_device *)pDev;
    SPINAND_FLASH_QSPI->config.parent.mode = RT_SPI_MODE_0 | RT_SPI_MSB;
    SPINAND_FLASH_QSPI->config.parent.data_width = 8;
    SPINAND_FLASH_QSPI->config.parent.max_hz = 48000000;
    SPINAND_FLASH_QSPI->config.ddr_mode = 0;
    SPINAND_FLASH_QSPI->config.qspi_dl_width = 4;

    result = rt_spi_configure(&SPINAND_FLASH_QSPI->parent, &SPINAND_FLASH_QSPI->config.parent);
    RT_ASSERT(result == RT_EOK);

    return rt_hw_mtd_spinand_init();
}
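
/*
 * A minimal registration sketch, assuming the BSP has already attached a
 * QSPI device; the device name ("qspi01" here, as used by nprobe_auto()
 * below) is board-specific:
 *
 *     static int board_nand_init(void)
 *     {
 *         return rt_hw_mtd_spinand_register("qspi01");
 *     }
 *     INIT_COMPONENT_EXPORT(board_nand_init);
 */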
#if defined(RT_USING_DFS_UFFS)
#include "dfs_uffs.h"
void uffs_setup_storage(struct uffs_StorageAttrSt *attr,
                        struct rt_mtd_nand_device *nand)
{
    RT_ASSERT(attr != RT_NULL);
    RT_ASSERT(nand != RT_NULL);

    rt_memset(attr, 0, sizeof(struct uffs_StorageAttrSt));

    attr->page_data_size = nand->page_size;            /* page data size */
    attr->pages_per_block = nand->pages_per_block;     /* pages per block */
    attr->spare_size = nand->oob_size;                 /* page spare size */
    attr->ecc_opt = RT_CONFIG_UFFS_ECC_MODE;           /* ecc option */
    attr->ecc_size = nand->oob_size - nand->oob_free;  /* ecc size */
    attr->block_status_offs = 0;                       /* indicate block bad or good, offset in spare */
    attr->layout_opt = RT_CONFIG_UFFS_LAYOUT;          /* let UFFS do the spare layout */

    /* initialize _uffs_data_layout and _uffs_ecc_layout */
    rt_memcpy(attr->_uffs_data_layout, spinand_flash_data_layout, UFFS_SPARE_LAYOUT_SIZE);
    rt_memcpy(attr->_uffs_ecc_layout, spinand_flash_ecc_layout, UFFS_SPARE_LAYOUT_SIZE);

    attr->data_layout = attr->_uffs_data_layout;
    attr->ecc_layout = attr->_uffs_ecc_layout;
}
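
/*
 * With oob_free fixed at 32 in rt_hw_mtd_spinand_init(), UFFS gets
 * ecc_size = oob_size - oob_free; e.g. a 64-byte OOB (an example figure,
 * the real value is SPINAND_FLASH_OOB_SIZE) would leave 32 bytes for ECC.
 */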
#endif

#include <finsh.h>
static int nread(int argc, char **argv)
{
    int ret = -1;
    rt_uint8_t *spare = RT_NULL;
    rt_uint8_t *data_ptr = RT_NULL;
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, page;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
        goto exit_nread;
    }

    page = atoi(argv[2]);
    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nread;

    device = &mtd_partitions[partition];

    data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
    if (data_ptr == RT_NULL)
    {
        LOG_E("data_ptr: no memory\n");
        goto exit_nread;
    }

    spare = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_OOB_SIZE);
    if (spare == RT_NULL)
    {
        LOG_E("spare: no memory\n");
        goto exit_nread;
    }

    rt_memset(spare, 0, SPINAND_FLASH_OOB_SIZE);
    rt_memset(data_ptr, 0, SPINAND_FLASH_PAGE_SIZE);

    page = page + device->block_start * device->pages_per_block;

    if (spinand_read_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, &spare[0], SPINAND_FLASH_OOB_SIZE) != RT_EOK)
        goto exit_nread;

    spinand_dump_buffer(page, data_ptr, SPINAND_FLASH_PAGE_SIZE, "Data");
    spinand_dump_buffer(page, spare, SPINAND_FLASH_OOB_SIZE, "Spare");

    LOG_I("Partition:%d page-%d", partition, page);

    ret = 0;

exit_nread:
    /* release memory */
    if (data_ptr)
        rt_free(data_ptr);
    if (spare)
        rt_free(spare);

    return ret;
}
static int nwrite(int argc, char **argv)
{
    int i, ret = -1;
    rt_uint8_t *data_ptr = RT_NULL;
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, page;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <page>.\n", __func__, __func__);
        goto exit_nwrite;
    }

    partition = atoi(argv[1]);
    page = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nwrite;

    device = &mtd_partitions[partition];

    data_ptr = (rt_uint8_t *) rt_malloc(SPINAND_FLASH_PAGE_SIZE);
    if (data_ptr == RT_NULL)
    {
        LOG_E("data_ptr: no memory\n");
        goto exit_nwrite;
    }

    /* Fill a varying, non-uniform pattern to exercise ECC. */
    for (i = 0; i < SPINAND_FLASH_PAGE_SIZE; i++)
        data_ptr[i] = i / 5 - i;

    page = page + device->block_start * device->pages_per_block;

    spinand_dump_buffer(page, (uint8_t *)data_ptr, SPINAND_FLASH_PAGE_SIZE, "Data");
    spinand_write_page(device, page, &data_ptr[0], SPINAND_FLASH_PAGE_SIZE, NULL, 0);

    LOG_I("Wrote data into page %d in partition-index %d.", page, partition);

    ret = 0;

exit_nwrite:
    /* release memory */
    if (data_ptr)
        rt_free(data_ptr);

    return ret;
}
static int nmove(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    rt_uint32_t partition, src, dst;

    if (argc != 4)
    {
        LOG_E("Usage %s: %s <partition_no> <src page> <dst page>.\n", __func__, __func__);
        goto exit_nmove;
    }

    partition = atoi(argv[1]);
    src = atoi(argv[2]);
    dst = atoi(argv[3]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nmove;

    device = &mtd_partitions[partition];

    spinand_move_page(device,
                      src + device->block_start * device->pages_per_block,
                      dst + device->block_start * device->pages_per_block);

    LOG_I("Moved data to page %d from page %d in partition-index %d.", dst, src, partition);

    return 0;

exit_nmove:
    return -1;
}
static int nerase(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
        goto exit_nerase;
    }

    partition = atoi(argv[1]);
    block = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase;

    device = &mtd_partitions[partition];

    if (spinand_erase_block(device, block + device->block_start) != RT_EOK)
        goto exit_nerase;

    LOG_I("Erased block %d in partition-index %d.", block + device->block_start, partition);

    return 0;

exit_nerase:
    return -1;
}
static int nerase_force(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>\n", __func__, __func__);
        goto exit_nerase_force;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase_force;

    device = &mtd_partitions[partition];

    for (block = 0; block <= device->block_end; block++)
    {
        if (spinand_erase_block_force(device, block + device->block_start) != RT_EOK)
            goto exit_nerase_force;
        LOG_I("Forcibly erased block %d in partition-index %d.", block + device->block_start, partition);
    }

    return 0;

exit_nerase_force:
    return -1;
}
static rt_err_t nmarkbad(int argc, char **argv)
{
    struct rt_mtd_nand_device *device;
    int partition, block;

    if (argc != 3)
    {
        LOG_E("Usage %s: %s <partition_no> <block_no>.\n", __func__, __func__);
        goto exit_nmarkbad;
    }

    partition = atoi(argv[1]);
    block = atoi(argv[2]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nmarkbad;

    device = &mtd_partitions[partition];

    if (spinand_mark_badblock(device, block + device->block_start) != RT_EOK)
        goto exit_nmarkbad;

    LOG_I("Marked block %d in partition-index %d.", block + device->block_start, partition);

    return 0;

exit_nmarkbad:
    return -1;
}
static int nerase_all(int argc, char **argv)
{
    rt_uint32_t index;
    rt_uint32_t partition;
    struct rt_mtd_nand_device *device;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
        goto exit_nerase_all;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        goto exit_nerase_all;

    device = &mtd_partitions[partition];

    for (index = 0; index < device->block_total; index++)
    {
        spinand_erase_block(device, index);
    }

    LOG_I("Erased all blocks in partition-index %d.", partition);

    return 0;

exit_nerase_all:
    return -1;
}
static int ncheck_all(int argc, char **argv)
{
    rt_uint32_t index;
    rt_uint32_t partition;
    struct rt_mtd_nand_device *device;

    if (argc != 2)
    {
        LOG_E("Usage %s: %s <partition_no>.\n", __func__, __func__);
        return -1;
    }

    partition = atoi(argv[1]);

    if (partition >= MTD_SPINAND_PARTITION_NUM)
        return -1;

    device = &mtd_partitions[partition];

    for (index = 0; index < device->block_total; index++)
    {
        LOG_I("Partition:%d Block-%d is %s", partition, index, spinand_check_block(device, index) ? "bad" : "good");
    }

    return 0;
}
static int nid(int argc, char **argv)
{
    spinand_read_id(RT_NULL);
    return 0;
}
#if defined(SOC_SERIES_MA35D1)
/*
 * This function helps you find a valid timing window for transmission over the SPI bus.
 */
#include "drv_spi.h"
static int find_valid_window(const char *pcDevName)
{
    rt_device_t psRtDev;
    nu_spi_t psNuSpiBus;
    int i, j, k;

    psRtDev = rt_device_find(pcDevName);
    if (!psRtDev || (psRtDev->type != RT_Device_Class_SPIDevice))
    {
        LOG_E("Usage %s: %s <spi device name>.\n", __func__, __func__);
        return -1;
    }

    psNuSpiBus = (nu_spi_t)((struct rt_spi_device *)psRtDev)->bus;

    for (k = 0; k < spinand_supported_flash_size(); k++)
    {
        rt_uint32_t u32JedecId = spinand_info_get(k)->u32JEDECID;
        rt_uint32_t id = 0;

        LOG_I("Probe JEDEC[%08X] on %s bus.", u32JedecId, psNuSpiBus->name);

        rt_kprintf("   "); /* Align the header with the "X: " row labels below. */
        for (i = 0; i < 8; i++) /* Pin driving strength */
            rt_kprintf("%d ", i);
        rt_kprintf("\n");

        for (j = 0; j < 0xC; j++) /* Master RX delay cycle */
        {
            rt_kprintf("%X: ", j);
            for (i = 0; i < 8; i++) /* Pin driving strength */
            {
                SPI_SET_MRXPHASE(psNuSpiBus->spi_base, j);
                GPIO_SetDrivingCtl(PD, (BIT0 | BIT1 | BIT2 | BIT3 | BIT4 | BIT5), i);
                spinand_jedecid_get((struct rt_qspi_device *)psRtDev, &id);
                if (id == u32JedecId)
                {
                    rt_kprintf("O ");
                }
                else
                {
                    rt_kprintf("X ");
                }
            }
            rt_kprintf("\n");
        }
        rt_kprintf("\n");
    } /* for (k = 0; k < spinand_supported_flash_size(); k++) */

    return 0;
}

static int nprobe(int argc, char **argv)
{
    if (argc != 2)
    {
        LOG_E("Usage %s: %s <spi device name>.\n", __func__, __func__);
        return -1;
    }

    find_valid_window(argv[1]);

    return 0;
}

static int nprobe_auto(int argc, char **argv)
{
    int count = 0;
    while (count++ < 100)
        find_valid_window("qspi01");
    return 0;
}

#ifdef FINSH_USING_MSH
MSH_CMD_EXPORT(nprobe_auto, auto nprobe);
MSH_CMD_EXPORT(nprobe, check valid window);
#endif
#endif
static int nlist(int argc, char **argv)
{
    rt_uint32_t index;
    struct rt_mtd_nand_device *device;

    rt_kprintf("\n");
    for (index = 0; index < MTD_SPINAND_PARTITION_NUM; index++)
    {
        device = &mtd_partitions[index];
        rt_kprintf("[Partition #%d]\n", index);
        rt_kprintf("Name: %s\n", device->parent.parent.name);
        rt_kprintf("Start block: %d\n", device->block_start);
        rt_kprintf("End block: %d\n", device->block_end);
        rt_kprintf("Block number: %d\n", device->block_total);
        rt_kprintf("Plane number: %d\n", device->plane_num);
        rt_kprintf("Pages per Block: %d\n", device->pages_per_block);
        rt_kprintf("Page size: %d bytes\n", device->page_size);
        rt_kprintf("Spare size: %d bytes\n", device->oob_size);
        rt_kprintf("Total size: %d bytes (%d KB)\n",
                   device->block_total * device->pages_per_block * device->page_size,
                   device->block_total * device->pages_per_block * device->page_size / 1024);
        rt_kprintf("\n");
    }
    return 0;
}
#ifdef FINSH_USING_MSH
MSH_CMD_EXPORT(nid, nand id);
MSH_CMD_EXPORT(nlist, list all partition information on nand);
MSH_CMD_EXPORT(nmove, nand copy page);
MSH_CMD_EXPORT(nerase, nand erase a block of one partition);
MSH_CMD_EXPORT(nerase_force, nand erase a block of one partition forcibly);
MSH_CMD_EXPORT(nerase_all, erase all blocks of a partition);
MSH_CMD_EXPORT(ncheck_all, check all blocks of a partition);
MSH_CMD_EXPORT(nmarkbad, nand mark bad block of one partition);
MSH_CMD_EXPORT(nwrite, nand write page);
MSH_CMD_EXPORT(nread, nand read page);
#endif
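
/*
 * Illustrative msh session (partition/page/block numbers are examples; valid
 * ranges depend on the flash part and partition table):
 *
 *     msh > nid            # print the JEDEC ID
 *     msh > nlist          # list all partitions
 *     msh > nwrite 0 2     # write a test pattern to page 2 of partition 0
 *     msh > nread 0 2      # read it back and dump data + spare
 *     msh > nerase 0 0     # erase block 0 of partition 0
 */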
#endif /* defined(NU_PKG_USING_SPINAND) && defined(RT_USING_MTD_NAND) */