drv_nand.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2020-06-30 thread-liu first version
  9. */
  10. #include <rtthread.h>
  11. #include <rtdevice.h>
  12. #include <board.h>
  13. #ifdef BSP_USING_NAND
  14. #define DRV_DEBUG
  15. #define LOG_TAG "drv.nand"
  16. #include <drv_log.h>
  17. #include "drv_nand.h"
/* R/B (ready/busy) signal from the NAND chip, wired to PD6. */
#define NAND_RB_PIN GET_PIN(D, 6)

/* One 32-bit ECC word per NAND_ECC_SECTOR_SIZE-byte sector, sized for the
 * largest supported page. */
static rt_uint32_t ecc_rdbuf[NAND_MAX_PAGE_SIZE/NAND_ECC_SECTOR_SIZE]; /* ECC words read back from the spare area */
static rt_uint32_t ecc_hdbuf[NAND_MAX_PAGE_SIZE/NAND_ECC_SECTOR_SIZE]; /* ECC words computed by the FMC hardware */

/* Driver-private state shared by all operations. */
struct rthw_fmc
{
    rt_uint32_t id;       /* chip id word assembled from READ ID bytes 1..4 */
    struct rt_mutex lock; /* serializes page read/write and block erase */
};
static struct rthw_fmc _device = {0};
  27. static void rt_hw_nand_gpio_init(void)
  28. {
  29. GPIO_InitTypeDef GPIO_InitStruct = {0};
  30. RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};
  31. if (IS_ENGINEERING_BOOT_MODE())
  32. {
  33. PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_FMC;
  34. PeriphClkInit.AdcClockSelection = RCC_FMCCLKSOURCE_ACLK;
  35. if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
  36. {
  37. Error_Handler();
  38. }
  39. }
  40. __HAL_RCC_FMC_CLK_ENABLE();
  41. __HAL_RCC_GPIOD_CLK_ENABLE();
  42. __HAL_RCC_GPIOE_CLK_ENABLE();
  43. __HAL_RCC_GPIOG_CLK_ENABLE();
  44. /* PD6 R/B */
  45. GPIO_InitStruct.Pin = GPIO_PIN_6;
  46. GPIO_InitStruct.Mode = GPIO_MODE_INPUT;
  47. GPIO_InitStruct.Pull = GPIO_NOPULL;
  48. GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH;
  49. HAL_GPIO_Init(GPIOD, &GPIO_InitStruct);
  50. /* PG9 NCE */
  51. GPIO_InitStruct.Pin = GPIO_PIN_9;
  52. GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
  53. GPIO_InitStruct.Pull = GPIO_NOPULL;
  54. GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH;
  55. GPIO_InitStruct.Alternate = GPIO_AF12_FMC;
  56. HAL_GPIO_Init(GPIOG, &GPIO_InitStruct);
  57. /* PD0,1,4,5,11,12,14,15 */
  58. GPIO_InitStruct.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5 |
  59. GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_14 | GPIO_PIN_15;
  60. GPIO_InitStruct.Pull = GPIO_NOPULL;
  61. HAL_GPIO_Init(GPIOD, &GPIO_InitStruct);
  62. /* PE7,8,9,10 */
  63. GPIO_InitStruct.Pin = GPIO_PIN_7 | GPIO_PIN_8 | GPIO_PIN_9 | GPIO_PIN_10;
  64. HAL_GPIO_Init(GPIOE, &GPIO_InitStruct);
  65. }
  66. /* nand delay */
  67. static void rt_hw_nand_delay(volatile uint32_t i)
  68. {
  69. while (i > 0)
  70. {
  71. i--;
  72. }
  73. }
  74. /* read nand flash status */
  75. static rt_err_t rt_hw_nand_read_status(void)
  76. {
  77. rt_err_t result = RT_EOK;
  78. NAND_CMD_AREA = NAND_READSTA;
  79. rt_hw_nand_delay(NAND_TWHR_DELAY);
  80. result = NAND_ADDR_AREA;
  81. return result;
  82. }
  83. /* wait nand flash read */
  84. static rt_err_t rt_hw_nand_wait_ready(void)
  85. {
  86. rt_err_t result = RT_EOK;
  87. static uint32_t time = 0;
  88. while (1)
  89. {
  90. result = rt_hw_nand_read_status();
  91. if (result & NAND_READY)
  92. {
  93. break;
  94. }
  95. time++;
  96. if (time >= 0X1FFFFFFF)
  97. {
  98. return RT_ETIMEOUT;
  99. }
  100. }
  101. return RT_EOK;
  102. }
  103. /* set nand mode */
  104. static rt_err_t rt_hw_nand_set_mode(uint8_t mode)
  105. {
  106. NAND_CMD_AREA = NAND_FEATURE;
  107. NAND_DATA_AREA = 0x01;
  108. NAND_ADDR_AREA = mode;
  109. NAND_ADDR_AREA = 0;
  110. NAND_ADDR_AREA = 0;
  111. NAND_ADDR_AREA = 0;
  112. if (rt_hw_nand_wait_ready() == RT_EOK)
  113. {
  114. return RT_EOK;
  115. }
  116. else
  117. {
  118. return RT_ERROR;
  119. }
  120. }
  121. /* reset nand flash */
  122. static rt_err_t rt_hw_nand_reset(void)
  123. {
  124. NAND_CMD_AREA = NAND_RESET;
  125. if (rt_hw_nand_wait_ready() == RT_EOK)
  126. {
  127. return RT_EOK; /* success */
  128. }
  129. else
  130. {
  131. return RT_ERROR;
  132. }
  133. }
  134. /* read nand flash id */
  135. static rt_err_t _read_id(struct rt_mtd_nand_device *device)
  136. {
  137. RT_ASSERT(device != RT_NULL);
  138. uint8_t deviceid[5];
  139. NAND_CMD_AREA = NAND_READID; /* read id command */
  140. NAND_DATA_AREA = 0x00;
  141. deviceid[0] = NAND_ADDR_AREA; /* Byte 0 */
  142. deviceid[1] = NAND_ADDR_AREA; /* Byte 1 */
  143. deviceid[2] = NAND_ADDR_AREA; /* Byte 2 */
  144. deviceid[3] = NAND_ADDR_AREA; /* Byte 3 */
  145. deviceid[4] = NAND_ADDR_AREA; /* Byte 4 */
  146. _device.id = ((uint32_t)deviceid[4]) << 24 | ((uint32_t)deviceid[3]) << 16 | ((uint32_t)deviceid[2]) << 8 | deviceid[1];
  147. LOG_D("nand id: 0x%08x", _device.id);
  148. return RT_EOK;
  149. }
  150. static rt_uint8_t rt_hw_nand_ecc_check(rt_uint32_t generatedEcc, rt_uint32_t readEcc, rt_uint8_t* data)
  151. {
  152. #define ECC_MASK28 0x0FFFFFFF /* 28 valid ECC parity bits. */
  153. #define ECC_MASK 0x05555555 /* 14 ECC parity bits. */
  154. rt_uint32_t count, bitNum, byteAddr;
  155. rt_uint32_t mask;
  156. rt_uint32_t syndrome;
  157. rt_uint32_t eccP; /* 14 even ECC parity bits. */
  158. rt_uint32_t eccPn; /* 14 odd ECC parity bits. */
  159. syndrome = (generatedEcc ^ readEcc) & ECC_MASK28;
  160. if (syndrome == 0)
  161. {
  162. return (RT_EOK); /* No errors in data. */
  163. }
  164. eccPn = syndrome & ECC_MASK; /* Get 14 odd parity bits. */
  165. eccP = (syndrome >> 1) & ECC_MASK; /* Get 14 even parity bits. */
  166. if ((eccPn ^ eccP) == ECC_MASK) /* 1-bit correctable error ? */
  167. {
  168. bitNum = (eccP & 0x01) |
  169. ((eccP >> 1) & 0x02) |
  170. ((eccP >> 2) & 0x04);
  171. LOG_D("ECC bit %d\n",bitNum);
  172. byteAddr = ((eccP >> 6) & 0x001) |
  173. ((eccP >> 7) & 0x002) |
  174. ((eccP >> 8) & 0x004) |
  175. ((eccP >> 9) & 0x008) |
  176. ((eccP >> 10) & 0x010) |
  177. ((eccP >> 11) & 0x020) |
  178. ((eccP >> 12) & 0x040) |
  179. ((eccP >> 13) & 0x080) |
  180. ((eccP >> 14) & 0x100) |
  181. ((eccP >> 15) & 0x200) |
  182. ((eccP >> 16) & 0x400) ;
  183. data[ byteAddr ] ^= 1 << bitNum;
  184. return RT_EOK;
  185. }
  186. /* Count number of one's in the syndrome. */
  187. count = 0;
  188. mask = 0x00800000;
  189. while (mask)
  190. {
  191. if (syndrome & mask)
  192. count++;
  193. mask >>= 1;
  194. }
  195. if (count == 1) /* Error in the ECC itself. */
  196. return RT_EIO;
  197. return RT_EIO; /* Unable to correct data. */
  198. #undef ECC_MASK
  199. #undef ECC_MASK24
  200. }
  201. static rt_err_t _read_page(struct rt_mtd_nand_device *device,
  202. rt_off_t page,
  203. rt_uint8_t *data,
  204. rt_uint32_t data_len,
  205. rt_uint8_t *spare,
  206. rt_uint32_t spare_len)
  207. {
  208. RT_ASSERT(device != RT_NULL);
  209. rt_uint32_t index, i, tickstart, eccnum;
  210. rt_err_t result;
  211. rt_uint8_t *p = RT_NULL;
  212. page = page + device->block_start * device->pages_per_block;
  213. if (page / device->pages_per_block > device->block_end)
  214. {
  215. return -RT_EIO;
  216. }
  217. rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);
  218. if (data && data_len)
  219. {
  220. NAND_CMD_AREA = NAND_AREA_A;
  221. NAND_DATA_AREA = (rt_uint8_t)0;
  222. NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
  223. NAND_DATA_AREA = (rt_uint8_t)page;
  224. NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
  225. NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
  226. NAND_CMD_AREA = NAND_AREA_TRUE1;
  227. rt_hw_nand_delay(10);
  228. /* not an integer multiple of NAND ECC SECTOR SIZE, no ECC checks*/
  229. if (data_len % NAND_ECC_SECTOR_SIZE)
  230. {
  231. for (i = 0; i < data_len; i++)
  232. {
  233. *data++ = NAND_ADDR_AREA;
  234. }
  235. }
  236. else
  237. {
  238. eccnum = data_len/NAND_ECC_SECTOR_SIZE;
  239. p = data;
  240. for (index = 0; index < 4; index++)
  241. {
  242. FMC_Bank3_R->PCR |= 1<<6; /* enable ecc */
  243. for (i = 0; i < NAND_ECC_SECTOR_SIZE; i++)
  244. {
  245. *data++ = NAND_ADDR_AREA;
  246. }
  247. /* Get tick */
  248. tickstart = rt_tick_get();
  249. /* Wait until FIFO is empty */
  250. while ((FMC_Bank3_R->SR & (1 << 6)) == RESET)
  251. {
  252. /* Check for the Timeout */
  253. if ((rt_tick_get() - tickstart) > 10000)
  254. {
  255. result = RT_ETIMEOUT;
  256. goto _exit;
  257. }
  258. }
  259. ecc_hdbuf[index] = FMC_Bank3_R->HECCR; /* read hardware ecc */
  260. FMC_Bank3_R->PCR &= ~(1<<6); /* disable ecc */
  261. }
  262. i = device->page_size + 0x10;
  263. rt_hw_nand_delay(10);
  264. NAND_CMD_AREA = 0x05;
  265. NAND_DATA_AREA = (rt_uint8_t)i;
  266. NAND_DATA_AREA = (rt_uint8_t)(i>>8);
  267. NAND_CMD_AREA = 0xE0;
  268. rt_hw_nand_delay(10);
  269. data =(rt_uint8_t*)&ecc_rdbuf[0];
  270. for (i = 0; i < 4*eccnum; i++)
  271. {
  272. *data++ = NAND_ADDR_AREA;
  273. }
  274. /* check ecc */
  275. for(i = 0; i< eccnum; i++)
  276. {
  277. if(ecc_rdbuf[i] != ecc_hdbuf[i])
  278. {
  279. result = rt_hw_nand_ecc_check(ecc_hdbuf[i], ecc_rdbuf[i], p + NAND_ECC_SECTOR_SIZE*i);
  280. if (result != RT_EOK)
  281. {
  282. goto _exit;
  283. }
  284. }
  285. }
  286. }
  287. }
  288. if (spare && spare_len)
  289. {
  290. NAND_CMD_AREA = NAND_AREA_A;
  291. NAND_DATA_AREA = (rt_uint8_t)0;
  292. NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
  293. NAND_DATA_AREA = (rt_uint8_t)page;
  294. NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
  295. NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
  296. NAND_CMD_AREA = NAND_AREA_TRUE1;
  297. rt_thread_delay(10);
  298. for (i = 0; i < spare_len; i ++)
  299. {
  300. *spare++ = NAND_ADDR_AREA;
  301. }
  302. }
  303. if (rt_hw_nand_wait_ready() != RT_EOK)
  304. {
  305. result = RT_ETIMEOUT;
  306. goto _exit;
  307. }
  308. _exit:
  309. rt_mutex_release(&_device.lock);
  310. return result;
  311. }
/**
 * Program one page's main data and/or spare area.
 *
 * @param device    MTD NAND device
 * @param page      page number relative to device->block_start
 * @param data      main-area data to program (may be RT_NULL)
 * @param data_len  bytes to program into the main area
 * @param spare     spare-area data to program (may be RT_NULL)
 * @param spare_len bytes to program into the spare area (copy starts at index 4)
 *
 * When data_len is a multiple of NAND_ECC_SECTOR_SIZE, the hardware ECC is
 * captured per sector and appended at offset page_size + 0x10 via RANDOM
 * DATA IN, so _read_page() can verify it later.
 */
static rt_err_t _write_page(struct rt_mtd_nand_device *device,
                            rt_off_t page,
                            const rt_uint8_t *data,
                            rt_uint32_t data_len,
                            const rt_uint8_t *spare,
                            rt_uint32_t spare_len)
{
    RT_ASSERT(device != RT_NULL);
    rt_err_t result = RT_EOK;
    rt_uint32_t eccnum;
    rt_uint32_t i, index;
    rt_uint32_t tickstart = 0;

    /* Translate to an absolute page number and bounds-check the block. */
    page = page + device->block_start * device->pages_per_block;
    if (page / device->pages_per_block > device->block_end)
    {
        return -RT_EIO;
    }

    rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);

    if (data && data_len)
    {
        /* PROGRAM setup: column 0 followed by the 3-cycle row (page) address. */
        NAND_CMD_AREA = NAND_WRITE0;
        NAND_DATA_AREA = (rt_uint8_t)0;
        NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
        rt_hw_nand_delay(10);

        if (data_len % NAND_ECC_SECTOR_SIZE)
        {
            /* Not a multiple of the ECC sector size: write without ECC. */
            for (i = 0; i < data_len; i++)
            {
                NAND_ADDR_AREA = *data++;
            }
        }
        else
        {
            /* Write sector by sector, capturing the hardware ECC of each. */
            eccnum = data_len/NAND_ECC_SECTOR_SIZE;
            for (index = 0; index < eccnum; index++)
            {
                FMC_Bank3_R->PCR |= 1<<6; /* enable ecc */
                for (i = 0; i < NAND_ECC_SECTOR_SIZE; i++)
                {
                    NAND_ADDR_AREA = *data++;
                }
                /* Wait until the ECC FIFO is empty, with a tick-based timeout. */
                tickstart = rt_tick_get();
                while ((FMC_Bank3_R->SR & (1 << 6)) == RESET)
                {
                    if ((rt_tick_get() - tickstart) > 10000)
                    {
                        result = RT_ETIMEOUT;
                        goto _exit;
                    }
                }
                ecc_hdbuf[index] = FMC_Bank3_R->HECCR; /* read hardware ecc */
                FMC_Bank3_R->PCR &= ~(1<<6); /* disable ecc */
            }

            /* RANDOM DATA IN (0x85): move the column to the ECC slot in the
             * spare area and append the computed ECC words (4 bytes each). */
            i = device->page_size + 0x10;
            rt_hw_nand_delay(10);
            NAND_CMD_AREA = 0x85;
            NAND_DATA_AREA = (rt_uint8_t)i;
            NAND_DATA_AREA = (rt_uint8_t)(i>>8);
            rt_hw_nand_delay(10);
            data = (uint8_t*)&ecc_hdbuf[0];
            for (index = 0; index < eccnum; index++)
            {
                for (i = 0; i < 4; i++)
                {
                    NAND_ADDR_AREA = *data++;
                }
            }
        }
    }

    NAND_CMD_AREA = NAND_WRITE_TURE1; /* PROGRAM confirm */
    if (rt_hw_nand_wait_ready() != RT_EOK)
    {
        result = -RT_EIO;
        goto _exit;
    }

    if (spare && spare_len)
    {
        /* Program the spare area.
         * NOTE(review): the column is hard-coded to 4096 rather than
         * device->page_size, and the copy starts at spare[4] — presumably
         * the first 4 spare bytes are reserved for other metadata; confirm
         * before changing. */
        NAND_CMD_AREA = NAND_WRITE0;
        NAND_DATA_AREA = (rt_uint8_t)(4096 & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(4096 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
        for (i = 4; i < spare_len; i++)
        {
            NAND_ADDR_AREA = spare[i];
        }
        NAND_CMD_AREA = NAND_WRITE_TURE1;
        if (rt_hw_nand_wait_ready() != RT_EOK)
        {
            result = -RT_EIO;
            goto _exit;
        }
    }

_exit:
    rt_mutex_release(&_device.lock);
    return result;
}
  417. /* erase one block */
  418. static rt_err_t _erase_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
  419. {
  420. RT_ASSERT(device != RT_NULL);
  421. unsigned int block_num;
  422. rt_err_t result = RT_EOK;
  423. block = block + device->block_start;
  424. block_num = block << 6;
  425. rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);
  426. NAND_CMD_AREA = NAND_ERASE0;
  427. NAND_DATA_AREA = (uint8_t)block_num;
  428. NAND_DATA_AREA = (uint8_t)(block_num >> 8);
  429. NAND_DATA_AREA = (uint8_t)(block_num >> 16);
  430. NAND_CMD_AREA = NAND_ERASE1;
  431. rt_thread_delay(NAND_TBERS_DELAY);
  432. if (rt_hw_nand_wait_ready() != RT_EOK)
  433. {
  434. result = -RT_ERROR;
  435. }
  436. rt_mutex_release(&_device.lock);
  437. return result;
  438. }
  439. static rt_err_t _page_copy(struct rt_mtd_nand_device *device,
  440. rt_off_t src_page,
  441. rt_off_t dst_page)
  442. {
  443. RT_ASSERT(device != RT_NULL);
  444. rt_err_t result = RT_EOK;
  445. rt_uint32_t source_block = 0, dest_block = 0;
  446. src_page = src_page + device->block_start * device->pages_per_block;
  447. dst_page = dst_page + device->block_start * device->pages_per_block;
  448. source_block = src_page / device->pages_per_block;
  449. dest_block = dst_page / device->pages_per_block;
  450. if ((source_block % 2) != (dest_block % 2))
  451. {
  452. return RT_MTD_ESRC;
  453. }
  454. NAND_CMD_AREA = NAND_MOVEDATA_CMD0;
  455. NAND_DATA_AREA = (rt_uint8_t)(0 & 0xFF);
  456. NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
  457. NAND_DATA_AREA = (rt_uint8_t)(src_page & 0xFF);
  458. NAND_DATA_AREA = (rt_uint8_t)(src_page >> 8);
  459. NAND_DATA_AREA = (rt_uint8_t)(src_page >> 16);
  460. NAND_CMD_AREA = NAND_MOVEDATA_CMD1;
  461. rt_hw_nand_delay(10);
  462. NAND_CMD_AREA = NAND_MOVEDATA_CMD2;
  463. NAND_DATA_AREA = ((rt_uint8_t)(0 & 0xFF));
  464. NAND_DATA_AREA = ((rt_uint8_t)(0 >> 8));
  465. NAND_DATA_AREA = ((rt_uint8_t)(dst_page & 0xFF));
  466. NAND_DATA_AREA = ((rt_uint8_t)(dst_page >> 8));
  467. NAND_DATA_AREA = ((rt_uint8_t)(dst_page >> 16));
  468. NAND_CMD_AREA = (NAND_MOVEDATA_CMD3);
  469. if (rt_hw_nand_wait_ready() != RT_EOK)
  470. {
  471. result = -RT_ERROR;
  472. }
  473. return result;
  474. }
  475. static rt_err_t _check_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
  476. {
  477. RT_ASSERT(device != RT_NULL);
  478. return (RT_MTD_EOK);
  479. }
  480. static rt_err_t _mark_bad(struct rt_mtd_nand_device *device, rt_uint32_t block)
  481. {
  482. RT_ASSERT(device != RT_NULL);
  483. return (RT_MTD_EOK);
  484. }
/* MTD NAND operations table, in rt_mtd_nand_driver_ops member order. */
static const struct rt_mtd_nand_driver_ops ops =
{
    _read_id,
    _read_page,
    _write_page,
    _page_copy,
    _erase_block,
    _check_block,
    _mark_bad,
};
/* The single NAND device instance registered with the MTD layer. */
static struct rt_mtd_nand_device nand_dev;
/* Bring up the FMC NAND controller, reset the chip, and verify its id.
 * Returns RT_EOK on success, RT_ERROR if the id does not match the
 * supported MT29F8G08ABACAH4. */
static rt_err_t nand_init(struct rt_mtd_nand_device *device)
{
    RT_ASSERT(device != RT_NULL);
    uint32_t tempreg = 0;

    rt_hw_nand_gpio_init();

    /* Build the FMC NAND control register (PCR) value bit by bit. */
    tempreg |= 0 << 1;  /* disable Wait feature enable bit */
    tempreg |= 0 << 4;  /* Data bus width 8 */
    tempreg |= 0 << 6;  /* disable ECC here — it is toggled per sector during transfers */
    tempreg |= 1 << 17; /* ECC page 512 BYTE */
    tempreg |= 5 << 9;  /* set TCLR */
    tempreg |= 5 << 13; /* set TAR */
    FMC_Bank3_R->PCR = tempreg; /* set nand control register */

    /* Common memory space timing (PMEM). */
    tempreg &= 0;
    tempreg |= 3 << 0;  /* set MEMSET */
    tempreg |= 5 << 8;  /* set MEMWAIT */
    tempreg |= 2 << 16; /* set MEMHOLD */
    tempreg |= 3 << 24; /* set MEMHIZ */
    FMC_Bank3_R->PMEM = tempreg;
    FMC_Bank3_R->PATT = 0;      /* Attribute memory space timing registers */
    FMC_Bank3_R->PCR |= 1 << 2; /* NAND Flash memory bank enable bit */
    FMC_Bank1_R->BTCR[0] |= (uint32_t)1 << 31; /* enable fmc */

    rt_hw_nand_reset(); /* reset nand flash */
    rt_thread_delay(100);

    /* Read and check the chip id; only the MT29F8G08ABACAH4 is supported. */
    _read_id(&nand_dev);
    if (_device.id != MT29F8G08ABACAH4)
    {
        LOG_E("nand id 0x%08x not support", _device.id);
        return RT_ERROR; /* can't find nand flash */
    }

    rt_hw_nand_set_mode(4); /* set mode 4, high speed mode */

    return RT_EOK;
}
  529. int rt_hw_nand_init(void)
  530. {
  531. rt_err_t result = RT_EOK;
  532. rt_pin_mode(NAND_RB_PIN, PIN_MODE_INPUT_PULLUP); /* nand flash R/B pin */
  533. result = nand_init(&nand_dev);
  534. if (result != RT_EOK)
  535. {
  536. LOG_D("nand flash init error!");
  537. return RT_ERROR;
  538. }
  539. rt_mutex_init(&_device.lock, "nand", RT_IPC_FLAG_PRIO);
  540. nand_dev.page_size = 4096;
  541. nand_dev.pages_per_block = 224;
  542. nand_dev.plane_num = 2;
  543. nand_dev.oob_size = 64;
  544. nand_dev.oob_free = 64 - ((4096) * 3 / 256);
  545. nand_dev.block_start = 0;
  546. nand_dev.block_end = 4095;
  547. nand_dev.block_total = nand_dev.block_end - nand_dev.block_start;
  548. nand_dev.ops = &ops;
  549. result = rt_mtd_nand_register_device("nand", &nand_dev);
  550. if (result != RT_EOK)
  551. {
  552. rt_device_unregister(&nand_dev.parent);
  553. return RT_ERROR;
  554. }
  555. rt_kprintf("nand flash init success, id: 0x%08x\n", _device.id);
  556. return RT_EOK;
  557. }
  558. INIT_DEVICE_EXPORT(rt_hw_nand_init);
  559. #endif