drv_eth.c

/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-10-10     Tanek        the first version
 * 2019-5-10      misonyo      add DMA TX and RX function
 */

#include <rtthread.h>
#include "board.h"
#include <rtdevice.h>

#ifdef RT_USING_FINSH
#include <finsh.h>
#endif

#include "fsl_enet.h"
#include "fsl_gpio.h"
#include "fsl_phy.h"
#include "fsl_cache.h"
#include "fsl_iomuxc.h"
#include "fsl_common.h"

#ifdef RT_USING_LWIP

#include <netif/ethernetif.h>
#include "lwipopts.h"

#define ENET_RXBD_NUM       (4)
#define ENET_TXBD_NUM       (4)
#define ENET_RXBUFF_SIZE    (ENET_FRAME_MAX_FRAMELEN)
#define ENET_TXBUFF_SIZE    (ENET_FRAME_MAX_FRAMELEN)

/* debug option */
#undef ETH_RX_DUMP
#undef ETH_TX_DUMP

#define DBG_ENABLE
#define DBG_SECTION_NAME    "[ETH]"
#define DBG_COLOR
#define DBG_LEVEL           DBG_INFO
#include <rtdbg.h>

#define MAX_ADDR_LEN 6

struct rt_imxrt_eth
{
    /* inherit from ethernet device */
    struct eth_device parent;

    enet_handle_t enet_handle;
    ENET_Type *enet_base;
    enet_data_error_stats_t error_statistic;
    rt_uint8_t dev_addr[MAX_ADDR_LEN];      /* hw address */

    rt_bool_t tx_is_waiting;
    struct rt_semaphore tx_wait;

    enet_mii_speed_t speed;
    enet_mii_duplex_t duplex;
};
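
/*
 * DMA descriptor rings and packet buffers.
 * The buffer descriptors are placed in a non-cacheable section so the CPU and
 * the ENET DMA always agree on the READY/WRAP control bits; the data buffers
 * stay in normal memory and rely on explicit cache maintenance (see the
 * DCACHE_CleanByRange calls in the TX path) when the SDK cache-control option
 * is enabled.
 */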
AT_NONCACHEABLE_SECTION_ALIGN(enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM], ENET_BUFF_ALIGNMENT);
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
AT_NONCACHEABLE_SECTION_ALIGN(enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM], ENET_BUFF_ALIGNMENT);
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];

static struct rt_imxrt_eth imxrt_eth_device;
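
/*
 * Interrupt-to-thread hand-off: the RX callback masks the RX frame interrupt
 * and signals the lwIP ethernet thread via eth_device_ready(); the interrupt
 * is re-enabled in rt_imxrt_eth_rx() once all pending frames have been read.
 * The TX callback simply wakes a sender that blocked because every TX
 * descriptor was still owned by the DMA.
 */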
void _enet_rx_callback(struct rt_imxrt_eth *eth)
{
    rt_err_t result;

    ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);

    result = eth_device_ready(&(eth->parent));
    if (result != RT_EOK)
        rt_kprintf("RX err = %d\n", result);
}

void _enet_tx_callback(struct rt_imxrt_eth *eth)
{
    if (eth->tx_is_waiting == RT_TRUE)
    {
        eth->tx_is_waiting = RT_FALSE;
        rt_sem_release(&eth->tx_wait);
    }
}

void _enet_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *userData)
{
    switch (event)
    {
    case kENET_RxEvent:
        _enet_rx_callback((struct rt_imxrt_eth *)userData);
        break;

    case kENET_TxEvent:
        _enet_tx_callback((struct rt_imxrt_eth *)userData);
        break;

    case kENET_ErrEvent:
        dbg_log(DBG_LOG, "kENET_ErrEvent\n");
        break;

    case kENET_WakeUpEvent:
        dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
        break;

    case kENET_TimeStampEvent:
        dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
        break;

    case kENET_TimeStampAvailEvent:
        dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent\n");
        break;

    default:
        dbg_log(DBG_LOG, "unknown event\n");
        break;
    }
}
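
/*
 * Clock setup: CLOCK_InitEnetPll() with loopDivider = 1 selects the 50 MHz
 * output typically used as the RMII reference clock, and the TX clock
 * direction is set to output so the MCU supplies that reference to the
 * external PHY.
 */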
static void _enet_clk_init(void)
{
    const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
    CLOCK_InitEnetPll(&config);

    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);

    IOMUXC_GPR->GPR1 |= 1 << 23;
}
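
/*
 * (Re)configure the ENET peripheral with the descriptor rings declared above.
 * This runs at driver init and again from the PHY monitor thread whenever the
 * negotiated speed/duplex changes, so the MAC is torn down (ENET_Deinit) and
 * brought back up with the new MII settings each time.
 */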
static void _enet_config(void)
{
    enet_config_t config;
    uint32_t sysClock;

    /* prepare the buffer configuration. */
    enet_buffer_config_t buffConfig =
    {
        ENET_RXBD_NUM,
        ENET_TXBD_NUM,
        SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        &g_rxBuffDescrip[0],
        &g_txBuffDescrip[0],
        &g_rxDataBuff[0][0],
        &g_txDataBuff[0][0],
    };

    /* Get default configuration.
     * config.miiMode = kENET_RmiiMode;
     * config.miiSpeed = kENET_MiiSpeed100M;
     * config.miiDuplex = kENET_MiiFullDuplex;
     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
     */
    ENET_GetDefaultConfig(&config);
    config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
    config.miiSpeed = imxrt_eth_device.speed;
    config.miiDuplex = imxrt_eth_device.duplex;

    /* Set SMI to get PHY link status. */
    sysClock = CLOCK_GetFreq(kCLOCK_AhbClk);

    dbg_log(DBG_LOG, "deinit\n");
    ENET_Deinit(imxrt_eth_device.enet_base);

    dbg_log(DBG_LOG, "init\n");
    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig, &imxrt_eth_device.dev_addr[0], sysClock);

    dbg_log(DBG_LOG, "set call back\n");
    ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);

    dbg_log(DBG_LOG, "active read\n");
    ENET_ActiveRead(imxrt_eth_device.enet_base);
}

#if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
static void packet_dump(const char *msg, const struct pbuf *p)
{
    const struct pbuf *q;
    rt_uint32_t i, j;
    rt_uint8_t *ptr;

    rt_kprintf("%s %d byte\n", msg, p->tot_len);

    i = 0;
    for (q = p; q != RT_NULL; q = q->next)
    {
        ptr = q->payload;

        for (j = 0; j < q->len; j++)
        {
            if ((i % 8) == 0)
            {
                rt_kprintf(" ");
            }
            if ((i % 16) == 0)
            {
                rt_kprintf("\r\n");
            }
            rt_kprintf("%02x ", *ptr);

            i++;
            ptr++;
        }
    }

    rt_kprintf("\n\n");
}
#else
#define packet_dump(...)
#endif /* dump */

/* initialize the interface */
static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
    _enet_config();

    return RT_EOK;
}

static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
    return RT_EOK;
}

static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
    return RT_EOK;
}

static rt_size_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_size_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
    switch (cmd)
    {
    case NIOCTL_GADDR:
        /* get MAC address */
        if (args)
            rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
        else
            return -RT_ERROR;
        break;

    default:
        break;
    }

    return RT_EOK;
}

static void _ENET_ActiveSend(ENET_Type *base, uint32_t ringId)
{
    assert(ringId < FSL_FEATURE_ENET_QUEUE);

    switch (ringId)
    {
    case 0:
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case kENET_Ring1:
        base->TDAR1 = ENET_TDAR1_TDAR_MASK;
        break;
    case kENET_Ring2:
        base->TDAR2 = ENET_TDAR2_TDAR_MASK;
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
    }
}
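
/*
 * Local variant of the SDK frame-send routine: it copies the outgoing packet
 * straight from an lwIP pbuf chain (pbuf_copy_partial) into the DMA buffer of
 * the current TX descriptor, then hands the descriptor to the hardware.
 * With ENET_TXBUFF_SIZE set to ENET_FRAME_MAX_FRAMELEN above, a frame always
 * fits into a single TX buffer, so the multi-descriptor branch below is kept
 * for completeness but is not normally taken.
 */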
static status_t _ENET_SendFrame(ENET_Type *base, enet_handle_t *handle, const uint8_t *data, uint32_t length)
{
    assert(handle);
    assert(data);

    volatile enet_tx_bd_struct_t *curBuffDescrip;
    uint32_t len = 0;
    uint32_t sizeleft = 0;
    uint32_t address;

    /* Check the frame length. */
    if (length > ENET_FRAME_MAX_FRAMELEN)
    {
        return kStatus_ENET_TxFrameOverLen;
    }

    /* Check if the transmit buffer is ready. */
    curBuffDescrip = handle->txBdCurrent[0];
    if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
    {
        return kStatus_ENET_TxFrameBusy;
    }

#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    bool isPtpEventMessage = false;
    /* Check PTP message with the PTP header. */
    isPtpEventMessage = ENET_Ptp1588ParseFrame(data, NULL, true);
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */

    /* One transmit buffer is enough for one frame. */
    if (handle->txBuffSizeAlign[0] >= length)
    {
        /* Copy data to the buffer for uDMA transfer. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
        address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
        address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
        pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);

        /* Set data length. */
        curBuffDescrip->length = length;
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
        /* Enable the timestamp for PTP event frames. */
        if (isPtpEventMessage)
        {
            curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
        }
        else
        {
            curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
        }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
        curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);

        /* Increase the buffer descriptor address. */
        if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
        {
            handle->txBdCurrent[0] = handle->txBdBase[0];
        }
        else
        {
            handle->txBdCurrent[0]++;
        }

#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
        /* Add the cache clean maintenance. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
        address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
        address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
        DCACHE_CleanByRange(address, length);
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
        /* Activate the transmit buffer descriptor. */
        _ENET_ActiveSend(base, 0);

        return kStatus_Success;
    }
    else
    {
        /* One frame requires more than one transmit buffer. */
        do
        {
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
            /* Enable the timestamp for PTP event frames. */
            if (isPtpEventMessage)
            {
                curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
            }
            else
            {
                curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
            }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */

            /* Increase the buffer descriptor address. */
            if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
            {
                handle->txBdCurrent[0] = handle->txBdBase[0];
            }
            else
            {
                handle->txBdCurrent[0]++;
            }

            /* Update the size left to be transmitted. */
            sizeleft = length - len;
            if (sizeleft > handle->txBuffSizeAlign[0])
            {
                /* Data copy. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                memcpy((void *)address, data + len, handle->txBuffSizeAlign[0]);

                /* Data length update. */
                curBuffDescrip->length = handle->txBuffSizeAlign[0];
                len += handle->txBuffSizeAlign[0];

                /* Set the control flags. */
                curBuffDescrip->control &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;

                /* Activate the transmit buffer descriptor. */
                _ENET_ActiveSend(base, 0);
            }
            else
            {
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                memcpy((void *)address, data + len, sizeleft);
                curBuffDescrip->length = sizeleft;

                /* Set the last buffer and ready flags. */
                curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;

#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                /* Add the cache clean maintenance. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                DCACHE_CleanByRange(address, handle->txBuffSizeAlign[0]);
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                /* Activate the transmit buffer descriptor. */
                _ENET_ActiveSend(base, 0);

                return kStatus_Success;
            }

            /* Get the current buffer descriptor address. */
            curBuffDescrip = handle->txBdCurrent[0];
        } while (!(curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));

        return kStatus_ENET_TxFrameBusy;
    }
}
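
/*
 * TX flow control: if every TX descriptor is still owned by the DMA,
 * _ENET_SendFrame() returns kStatus_ENET_TxFrameBusy; the sender then marks
 * tx_is_waiting and blocks on the tx_wait semaphore until the TX-complete
 * interrupt (see _enet_tx_callback) releases it, after which the send is
 * retried.
 */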
/* ethernet device interface */
/* transmit packet. */
rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t result = RT_EOK;
    enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;

    RT_ASSERT(p != NULL);
    RT_ASSERT(enet_handle != RT_NULL);

    dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);

#ifdef ETH_TX_DUMP
    packet_dump("send", p);
#endif

    do
    {
        result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len);

        if (result == kStatus_ENET_TxFrameBusy)
        {
            imxrt_eth_device.tx_is_waiting = RT_TRUE;
            rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
        }
    }
    while (result == kStatus_ENET_TxFrameBusy);

    return RT_EOK;
}
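
/*
 * RX path: called by the lwIP ethernet thread after eth_device_ready() until
 * it returns NULL. Each received frame is copied out of the DMA ring into a
 * freshly allocated pbuf; only when no frame (or an RX error) is left does the
 * driver re-enable the RX frame interrupt that was masked in the callback.
 */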
/* receive packet. */
struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
{
    uint32_t length = 0;
    status_t status;
    struct pbuf *p = RT_NULL;
    enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
    ENET_Type *enet_base = imxrt_eth_device.enet_base;
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

    /* Get the frame size */
    status = ENET_GetRxFrameSize(enet_handle, &length);

    /* Call ENET_ReadFrame when there is a received frame. */
    if (length != 0)
    {
        /* Received valid frame. Deliver the rx buffer with the size equal to length. */
        p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);

        if (p != NULL)
        {
            status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length);
            if (status == kStatus_Success)
            {
#ifdef ETH_RX_DUMP
                packet_dump("recv", p);
#endif
                return p;
            }
            else
            {
                dbg_log(DBG_LOG, "frame read failed\n");
                pbuf_free(p);
            }
        }
        else
        {
            dbg_log(DBG_LOG, "pbuf_alloc failed\n");
        }
    }
    else if (status == kStatus_ENET_RxFrameError)
    {
        dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");

        /* Get the error information of the received frame. */
        ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic);
        /* Update the receive buffer. */
        ENET_ReadFrame(enet_base, enet_handle, NULL, 0);
    }

    /* Re-enable the RX interrupt (disabled in _enet_rx_callback) once there is
     * nothing more to deliver. */
    ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);

    return NULL;
}
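
/*
 * PHY monitor thread: polls the PHY link status every two seconds, logs the
 * negotiated speed/duplex, re-runs the ENET configuration when those settings
 * change, and reports link up/down transitions to the network stack through
 * eth_device_linkchange().
 */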
static void phy_monitor_thread_entry(void *parameter)
{
    phy_speed_t speed;
    phy_duplex_t duplex;
    bool link = false;

    imxrt_enet_phy_reset_by_gpio();

    PHY_Init(imxrt_eth_device.enet_base, PHY_ADDRESS, CLOCK_GetFreq(kCLOCK_AhbClk));

    while (1)
    {
        bool new_link = false;
        status_t status = PHY_GetLinkStatus(imxrt_eth_device.enet_base, PHY_ADDRESS, &new_link);

        if ((status == kStatus_Success) && (link != new_link))
        {
            link = new_link;

            if (link)   /* link up */
            {
                PHY_GetLinkSpeedDuplex(imxrt_eth_device.enet_base,
                                       PHY_ADDRESS, &speed, &duplex);

                if (kPHY_Speed10M == speed)
                {
                    dbg_log(DBG_LOG, "10M\n");
                }
                else
                {
                    dbg_log(DBG_LOG, "100M\n");
                }

                if (kPHY_HalfDuplex == duplex)
                {
                    dbg_log(DBG_LOG, "half duplex\n");
                }
                else
                {
                    dbg_log(DBG_LOG, "full duplex\n");
                }

                if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed)
                    || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
                {
                    imxrt_eth_device.speed = (enet_mii_speed_t)speed;
                    imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;

                    dbg_log(DBG_LOG, "link up, and update eth mode.\n");
                    rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
                }
                else
                {
                    dbg_log(DBG_LOG, "link up, eth does not need re-config.\n");
                }

                dbg_log(DBG_LOG, "link up.\n");
                eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
            }
            else
            {
                dbg_log(DBG_LOG, "link down.\n");
                eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
            }
        }

        rt_thread_delay(RT_TICK_PER_SECOND * 2);
    }
}
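
/*
 * Driver registration: fills in a (test-only, hard-coded) MAC address, wires
 * up the RT-Thread device and eth_device callbacks, registers the interface
 * as "e0" and starts the PHY monitor thread. INIT_DEVICE_EXPORT() below makes
 * this run automatically during system initialization.
 */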
static int rt_hw_imxrt_eth_init(void)
{
    rt_err_t state;

    _enet_clk_init();

    /* NXP (Freescale) MAC OUI */
    imxrt_eth_device.dev_addr[0] = 0x00;
    imxrt_eth_device.dev_addr[1] = 0x04;
    imxrt_eth_device.dev_addr[2] = 0x9F;
    /* low 3 bytes of the MAC address: fixed test value (not derived from the
     * 96-bit unique ID here). */
    imxrt_eth_device.dev_addr[3] = 0x05;
    imxrt_eth_device.dev_addr[4] = 0x44;
    imxrt_eth_device.dev_addr[5] = 0xE5;

    imxrt_eth_device.speed = kENET_MiiSpeed100M;
    imxrt_eth_device.duplex = kENET_MiiFullDuplex;

    imxrt_eth_device.enet_base = ENET;

    imxrt_eth_device.parent.parent.init = rt_imxrt_eth_init;
    imxrt_eth_device.parent.parent.open = rt_imxrt_eth_open;
    imxrt_eth_device.parent.parent.close = rt_imxrt_eth_close;
    imxrt_eth_device.parent.parent.read = rt_imxrt_eth_read;
    imxrt_eth_device.parent.parent.write = rt_imxrt_eth_write;
    imxrt_eth_device.parent.parent.control = rt_imxrt_eth_control;
    imxrt_eth_device.parent.parent.user_data = RT_NULL;

    imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
    imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;

    dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
    /* init tx semaphore */
    rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);

    /* register eth device */
    dbg_log(DBG_LOG, "eth_device_init start\r\n");
    state = eth_device_init(&(imxrt_eth_device.parent), "e0");
    if (RT_EOK == state)
    {
        dbg_log(DBG_LOG, "eth_device_init success\r\n");
    }
    else
    {
        dbg_log(DBG_LOG, "eth_device_init failed: %d\r\n", state);
    }

    eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);

    /* start phy monitor */
    {
        rt_thread_t tid;

        tid = rt_thread_create("phy",
                               phy_monitor_thread_entry,
                               RT_NULL,
                               512,
                               RT_THREAD_PRIORITY_MAX - 2,
                               2);

        if (tid != RT_NULL)
            rt_thread_startup(tid);
    }

    return state;
}
INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);

#endif /* RT_USING_LWIP */

#ifdef RT_USING_FINSH
#include <finsh.h>

void phy_read(uint32_t phyReg)
{
    uint32_t data;
    status_t status;

    status = PHY_Read(imxrt_eth_device.enet_base, PHY_ADDRESS, phyReg, &data);
    if (kStatus_Success == status)
    {
        rt_kprintf("PHY_Read: %02X --> %08X\n", phyReg, data);
    }
    else
    {
        rt_kprintf("PHY_Read: %02X --> failed\n", phyReg);
    }
}

void phy_write(uint32_t phyReg, uint32_t data)
{
    status_t status;

    status = PHY_Write(imxrt_eth_device.enet_base, PHY_ADDRESS, phyReg, data);
    if (kStatus_Success == status)
    {
        rt_kprintf("PHY_Write: %02X --> %08X\n", phyReg, data);
    }
    else
    {
        rt_kprintf("PHY_Write: %02X --> failed\n", phyReg);
    }
}

void phy_dump(void)
{
    uint32_t data;
    status_t status;
    int i;

    for (i = 0; i < 32; i++)
    {
        status = PHY_Read(imxrt_eth_device.enet_base, PHY_ADDRESS, i, &data);
        if (kStatus_Success != status)
        {
            rt_kprintf("phy_dump: %02X --> failed\n", i);
            break;
        }

        rt_kprintf("%02X --> %08X ", i, data);
        /* print eight registers per line */
        if (i % 8 == 7)
        {
            rt_kprintf("\n");
        }
    }
}

void enet_reg_dump(void)
{
    ENET_Type *enet_base = imxrt_eth_device.enet_base;

#define DUMP_REG(__REG)  \
    rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)

    DUMP_REG(EIR);
    DUMP_REG(EIMR);
    DUMP_REG(RDAR);
    DUMP_REG(TDAR);
    DUMP_REG(ECR);
    DUMP_REG(MMFR);
    DUMP_REG(MSCR);
    DUMP_REG(MIBC);
    DUMP_REG(RCR);
    DUMP_REG(TCR);
    DUMP_REG(PALR);
    DUMP_REG(PAUR);
    DUMP_REG(OPD);
    DUMP_REG(TXIC);
    DUMP_REG(RXIC);
    DUMP_REG(IAUR);
    DUMP_REG(IALR);
    DUMP_REG(GAUR);
    DUMP_REG(GALR);
    DUMP_REG(TFWR);
    DUMP_REG(RDSR);
    DUMP_REG(TDSR);
    DUMP_REG(MRBR);
    DUMP_REG(RSFL);
    DUMP_REG(RSEM);
    DUMP_REG(RAEM);
    DUMP_REG(RAFL);
    DUMP_REG(TSEM);
    DUMP_REG(TAEM);
    DUMP_REG(TAFL);
    DUMP_REG(TIPG);
    DUMP_REG(FTRL);
    DUMP_REG(TACC);
    DUMP_REG(RACC);
    DUMP_REG(RMON_T_DROP);
    DUMP_REG(RMON_T_PACKETS);
    DUMP_REG(RMON_T_BC_PKT);
    DUMP_REG(RMON_T_MC_PKT);
    DUMP_REG(RMON_T_CRC_ALIGN);
    DUMP_REG(RMON_T_UNDERSIZE);
    DUMP_REG(RMON_T_OVERSIZE);
    DUMP_REG(RMON_T_FRAG);
    DUMP_REG(RMON_T_JAB);
    DUMP_REG(RMON_T_COL);
    DUMP_REG(RMON_T_P64);
    DUMP_REG(RMON_T_P65TO127);
    DUMP_REG(RMON_T_P128TO255);
    DUMP_REG(RMON_T_P256TO511);
    DUMP_REG(RMON_T_P512TO1023);
    DUMP_REG(RMON_T_P1024TO2047);
    DUMP_REG(RMON_T_P_GTE2048);
    DUMP_REG(RMON_T_OCTETS);
    DUMP_REG(IEEE_T_DROP);
    DUMP_REG(IEEE_T_FRAME_OK);
    DUMP_REG(IEEE_T_1COL);
    DUMP_REG(IEEE_T_MCOL);
    DUMP_REG(IEEE_T_DEF);
    DUMP_REG(IEEE_T_LCOL);
    DUMP_REG(IEEE_T_EXCOL);
    DUMP_REG(IEEE_T_MACERR);
    DUMP_REG(IEEE_T_CSERR);
    DUMP_REG(IEEE_T_SQE);
    DUMP_REG(IEEE_T_FDXFC);
    DUMP_REG(IEEE_T_OCTETS_OK);
    DUMP_REG(RMON_R_PACKETS);
    DUMP_REG(RMON_R_BC_PKT);
    DUMP_REG(RMON_R_MC_PKT);
    DUMP_REG(RMON_R_CRC_ALIGN);
    DUMP_REG(RMON_R_UNDERSIZE);
    DUMP_REG(RMON_R_OVERSIZE);
    DUMP_REG(RMON_R_FRAG);
    DUMP_REG(RMON_R_JAB);
    DUMP_REG(RMON_R_RESVD_0);
    DUMP_REG(RMON_R_P64);
    DUMP_REG(RMON_R_P65TO127);
    DUMP_REG(RMON_R_P128TO255);
    DUMP_REG(RMON_R_P256TO511);
    DUMP_REG(RMON_R_P512TO1023);
    DUMP_REG(RMON_R_P1024TO2047);
    DUMP_REG(RMON_R_P_GTE2048);
    DUMP_REG(RMON_R_OCTETS);
    DUMP_REG(IEEE_R_DROP);
    DUMP_REG(IEEE_R_FRAME_OK);
    DUMP_REG(IEEE_R_CRC);
    DUMP_REG(IEEE_R_ALIGN);
    DUMP_REG(IEEE_R_MACERR);
    DUMP_REG(IEEE_R_FDXFC);
    DUMP_REG(IEEE_R_OCTETS_OK);
    DUMP_REG(ATCR);
    DUMP_REG(ATVR);
    DUMP_REG(ATOFF);
    DUMP_REG(ATPER);
    DUMP_REG(ATCOR);
    DUMP_REG(ATINC);
    DUMP_REG(ATSTMP);
    DUMP_REG(TGSR);
}

void enet_nvic_tog(void)
{
    NVIC_SetPendingIRQ(ENET_IRQn);
}

void enet_rx_stat(void)
{
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

#define DUMP_STAT(__VAR)  \
    rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);

    DUMP_STAT(statsRxLenGreaterErr);
    DUMP_STAT(statsRxAlignErr);
    DUMP_STAT(statsRxFcsErr);
    DUMP_STAT(statsRxOverRunErr);
    DUMP_STAT(statsRxTruncateErr);

#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    DUMP_STAT(statsRxProtocolChecksumErr);
    DUMP_STAT(statsRxIpHeadChecksumErr);
    DUMP_STAT(statsRxMacErr);
    DUMP_STAT(statsRxPhyErr);
    DUMP_STAT(statsRxCollisionErr);
    DUMP_STAT(statsTxErr);
    DUMP_STAT(statsTxFrameErr);
    DUMP_STAT(statsTxOverFlowErr);
    DUMP_STAT(statsTxLateCollisionErr);
    DUMP_STAT(statsTxExcessCollisionErr);
    DUMP_STAT(statsTxUnderFlowErr);
    DUMP_STAT(statsTxTsErr);
#endif
}

void enet_buf_info(void)
{
    int i = 0;

    for (i = 0; i < ENET_RXBD_NUM; i++)
    {
        rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
                   i,
                   g_rxBuffDescrip[i].length,
                   g_rxBuffDescrip[i].control,
                   g_rxBuffDescrip[i].buffer);
    }

    for (i = 0; i < ENET_TXBD_NUM; i++)
    {
        rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
                   i,
                   g_txBuffDescrip[i].length,
                   g_txBuffDescrip[i].control,
                   g_txBuffDescrip[i].buffer);
    }
}

FINSH_FUNCTION_EXPORT(phy_read, read phy register);
FINSH_FUNCTION_EXPORT(phy_write, write phy register);
FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pending bit);
FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistics);
FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet rx and tx buffer descriptors);

#endif /* RT_USING_FINSH */