drv_eth.c

/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-10-10     Tanek        the first version
 * 2019-5-10      misonyo      add DMA TX and RX function
 */

#include <rtthread.h>
#include "board.h"
#include <rtdevice.h>

#ifdef RT_USING_FINSH
#include <finsh.h>
#endif

#include "fsl_enet.h"
#include "fsl_gpio.h"
#include "fsl_phy.h"
#include "fsl_cache.h"
#include "fsl_iomuxc.h"

#ifdef RT_USING_LWIP

#include <netif/ethernetif.h>
#include "lwipopts.h"

#define ENET_RXBD_NUM           (4)
#define ENET_TXBD_NUM           (4)
#define ENET_RXBUFF_SIZE        (ENET_FRAME_MAX_FRAMELEN)
#define ENET_TXBUFF_SIZE        (ENET_FRAME_MAX_FRAMELEN)

/* debug option */
#undef ETH_RX_DUMP
#undef ETH_TX_DUMP

#define DBG_ENABLE
#define DBG_SECTION_NAME    "[ETH]"
#define DBG_COLOR
#define DBG_LEVEL           DBG_INFO
#include <rtdbg.h>

#define MAX_ADDR_LEN        6

struct rt_imxrt_eth
{
    /* inherit from ethernet device */
    struct eth_device parent;

    enet_handle_t enet_handle;
    ENET_Type *enet_base;
    enet_data_error_stats_t error_statistic;
    rt_uint8_t dev_addr[MAX_ADDR_LEN];      /* hw address */

    rt_bool_t tx_is_waiting;
    struct rt_semaphore tx_wait;

    enet_mii_speed_t speed;
    enet_mii_duplex_t duplex;
};

ALIGN(ENET_BUFF_ALIGNMENT) enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM] SECTION("NonCacheable");
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
ALIGN(ENET_BUFF_ALIGNMENT) enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM] SECTION("NonCacheable");
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];

static struct rt_imxrt_eth imxrt_eth_device;
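
/*
 * ENET interrupt callbacks.
 *
 * The RX callback masks the RX frame interrupt and notifies the lwIP RX thread
 * via eth_device_ready(); the interrupt is re-enabled at the end of
 * rt_imxrt_eth_rx() once the frame has been consumed. The TX callback wakes a
 * sender that blocked on tx_wait because all TX descriptors were busy.
 */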
void _enet_rx_callback(struct rt_imxrt_eth *eth)
{
    rt_err_t result;

    ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);

    result = eth_device_ready(&(eth->parent));
    if (result != RT_EOK)
        rt_kprintf("RX err = %d\n", result);
}

void _enet_tx_callback(struct rt_imxrt_eth *eth)
{
    if (eth->tx_is_waiting == RT_TRUE)
    {
        eth->tx_is_waiting = RT_FALSE;
        rt_sem_release(&eth->tx_wait);
    }
}

void _enet_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *userData)
{
    switch (event)
    {
    case kENET_RxEvent:
        _enet_rx_callback((struct rt_imxrt_eth *)userData);
        break;

    case kENET_TxEvent:
        _enet_tx_callback((struct rt_imxrt_eth *)userData);
        break;

    case kENET_ErrEvent:
        dbg_log(DBG_LOG, "kENET_ErrEvent\n");
        break;

    case kENET_WakeUpEvent:
        dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
        break;

    case kENET_TimeStampEvent:
        dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
        break;

    case kENET_TimeStampAvailEvent:
        dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent\n");
        break;

    default:
        dbg_log(DBG_LOG, "unknown event\n");
        break;
    }
}
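
/*
 * Clock setup: configure the ENET PLL reference clock (loopDivider = 1, i.e.
 * the 50 MHz RMII reference clock on this family) and drive it out on the
 * ENET1 TX_CLK pad via the IOMUXC GPR output-direction mode, so the external
 * RMII PHY can be clocked from the MCU.
 */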
static void _enet_clk_init(void)
{
    const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
    CLOCK_InitEnetPll(&config);

    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
    IOMUXC_GPR->GPR1 |= 1 << 23;
}

static void _enet_config(void)
{
    enet_config_t config;
    uint32_t sysClock;

    /* prepare the buffer configuration. */
    enet_buffer_config_t buffConfig =
    {
        ENET_RXBD_NUM,
        ENET_TXBD_NUM,
        SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        &g_rxBuffDescrip[0],
        &g_txBuffDescrip[0],
        &g_rxDataBuff[0][0],
        &g_txDataBuff[0][0],
    };

    /* Get default configuration. */
    /*
     * config.miiMode = kENET_RmiiMode;
     * config.miiSpeed = kENET_MiiSpeed100M;
     * config.miiDuplex = kENET_MiiFullDuplex;
     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
     */
    ENET_GetDefaultConfig(&config);

    config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
    config.miiSpeed = imxrt_eth_device.speed;
    config.miiDuplex = imxrt_eth_device.duplex;

    /* Set SMI to get PHY link status. */
    sysClock = CLOCK_GetFreq(kCLOCK_AhbClk);

    dbg_log(DBG_LOG, "deinit\n");
    ENET_Deinit(imxrt_eth_device.enet_base);
    dbg_log(DBG_LOG, "init\n");
    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig, &imxrt_eth_device.dev_addr[0], sysClock);
    dbg_log(DBG_LOG, "set call back\n");
    ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
    dbg_log(DBG_LOG, "active read\n");
    ENET_ActiveRead(imxrt_eth_device.enet_base);
}

#if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
static void packet_dump(const char *msg, const struct pbuf *p)
{
    const struct pbuf *q;
    rt_uint32_t i, j;
    rt_uint8_t *ptr;

    rt_kprintf("%s %d byte\n", msg, p->tot_len);

    i = 0;
    for (q = p; q != RT_NULL; q = q->next)
    {
        ptr = q->payload;

        for (j = 0; j < q->len; j++)
        {
            if ((i % 8) == 0)
            {
                rt_kprintf(" ");
            }
            if ((i % 16) == 0)
            {
                rt_kprintf("\r\n");
            }
            rt_kprintf("%02x ", *ptr);

            i++;
            ptr++;
        }
    }

    rt_kprintf("\n\n");
}
#else
#define packet_dump(...)
#endif /* dump */

/* initialize the interface */
static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
    _enet_config();

    return RT_EOK;
}

static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
    return RT_EOK;
}

static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
    return RT_EOK;
}

static rt_size_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_size_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
    switch (cmd)
    {
    case NIOCTL_GADDR:
        /* get mac address */
        if (args)
            rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
        else
            return -RT_ERROR;
        break;

    default:
        break;
    }

    return RT_EOK;
}
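
/*
 * Kick the TX DMA: writing TDAR tells the ENET controller that new, ready
 * descriptors are available on the given ring. This driver only uses ring 0.
 */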
static void _ENET_ActiveSend(ENET_Type *base, uint32_t ringId)
{
    assert(ringId < FSL_FEATURE_ENET_QUEUE);

    switch (ringId)
    {
    case 0:
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case kENET_Ring1:
        base->TDAR1 = ENET_TDAR1_TDAR_MASK;
        break;
    case kENET_Ring2:
        base->TDAR2 = ENET_TDAR2_TDAR_MASK;
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
    }
}
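
/*
 * Local variant of the SDK's ENET_SendFrame(). In the common single-descriptor
 * case the caller passes the pbuf chain itself as "data" and the payload is
 * copied straight into the DMA buffer with pbuf_copy_partial(); the
 * multi-descriptor path copies with plain memcpy() and still indexes "data"
 * as a flat byte buffer.
 */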
static status_t _ENET_SendFrame(ENET_Type *base, enet_handle_t *handle, const uint8_t *data, uint32_t length)
{
    assert(handle);
    assert(data);

    volatile enet_tx_bd_struct_t *curBuffDescrip;
    uint32_t len = 0;
    uint32_t sizeleft = 0;
    uint32_t address;

    /* Check the frame length. */
    if (length > ENET_FRAME_MAX_FRAMELEN)
    {
        return kStatus_ENET_TxFrameOverLen;
    }

    /* Check if the transmit buffer is ready. */
    curBuffDescrip = handle->txBdCurrent[0];
    if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
    {
        return kStatus_ENET_TxFrameBusy;
    }
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    bool isPtpEventMessage = false;
    /* Check PTP message with the PTP header. */
    isPtpEventMessage = ENET_Ptp1588ParseFrame(data, NULL, true);
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
    /* One transmit buffer is enough for one frame. */
    if (handle->txBuffSizeAlign[0] >= length)
    {
        /* Copy data to the buffer for uDMA transfer. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
        address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
        address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
        pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
        /* Set data length. */
        curBuffDescrip->length = length;
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
        /* Enable the timestamp if this is a PTP event message. */
        if (isPtpEventMessage)
        {
            curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
        }
        else
        {
            curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
        }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
        curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);

        /* Advance the buffer descriptor pointer. */
        if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
        {
            handle->txBdCurrent[0] = handle->txBdBase[0];
        }
        else
        {
            handle->txBdCurrent[0]++;
        }
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
        /* Add the cache clean maintenance. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
        address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
        address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
        DCACHE_CleanByRange(address, length);
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
        /* Activate the transmit buffer descriptor. */
        _ENET_ActiveSend(base, 0);

        return kStatus_Success;
    }
    else
    {
        /* One frame requires more than one transmit buffer. */
        do
        {
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
            /* Enable the timestamp if this is a PTP event message. */
            if (isPtpEventMessage)
            {
                curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
            }
            else
            {
                curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
            }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
            /* Advance the buffer descriptor pointer. */
            if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
            {
                handle->txBdCurrent[0] = handle->txBdBase[0];
            }
            else
            {
                handle->txBdCurrent[0]++;
            }
            /* Update the size left to be transmitted. */
            sizeleft = length - len;
            if (sizeleft > handle->txBuffSizeAlign[0])
            {
                /* Data copy. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                memcpy((void *)address, data + len, handle->txBuffSizeAlign[0]);
                /* Data length update. */
                curBuffDescrip->length = handle->txBuffSizeAlign[0];
                len += handle->txBuffSizeAlign[0];
                /* Set the control flags. */
                curBuffDescrip->control &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
                /* Activate the transmit buffer descriptor. */
                _ENET_ActiveSend(base, 0);
            }
            else
            {
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                memcpy((void *)address, data + len, sizeleft);
                curBuffDescrip->length = sizeleft;
                /* Set the last buffer and ready flags. */
                curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                /* Add the cache clean maintenance. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                DCACHE_CleanByRange(address, handle->txBuffSizeAlign[0]);
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                /* Activate the transmit buffer descriptor. */
                _ENET_ActiveSend(base, 0);

                return kStatus_Success;
            }
            /* Get the current buffer descriptor address. */
            curBuffDescrip = handle->txBdCurrent[0];
        } while (!(curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));

        return kStatus_ENET_TxFrameBusy;
    }
}

/* ethernet device interface */
/* transmit packet. */
rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t result = RT_EOK;
    enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;

    RT_ASSERT(p != NULL);
    RT_ASSERT(enet_handle != RT_NULL);

    dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);

#ifdef ETH_TX_DUMP
    packet_dump("send", p);
#endif

    do
    {
        result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len);

        if (result == kStatus_ENET_TxFrameBusy)
        {
            imxrt_eth_device.tx_is_waiting = RT_TRUE;
            rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
        }
    }
    while (result == kStatus_ENET_TxFrameBusy);

    return RT_EOK;
}

/* reception packet. */
struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
{
    uint32_t length = 0;
    status_t status;
    struct pbuf *p = RT_NULL;
    enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
    ENET_Type *enet_base = imxrt_eth_device.enet_base;
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

    /* Get the frame size. */
    status = ENET_GetRxFrameSize(enet_handle, &length);

    /* Call ENET_ReadFrame when there is a received frame. */
    if (length != 0)
    {
        /* Received valid frame. Deliver the rx buffer with the size equal to length. */
        p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
        if (p != NULL)
        {
            status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length);
            if (status == kStatus_Success)
            {
#ifdef ETH_RX_DUMP
                packet_dump("recv", p);
#endif
                return p;
            }
            else
            {
                dbg_log(DBG_LOG, " A frame read failed\n");
                pbuf_free(p);
            }
        }
        else
        {
            dbg_log(DBG_LOG, " pbuf_alloc failed\n");
        }
    }
    else if (status == kStatus_ENET_RxFrameError)
    {
        dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");
        /* Get the error information of the received frame. */
        ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic);
        /* Update the receive buffer when an error happened. */
        ENET_ReadFrame(enet_base, enet_handle, NULL, 0);
    }

    ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);

    return NULL;
}
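
/*
 * PHY monitor thread: polls the PHY link status every two seconds. On a link
 * change it reads the negotiated speed/duplex, re-runs the ENET configuration
 * when the MII settings changed, and reports link up/down to the lwIP stack
 * via eth_device_linkchange().
 */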
static void phy_monitor_thread_entry(void *parameter)
{
    phy_speed_t speed;
    phy_duplex_t duplex;
    bool link = false;

    imxrt_enet_phy_reset_by_gpio();

    PHY_Init(imxrt_eth_device.enet_base, PHY_ADDRESS, CLOCK_GetFreq(kCLOCK_AhbClk));

    while (1)
    {
        bool new_link = false;
        status_t status = PHY_GetLinkStatus(imxrt_eth_device.enet_base, PHY_ADDRESS, &new_link);

        if ((status == kStatus_Success) && (link != new_link))
        {
            link = new_link;

            if (link)   /* link up */
            {
                PHY_GetLinkSpeedDuplex(imxrt_eth_device.enet_base,
                                       PHY_ADDRESS, &speed, &duplex);

                if (kPHY_Speed10M == speed)
                {
                    dbg_log(DBG_LOG, "10M\n");
                }
                else
                {
                    dbg_log(DBG_LOG, "100M\n");
                }

                if (kPHY_HalfDuplex == duplex)
                {
                    dbg_log(DBG_LOG, "half duplex\n");
                }
                else
                {
                    dbg_log(DBG_LOG, "full duplex\n");
                }

                if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed)
                    || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
                {
                    imxrt_eth_device.speed = (enet_mii_speed_t)speed;
                    imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;
                    dbg_log(DBG_LOG, "link up, and update eth mode.\n");
                    rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
                }
                else
                {
                    dbg_log(DBG_LOG, "link up, eth does not need re-config.\n");
                }
                dbg_log(DBG_LOG, "link up.\n");
                eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
            }
            else
            {
                dbg_log(DBG_LOG, "link down.\n");
                eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
            }
        }

        rt_thread_delay(RT_TICK_PER_SECOND * 2);
    }
}
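
/*
 * Driver initialization: set up the ENET clocks and a (test-only) MAC address,
 * register the device as "e0" with the RT-Thread ethernet framework, and start
 * the PHY monitor thread. Exported via INIT_DEVICE_EXPORT so it runs
 * automatically during board initialization.
 */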
static int rt_hw_imxrt_eth_init(void)
{
    rt_err_t state;

    _enet_clk_init();

    /* NXP (Freescale) MAC OUI */
    imxrt_eth_device.dev_addr[0] = 0x00;
    imxrt_eth_device.dev_addr[1] = 0x04;
    imxrt_eth_device.dev_addr[2] = 0x9F;
    /* generate MAC addr from 96bit unique ID (only for test). */
    imxrt_eth_device.dev_addr[3] = 0x05;
    imxrt_eth_device.dev_addr[4] = 0x44;
    imxrt_eth_device.dev_addr[5] = 0xE5;

    imxrt_eth_device.speed = kENET_MiiSpeed100M;
    imxrt_eth_device.duplex = kENET_MiiFullDuplex;

    imxrt_eth_device.enet_base = ENET;

    imxrt_eth_device.parent.parent.init      = rt_imxrt_eth_init;
    imxrt_eth_device.parent.parent.open      = rt_imxrt_eth_open;
    imxrt_eth_device.parent.parent.close     = rt_imxrt_eth_close;
    imxrt_eth_device.parent.parent.read      = rt_imxrt_eth_read;
    imxrt_eth_device.parent.parent.write     = rt_imxrt_eth_write;
    imxrt_eth_device.parent.parent.control   = rt_imxrt_eth_control;
    imxrt_eth_device.parent.parent.user_data = RT_NULL;

    imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
    imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;

    dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
    /* init tx semaphore */
    rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);

    /* register eth device */
    dbg_log(DBG_LOG, "eth_device_init start\r\n");
    state = eth_device_init(&(imxrt_eth_device.parent), "e0");
    if (RT_EOK == state)
    {
        dbg_log(DBG_LOG, "eth_device_init success\r\n");
    }
    else
    {
        dbg_log(DBG_LOG, "eth_device_init failed: %d\r\n", state);
    }

    eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);

    /* start phy monitor */
    {
        rt_thread_t tid;
        tid = rt_thread_create("phy",
                               phy_monitor_thread_entry,
                               RT_NULL,
                               512,
                               RT_THREAD_PRIORITY_MAX - 2,
                               2);
        if (tid != RT_NULL)
            rt_thread_startup(tid);
    }

    return state;
}
INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);

#endif /* RT_USING_LWIP */
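
/*
 * FINSH shell helpers for debugging: read/write/dump PHY registers, dump the
 * ENET register file, and inspect RX error statistics and the DMA buffer
 * descriptor rings from the finsh console.
 */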
#ifdef RT_USING_FINSH
#include <finsh.h>

void phy_read(uint32_t phyReg)
{
    uint32_t data;
    status_t status;

    status = PHY_Read(imxrt_eth_device.enet_base, PHY_ADDRESS, phyReg, &data);
    if (kStatus_Success == status)
    {
        rt_kprintf("PHY_Read: %02X --> %08X\n", phyReg, data);
    }
    else
    {
        rt_kprintf("PHY_Read: %02X --> failed\n", phyReg);
    }
}

void phy_write(uint32_t phyReg, uint32_t data)
{
    status_t status;

    status = PHY_Write(imxrt_eth_device.enet_base, PHY_ADDRESS, phyReg, data);
    if (kStatus_Success == status)
    {
        rt_kprintf("PHY_Write: %02X --> %08X\n", phyReg, data);
    }
    else
    {
        rt_kprintf("PHY_Write: %02X --> failed\n", phyReg);
    }
}

void phy_dump(void)
{
    uint32_t data;
    status_t status;
    int i;

    for (i = 0; i < 32; i++)
    {
        status = PHY_Read(imxrt_eth_device.enet_base, PHY_ADDRESS, i, &data);
        if (kStatus_Success != status)
        {
            rt_kprintf("phy_dump: %02X --> failed\n", i);
            break;
        }

        /* print eight registers per line */
        if (i % 8 == 7)
        {
            rt_kprintf("%02X --> %08X\n", i, data);
        }
        else
        {
            rt_kprintf("%02X --> %08X ", i, data);
        }
    }
}

void enet_reg_dump(void)
{
    ENET_Type *enet_base = imxrt_eth_device.enet_base;

#define DUMP_REG(__REG)  \
    rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)

    DUMP_REG(EIR);
    DUMP_REG(EIMR);
    DUMP_REG(RDAR);
    DUMP_REG(TDAR);
    DUMP_REG(ECR);
    DUMP_REG(MMFR);
    DUMP_REG(MSCR);
    DUMP_REG(MIBC);
    DUMP_REG(RCR);
    DUMP_REG(TCR);
    DUMP_REG(PALR);
    DUMP_REG(PAUR);
    DUMP_REG(OPD);
    DUMP_REG(TXIC);
    DUMP_REG(RXIC);
    DUMP_REG(IAUR);
    DUMP_REG(IALR);
    DUMP_REG(GAUR);
    DUMP_REG(GALR);
    DUMP_REG(TFWR);
    DUMP_REG(RDSR);
    DUMP_REG(TDSR);
    DUMP_REG(MRBR);
    DUMP_REG(RSFL);
    DUMP_REG(RSEM);
    DUMP_REG(RAEM);
    DUMP_REG(RAFL);
    DUMP_REG(TSEM);
    DUMP_REG(TAEM);
    DUMP_REG(TAFL);
    DUMP_REG(TIPG);
    DUMP_REG(FTRL);
    DUMP_REG(TACC);
    DUMP_REG(RACC);
    DUMP_REG(RMON_T_DROP);
    DUMP_REG(RMON_T_PACKETS);
    DUMP_REG(RMON_T_BC_PKT);
    DUMP_REG(RMON_T_MC_PKT);
    DUMP_REG(RMON_T_CRC_ALIGN);
    DUMP_REG(RMON_T_UNDERSIZE);
    DUMP_REG(RMON_T_OVERSIZE);
    DUMP_REG(RMON_T_FRAG);
    DUMP_REG(RMON_T_JAB);
    DUMP_REG(RMON_T_COL);
    DUMP_REG(RMON_T_P64);
    DUMP_REG(RMON_T_P65TO127);
    DUMP_REG(RMON_T_P128TO255);
    DUMP_REG(RMON_T_P256TO511);
    DUMP_REG(RMON_T_P512TO1023);
    DUMP_REG(RMON_T_P1024TO2047);
    DUMP_REG(RMON_T_P_GTE2048);
    DUMP_REG(RMON_T_OCTETS);
    DUMP_REG(IEEE_T_DROP);
    DUMP_REG(IEEE_T_FRAME_OK);
    DUMP_REG(IEEE_T_1COL);
    DUMP_REG(IEEE_T_MCOL);
    DUMP_REG(IEEE_T_DEF);
    DUMP_REG(IEEE_T_LCOL);
    DUMP_REG(IEEE_T_EXCOL);
    DUMP_REG(IEEE_T_MACERR);
    DUMP_REG(IEEE_T_CSERR);
    DUMP_REG(IEEE_T_SQE);
    DUMP_REG(IEEE_T_FDXFC);
    DUMP_REG(IEEE_T_OCTETS_OK);
    DUMP_REG(RMON_R_PACKETS);
    DUMP_REG(RMON_R_BC_PKT);
    DUMP_REG(RMON_R_MC_PKT);
    DUMP_REG(RMON_R_CRC_ALIGN);
    DUMP_REG(RMON_R_UNDERSIZE);
    DUMP_REG(RMON_R_OVERSIZE);
    DUMP_REG(RMON_R_FRAG);
    DUMP_REG(RMON_R_JAB);
    DUMP_REG(RMON_R_RESVD_0);
    DUMP_REG(RMON_R_P64);
    DUMP_REG(RMON_R_P65TO127);
    DUMP_REG(RMON_R_P128TO255);
    DUMP_REG(RMON_R_P256TO511);
    DUMP_REG(RMON_R_P512TO1023);
    DUMP_REG(RMON_R_P1024TO2047);
    DUMP_REG(RMON_R_P_GTE2048);
    DUMP_REG(RMON_R_OCTETS);
    DUMP_REG(IEEE_R_DROP);
    DUMP_REG(IEEE_R_FRAME_OK);
    DUMP_REG(IEEE_R_CRC);
    DUMP_REG(IEEE_R_ALIGN);
    DUMP_REG(IEEE_R_MACERR);
    DUMP_REG(IEEE_R_FDXFC);
    DUMP_REG(IEEE_R_OCTETS_OK);
    DUMP_REG(ATCR);
    DUMP_REG(ATVR);
    DUMP_REG(ATOFF);
    DUMP_REG(ATPER);
    DUMP_REG(ATCOR);
    DUMP_REG(ATINC);
    DUMP_REG(ATSTMP);
    DUMP_REG(TGSR);
}

void enet_nvic_tog(void)
{
    NVIC_SetPendingIRQ(ENET_IRQn);
}

void enet_rx_stat(void)
{
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

#define DUMP_STAT(__VAR)  \
    rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);

    DUMP_STAT(statsRxLenGreaterErr);
    DUMP_STAT(statsRxAlignErr);
    DUMP_STAT(statsRxFcsErr);
    DUMP_STAT(statsRxOverRunErr);
    DUMP_STAT(statsRxTruncateErr);
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    DUMP_STAT(statsRxProtocolChecksumErr);
    DUMP_STAT(statsRxIpHeadChecksumErr);
    DUMP_STAT(statsRxMacErr);
    DUMP_STAT(statsRxPhyErr);
    DUMP_STAT(statsRxCollisionErr);
    DUMP_STAT(statsTxErr);
    DUMP_STAT(statsTxFrameErr);
    DUMP_STAT(statsTxOverFlowErr);
    DUMP_STAT(statsTxLateCollisionErr);
    DUMP_STAT(statsTxExcessCollisionErr);
    DUMP_STAT(statsTxUnderFlowErr);
    DUMP_STAT(statsTxTsErr);
#endif
}

void enet_buf_info(void)
{
    int i = 0;

    for (i = 0; i < ENET_RXBD_NUM; i++)
    {
        rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
                   i,
                   g_rxBuffDescrip[i].length,
                   g_rxBuffDescrip[i].control,
                   g_rxBuffDescrip[i].buffer);
    }

    for (i = 0; i < ENET_TXBD_NUM; i++)
    {
        rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
                   i,
                   g_txBuffDescrip[i].length,
                   g_txBuffDescrip[i].control,
                   g_txBuffDescrip[i].buffer);
    }
}

FINSH_FUNCTION_EXPORT(phy_read, read phy register);
FINSH_FUNCTION_EXPORT(phy_write, write phy register);
FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pending bit);
FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistics);
FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet tx and rx buffer descriptors);

#endif /* RT_USING_FINSH */