/* drv_eth.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2017-10-10 Tanek the first version
  9. * 2019-5-10 misonyo add DMA TX and RX function
  10. * 2020-10-14 wangqiang use phy device in phy monitor thread
  11. */
  12. #include <rtthread.h>
  13. #include "board.h"
  14. #include <rtdevice.h>
  15. #ifdef RT_USING_FINSH
  16. #include <finsh.h>
  17. #endif
  18. #include "fsl_enet.h"
  19. #include "fsl_gpio.h"
  20. #include "fsl_cache.h"
  21. #include "fsl_iomuxc.h"
  22. #include "fsl_common.h"
  23. #ifdef RT_USING_LWIP
  24. #include <netif/ethernetif.h>
  25. #include "lwipopts.h"
  26. #define ENET_RXBD_NUM (4)
  27. #define ENET_TXBD_NUM (4)
  28. #define ENET_RXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  29. #define ENET_TXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  30. /* debug option */
  31. #undef ETH_RX_DUMP
  32. #undef ETH_TX_DUMP
  33. #define DBG_ENABLE
  34. #define DBG_SECTION_NAME "[ETH]"
  35. #define DBG_COLOR
  36. #define DBG_LEVEL DBG_INFO
  37. #include <rtdbg.h>
  38. #define MAX_ADDR_LEN 6
/* i.MXRT ethernet device object; the single instance is imxrt_eth_device. */
struct rt_imxrt_eth
{
    /* inherit from ethernet device */
    struct eth_device parent;
    enet_handle_t enet_handle;                  /* NXP SDK ENET transactional handle */
    ENET_Type *enet_base;                       /* ENET peripheral base address */
    enet_data_error_stats_t error_statistic;    /* RX/TX error counters (see enet_rx_stat) */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];          /* hw address */
    rt_bool_t tx_is_waiting;                    /* a sender is blocked on tx_wait */
    struct rt_semaphore tx_wait;                /* released by the TX-complete callback */
    enet_mii_speed_t speed;                     /* current MII speed (updated by PHY monitor) */
    enet_mii_duplex_t duplex;                   /* current MII duplex (updated by PHY monitor) */
};
/* DMA buffer descriptors are placed in a non-cacheable section; the data
 * buffers themselves are cacheable and maintained explicitly (see the
 * DCACHE_CleanByRange calls in the TX path). */
AT_NONCACHEABLE_SECTION_ALIGN(enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM], ENET_BUFF_ALIGNMENT);
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
AT_NONCACHEABLE_SECTION_ALIGN(enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM], ENET_BUFF_ALIGNMENT);
ALIGN(ENET_BUFF_ALIGNMENT) rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
/* the single ethernet device instance of this driver */
static struct rt_imxrt_eth imxrt_eth_device;
  57. void _enet_rx_callback(struct rt_imxrt_eth *eth)
  58. {
  59. rt_err_t result;
  60. ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);
  61. result = eth_device_ready(&(eth->parent));
  62. if (result != RT_EOK)
  63. rt_kprintf("RX err =%d\n", result);
  64. }
  65. void _enet_tx_callback(struct rt_imxrt_eth *eth)
  66. {
  67. if (eth->tx_is_waiting == RT_TRUE)
  68. {
  69. eth->tx_is_waiting = RT_FALSE;
  70. rt_sem_release(&eth->tx_wait);
  71. }
  72. }
  73. void _enet_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *userData)
  74. {
  75. switch (event)
  76. {
  77. case kENET_RxEvent:
  78. _enet_rx_callback((struct rt_imxrt_eth *)userData);
  79. break;
  80. case kENET_TxEvent:
  81. _enet_tx_callback((struct rt_imxrt_eth *)userData);
  82. break;
  83. case kENET_ErrEvent:
  84. dbg_log(DBG_LOG, "kENET_ErrEvent\n");
  85. break;
  86. case kENET_WakeUpEvent:
  87. dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
  88. break;
  89. case kENET_TimeStampEvent:
  90. dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
  91. break;
  92. case kENET_TimeStampAvailEvent:
  93. dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent \n");
  94. break;
  95. default:
  96. dbg_log(DBG_LOG, "unknow error\n");
  97. break;
  98. }
  99. }
/* Configure the ENET PLL and route the reference clock to the MAC. */
static void _enet_clk_init(void)
{
    /* 50 MHz-class RMII reference clock setup via the ENET PLL. */
    const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
    CLOCK_InitEnetPll(&config);
    /* Drive ENET1 TX clock as an output from the SoC. */
    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
    /* NOTE(review): magic bit — assumed to select the ENET1 reference-clock
     * direction/source in GPR1; confirm bit 23 against the i.MXRT
     * reference manual before changing. */
    IOMUXC_GPR->GPR1|=1<<23;
}
  107. static void _enet_config(void)
  108. {
  109. enet_config_t config;
  110. uint32_t sysClock;
  111. /* prepare the buffer configuration. */
  112. enet_buffer_config_t buffConfig =
  113. {
  114. ENET_RXBD_NUM,
  115. ENET_TXBD_NUM,
  116. SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
  117. SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
  118. &g_rxBuffDescrip[0],
  119. &g_txBuffDescrip[0],
  120. &g_rxDataBuff[0][0],
  121. &g_txDataBuff[0][0],
  122. };
  123. /* Get default configuration. */
  124. /*
  125. * config.miiMode = kENET_RmiiMode;
  126. * config.miiSpeed = kENET_MiiSpeed100M;
  127. * config.miiDuplex = kENET_MiiFullDuplex;
  128. * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
  129. */
  130. ENET_GetDefaultConfig(&config);
  131. config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
  132. config.miiSpeed = imxrt_eth_device.speed;
  133. config.miiDuplex = imxrt_eth_device.duplex;
  134. /* Set SMI to get PHY link status. */
  135. sysClock = CLOCK_GetFreq(kCLOCK_AhbClk);
  136. dbg_log(DBG_LOG, "deinit\n");
  137. ENET_Deinit(imxrt_eth_device.enet_base);
  138. dbg_log(DBG_LOG, "init\n");
  139. ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig, &imxrt_eth_device.dev_addr[0], sysClock);
  140. dbg_log(DBG_LOG, "set call back\n");
  141. ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
  142. dbg_log(DBG_LOG, "active read\n");
  143. ENET_ActiveRead(imxrt_eth_device.enet_base);
  144. }
  145. #if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
  146. static void packet_dump(const char *msg, const struct pbuf *p)
  147. {
  148. const struct pbuf *q;
  149. rt_uint32_t i, j;
  150. rt_uint8_t *ptr;
  151. rt_kprintf("%s %d byte\n", msg, p->tot_len);
  152. i = 0;
  153. for (q = p; q != RT_NULL; q = q->next)
  154. {
  155. ptr = q->payload;
  156. for (j = 0; j < q->len; j++)
  157. {
  158. if ((i % 8) == 0)
  159. {
  160. rt_kprintf(" ");
  161. }
  162. if ((i % 16) == 0)
  163. {
  164. rt_kprintf("\r\n");
  165. }
  166. rt_kprintf("%02x ", *ptr);
  167. i++;
  168. ptr++;
  169. }
  170. }
  171. rt_kprintf("\n\n");
  172. }
  173. #else
  174. #define packet_dump(...)
  175. #endif /* dump */
  176. /* initialize the interface */
  177. static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
  178. {
  179. dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
  180. _enet_config();
  181. return RT_EOK;
  182. }
  183. static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
  184. {
  185. dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
  186. return RT_EOK;
  187. }
  188. static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
  189. {
  190. dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
  191. return RT_EOK;
  192. }
  193. static rt_size_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
  194. {
  195. dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
  196. rt_set_errno(-RT_ENOSYS);
  197. return 0;
  198. }
  199. static rt_size_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
  200. {
  201. dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
  202. rt_set_errno(-RT_ENOSYS);
  203. return 0;
  204. }
  205. static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
  206. {
  207. dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
  208. switch (cmd)
  209. {
  210. case NIOCTL_GADDR:
  211. /* get mac address */
  212. if (args) rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
  213. else return -RT_ERROR;
  214. break;
  215. default :
  216. break;
  217. }
  218. return RT_EOK;
  219. }
  220. static void _ENET_ActiveSend(ENET_Type *base, uint32_t ringId)
  221. {
  222. assert(ringId < FSL_FEATURE_ENET_QUEUE);
  223. switch (ringId)
  224. {
  225. case 0:
  226. base->TDAR = ENET_TDAR_TDAR_MASK;
  227. break;
  228. #if FSL_FEATURE_ENET_QUEUE > 1
  229. case kENET_Ring1:
  230. base->TDAR1 = ENET_TDAR1_TDAR_MASK;
  231. break;
  232. case kENET_Ring2:
  233. base->TDAR2 = ENET_TDAR2_TDAR_MASK;
  234. break;
  235. #endif /* FSL_FEATURE_ENET_QUEUE > 1 */
  236. default:
  237. base->TDAR = ENET_TDAR_TDAR_MASK;
  238. break;
  239. }
  240. }
  241. static status_t _ENET_SendFrame(ENET_Type *base, enet_handle_t *handle, const uint8_t *data, uint32_t length)
  242. {
  243. assert(handle);
  244. assert(data);
  245. volatile enet_tx_bd_struct_t *curBuffDescrip;
  246. uint32_t len = 0;
  247. uint32_t sizeleft = 0;
  248. uint32_t address;
  249. /* Check the frame length. */
  250. if (length > ENET_FRAME_MAX_FRAMELEN)
  251. {
  252. return kStatus_ENET_TxFrameOverLen;
  253. }
  254. /* Check if the transmit buffer is ready. */
  255. curBuffDescrip = handle->txBdCurrent[0];
  256. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
  257. {
  258. return kStatus_ENET_TxFrameBusy;
  259. }
  260. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  261. bool isPtpEventMessage = false;
  262. /* Check PTP message with the PTP header. */
  263. isPtpEventMessage = ENET_Ptp1588ParseFrame(data, NULL, true);
  264. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  265. /* One transmit buffer is enough for one frame. */
  266. if (handle->txBuffSizeAlign[0] >= length)
  267. {
  268. /* Copy data to the buffer for uDMA transfer. */
  269. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  270. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer,kMEMORY_DMA2Local);
  271. #else
  272. address = (uint32_t)curBuffDescrip->buffer;
  273. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  274. pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
  275. /* Set data length. */
  276. curBuffDescrip->length = length;
  277. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  278. /* For enable the timestamp. */
  279. if (isPtpEventMessage)
  280. {
  281. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  282. }
  283. else
  284. {
  285. curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  286. }
  287. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  288. curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
  289. /* Increase the buffer descriptor address. */
  290. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
  291. {
  292. handle->txBdCurrent[0] = handle->txBdBase[0];
  293. }
  294. else
  295. {
  296. handle->txBdCurrent[0]++;
  297. }
  298. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  299. /* Add the cache clean maintain. */
  300. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  301. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer,kMEMORY_DMA2Local);
  302. #else
  303. address = (uint32_t)curBuffDescrip->buffer;
  304. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  305. DCACHE_CleanByRange(address, length);
  306. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  307. /* Active the transmit buffer descriptor. */
  308. _ENET_ActiveSend(base, 0);
  309. return kStatus_Success;
  310. }
  311. else
  312. {
  313. /* One frame requires more than one transmit buffers. */
  314. do
  315. {
  316. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  317. /* For enable the timestamp. */
  318. if (isPtpEventMessage)
  319. {
  320. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  321. }
  322. else
  323. {
  324. curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  325. }
  326. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  327. /* Increase the buffer descriptor address. */
  328. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
  329. {
  330. handle->txBdCurrent[0] = handle->txBdBase[0];
  331. }
  332. else
  333. {
  334. handle->txBdCurrent[0]++;
  335. }
  336. /* update the size left to be transmit. */
  337. sizeleft = length - len;
  338. if (sizeleft > handle->txBuffSizeAlign[0])
  339. {
  340. /* Data copy. */
  341. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  342. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer,kMEMORY_DMA2Local);
  343. #else
  344. address = (uint32_t)curBuffDescrip->buffer;
  345. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  346. memcpy((void *)address, data + len, handle->txBuffSizeAlign[0]);
  347. /* Data length update. */
  348. curBuffDescrip->length = handle->txBuffSizeAlign[0];
  349. len += handle->txBuffSizeAlign[0];
  350. /* Sets the control flag. */
  351. curBuffDescrip->control &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  352. curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
  353. /* Active the transmit buffer descriptor*/
  354. _ENET_ActiveSend(base, 0);
  355. }
  356. else
  357. {
  358. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  359. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer,kMEMORY_DMA2Local);
  360. #else
  361. address = (uint32_t)curBuffDescrip->buffer;
  362. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  363. memcpy((void *)address, data + len, sizeleft);
  364. curBuffDescrip->length = sizeleft;
  365. /* Set Last buffer wrap flag. */
  366. curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  367. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  368. /* Add the cache clean maintain. */
  369. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  370. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer,kMEMORY_DMA2Local);
  371. #else
  372. address = (uint32_t)curBuffDescrip->buffer;
  373. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  374. DCACHE_CleanByRange(address, handle->txBuffSizeAlign[0]);
  375. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  376. /* Active the transmit buffer descriptor. */
  377. _ENET_ActiveSend(base, 0);
  378. return kStatus_Success;
  379. }
  380. /* Get the current buffer descriptor address. */
  381. curBuffDescrip = handle->txBdCurrent[0];
  382. } while (!(curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
  383. return kStatus_ENET_TxFrameBusy;
  384. }
  385. }
  386. /* ethernet device interface */
  387. /* transmit packet. */
  388. rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
  389. {
  390. rt_err_t result = RT_EOK;
  391. enet_handle_t * enet_handle = &imxrt_eth_device.enet_handle;
  392. RT_ASSERT(p != NULL);
  393. RT_ASSERT(enet_handle != RT_NULL);
  394. dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);
  395. #ifdef ETH_TX_DUMP
  396. packet_dump("send", p);
  397. #endif
  398. do
  399. {
  400. result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len);
  401. if (result == kStatus_ENET_TxFrameBusy)
  402. {
  403. imxrt_eth_device.tx_is_waiting = RT_TRUE;
  404. rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
  405. }
  406. }
  407. while (result == kStatus_ENET_TxFrameBusy);
  408. return RT_EOK;
  409. }
  410. /* reception packet. */
  411. struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
  412. {
  413. uint32_t length = 0;
  414. status_t status;
  415. struct pbuf *p = RT_NULL;
  416. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  417. ENET_Type *enet_base = imxrt_eth_device.enet_base;
  418. enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
  419. /* Get the Frame size */
  420. status = ENET_GetRxFrameSize(enet_handle, &length);
  421. /* Call ENET_ReadFrame when there is a received frame. */
  422. if (length != 0)
  423. {
  424. /* Received valid frame. Deliver the rx buffer with the size equal to length. */
  425. p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
  426. if (p != NULL)
  427. {
  428. status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length);
  429. if (status == kStatus_Success)
  430. {
  431. #ifdef ETH_RX_DUMP
  432. packet_dump("recv", p);
  433. #endif
  434. return p;
  435. }
  436. else
  437. {
  438. dbg_log(DBG_LOG, " A frame read failed\n");
  439. pbuf_free(p);
  440. }
  441. }
  442. else
  443. {
  444. dbg_log(DBG_LOG, " pbuf_alloc faild\n");
  445. }
  446. }
  447. else if (status == kStatus_ENET_RxFrameError)
  448. {
  449. dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");
  450. /* Update the received buffer when error happened. */
  451. /* Get the error information of the received g_frame. */
  452. ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic);
  453. /* update the receive buffer. */
  454. ENET_ReadFrame(enet_base, enet_handle, NULL, 0);
  455. }
  456. ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);
  457. return NULL;
  458. }
  459. #ifdef BSP_USING_PHY
  460. static struct rt_phy_device *phy_dev = RT_NULL;
  461. static void phy_monitor_thread_entry(void *parameter)
  462. {
  463. rt_uint32_t speed;
  464. rt_uint32_t duplex;
  465. rt_bool_t link = RT_FALSE;
  466. phy_dev = (struct rt_phy_device *)rt_device_find("rtt-phy");
  467. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  468. {
  469. // TODO print warning information
  470. LOG_E("Can not find phy device called \"rtt-phy\"");
  471. return ;
  472. }
  473. if (RT_NULL == phy_dev->ops->init)
  474. {
  475. LOG_E("phy driver error!");
  476. return ;
  477. }
  478. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_DEVICE_ADDRESS, CLOCK_GetFreq(kCLOCK_AhbClk));
  479. if (PHY_STATUS_OK != status)
  480. {
  481. LOG_E("Phy device initialize unsuccessful!\n");
  482. return ;
  483. }
  484. while (1)
  485. {
  486. rt_bool_t new_link = RT_FALSE;
  487. rt_phy_status status = phy_dev->ops->get_link_status(&new_link);
  488. if ((PHY_STATUS_OK == status) && (link != new_link))
  489. {
  490. link = new_link;
  491. if (link) // link up
  492. {
  493. phy_dev->ops->get_link_speed_duplex(&speed, &duplex);
  494. if (PHY_SPEED_10M == speed)
  495. {
  496. dbg_log(DBG_LOG, "10M\n");
  497. }
  498. else
  499. {
  500. dbg_log(DBG_LOG, "100M\n");
  501. }
  502. if (PHY_HALF_DUPLEX == duplex)
  503. {
  504. dbg_log(DBG_LOG, "half dumplex\n");
  505. }
  506. else
  507. {
  508. dbg_log(DBG_LOG, "full dumplex\n");
  509. }
  510. if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed) || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
  511. {
  512. imxrt_eth_device.speed = (enet_mii_speed_t)speed;
  513. imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;
  514. dbg_log(DBG_LOG, "link up, and update eth mode.\n");
  515. rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
  516. }
  517. else
  518. {
  519. dbg_log(DBG_LOG, "link up, eth not need re-config.\n");
  520. }
  521. dbg_log(DBG_LOG, "link up.\n");
  522. eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
  523. }
  524. else
  525. {
  526. dbg_log(DBG_LOG, "link down.\n");
  527. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  528. }
  529. }
  530. rt_thread_delay(RT_TICK_PER_SECOND * 2);
  531. }
  532. }
  533. #endif
  534. static int rt_hw_imxrt_eth_init(void)
  535. {
  536. rt_err_t state;
  537. _enet_clk_init();
  538. /* NXP (Freescale) MAC OUI */
  539. imxrt_eth_device.dev_addr[0] = 0x00;
  540. imxrt_eth_device.dev_addr[1] = 0x04;
  541. imxrt_eth_device.dev_addr[2] = 0x9F;
  542. /* generate MAC addr from 96bit unique ID (only for test). */
  543. imxrt_eth_device.dev_addr[3] = 0x05;
  544. imxrt_eth_device.dev_addr[4] = 0x44;
  545. imxrt_eth_device.dev_addr[5] = 0xE5;
  546. imxrt_eth_device.speed = kENET_MiiSpeed100M;
  547. imxrt_eth_device.duplex = kENET_MiiFullDuplex;
  548. imxrt_eth_device.enet_base = ENET;
  549. imxrt_eth_device.parent.parent.init = rt_imxrt_eth_init;
  550. imxrt_eth_device.parent.parent.open = rt_imxrt_eth_open;
  551. imxrt_eth_device.parent.parent.close = rt_imxrt_eth_close;
  552. imxrt_eth_device.parent.parent.read = rt_imxrt_eth_read;
  553. imxrt_eth_device.parent.parent.write = rt_imxrt_eth_write;
  554. imxrt_eth_device.parent.parent.control = rt_imxrt_eth_control;
  555. imxrt_eth_device.parent.parent.user_data = RT_NULL;
  556. imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
  557. imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;
  558. dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
  559. /* init tx semaphore */
  560. rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);
  561. /* register eth device */
  562. dbg_log(DBG_LOG, "eth_device_init start\r\n");
  563. state = eth_device_init(&(imxrt_eth_device.parent), "e0");
  564. if (RT_EOK == state)
  565. {
  566. dbg_log(DBG_LOG, "eth_device_init success\r\n");
  567. }
  568. else
  569. {
  570. dbg_log(DBG_LOG, "eth_device_init faild: %d\r\n", state);
  571. }
  572. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  573. /* start phy monitor */
  574. {
  575. #ifdef BSP_USING_PHY
  576. rt_thread_t tid;
  577. tid = rt_thread_create("phy",
  578. phy_monitor_thread_entry,
  579. RT_NULL,
  580. 512,
  581. RT_THREAD_PRIORITY_MAX - 2,
  582. 2);
  583. if (tid != RT_NULL)
  584. rt_thread_startup(tid);
  585. #endif
  586. }
  587. return state;
  588. }
  589. INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);
  590. #endif
  591. #if defined(RT_USING_FINSH) && defined(RT_USING_PHY)
  592. #include <finsh.h>
  593. void phy_read(rt_uint32_t phy_reg)
  594. {
  595. rt_uint32_t data;
  596. rt_phy_status status = phy_dev->ops->read(phy_reg, &data);
  597. if (PHY_STATUS_OK == status)
  598. {
  599. rt_kprintf("PHY_Read: %02X --> %08X", phy_reg, data);
  600. }
  601. else
  602. {
  603. rt_kprintf("PHY_Read: %02X --> faild", phy_reg);
  604. }
  605. }
  606. void phy_write(rt_uint32_t phy_reg, rt_uint32_t data)
  607. {
  608. rt_phy_status status = phy_dev->ops->write(phy_reg, data);
  609. if (PHY_STATUS_OK == status)
  610. {
  611. rt_kprintf("PHY_Write: %02X --> %08X\n", phy_reg, data);
  612. }
  613. else
  614. {
  615. rt_kprintf("PHY_Write: %02X --> faild\n", phy_reg);
  616. }
  617. }
  618. void phy_dump(void)
  619. {
  620. rt_uint32_t data;
  621. rt_phy_status status;
  622. int i;
  623. for (i = 0; i < 32; i++)
  624. {
  625. status = phy_dev->ops->read(i, &data);
  626. if (PHY_STATUS_OK != status)
  627. {
  628. rt_kprintf("phy_dump: %02X --> faild", i);
  629. break;
  630. }
  631. if (i % 8 == 7)
  632. {
  633. rt_kprintf("%02X --> %08X ", i, data);
  634. }
  635. else
  636. {
  637. rt_kprintf("%02X --> %08X\n", i, data);
  638. }
  639. }
  640. }
  641. #endif
  642. #if defined(RT_USING_FINSH) && defined(RT_USING_LWIP)
/* Shell command: print name, address and value of every ENET register,
 * including the RMON/IEEE statistics counters and the 1588 timer block. */
void enet_reg_dump(void)
{
    ENET_Type *enet_base = imxrt_eth_device.enet_base;

/* print "<name>(<address>): <value>" for one register field */
#define DUMP_REG(__REG) \
    rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)

    /* core control/status registers */
    DUMP_REG(EIR);
    DUMP_REG(EIMR);
    DUMP_REG(RDAR);
    DUMP_REG(TDAR);
    DUMP_REG(ECR);
    DUMP_REG(MMFR);
    DUMP_REG(MSCR);
    DUMP_REG(MIBC);
    DUMP_REG(RCR);
    DUMP_REG(TCR);
    DUMP_REG(PALR);
    DUMP_REG(PAUR);
    DUMP_REG(OPD);
    DUMP_REG(TXIC);
    DUMP_REG(RXIC);
    DUMP_REG(IAUR);
    DUMP_REG(IALR);
    DUMP_REG(GAUR);
    DUMP_REG(GALR);
    DUMP_REG(TFWR);
    DUMP_REG(RDSR);
    DUMP_REG(TDSR);
    DUMP_REG(MRBR);
    DUMP_REG(RSFL);
    DUMP_REG(RSEM);
    DUMP_REG(RAEM);
    DUMP_REG(RAFL);
    DUMP_REG(TSEM);
    DUMP_REG(TAEM);
    DUMP_REG(TAFL);
    DUMP_REG(TIPG);
    DUMP_REG(FTRL);
    DUMP_REG(TACC);
    DUMP_REG(RACC);
    /* RMON / IEEE transmit statistics counters */
    DUMP_REG(RMON_T_DROP);
    DUMP_REG(RMON_T_PACKETS);
    DUMP_REG(RMON_T_BC_PKT);
    DUMP_REG(RMON_T_MC_PKT);
    DUMP_REG(RMON_T_CRC_ALIGN);
    DUMP_REG(RMON_T_UNDERSIZE);
    DUMP_REG(RMON_T_OVERSIZE);
    DUMP_REG(RMON_T_FRAG);
    DUMP_REG(RMON_T_JAB);
    DUMP_REG(RMON_T_COL);
    DUMP_REG(RMON_T_P64);
    DUMP_REG(RMON_T_P65TO127);
    DUMP_REG(RMON_T_P128TO255);
    DUMP_REG(RMON_T_P256TO511);
    DUMP_REG(RMON_T_P512TO1023);
    DUMP_REG(RMON_T_P1024TO2047);
    DUMP_REG(RMON_T_P_GTE2048);
    DUMP_REG(RMON_T_OCTETS);
    DUMP_REG(IEEE_T_DROP);
    DUMP_REG(IEEE_T_FRAME_OK);
    DUMP_REG(IEEE_T_1COL);
    DUMP_REG(IEEE_T_MCOL);
    DUMP_REG(IEEE_T_DEF);
    DUMP_REG(IEEE_T_LCOL);
    DUMP_REG(IEEE_T_EXCOL);
    DUMP_REG(IEEE_T_MACERR);
    DUMP_REG(IEEE_T_CSERR);
    DUMP_REG(IEEE_T_SQE);
    DUMP_REG(IEEE_T_FDXFC);
    DUMP_REG(IEEE_T_OCTETS_OK);
    /* RMON / IEEE receive statistics counters */
    DUMP_REG(RMON_R_PACKETS);
    DUMP_REG(RMON_R_BC_PKT);
    DUMP_REG(RMON_R_MC_PKT);
    DUMP_REG(RMON_R_CRC_ALIGN);
    DUMP_REG(RMON_R_UNDERSIZE);
    DUMP_REG(RMON_R_OVERSIZE);
    DUMP_REG(RMON_R_FRAG);
    DUMP_REG(RMON_R_JAB);
    DUMP_REG(RMON_R_RESVD_0);
    DUMP_REG(RMON_R_P64);
    DUMP_REG(RMON_R_P65TO127);
    DUMP_REG(RMON_R_P128TO255);
    DUMP_REG(RMON_R_P256TO511);
    DUMP_REG(RMON_R_P512TO1023);
    DUMP_REG(RMON_R_P1024TO2047);
    DUMP_REG(RMON_R_P_GTE2048);
    DUMP_REG(RMON_R_OCTETS);
    DUMP_REG(IEEE_R_DROP);
    DUMP_REG(IEEE_R_FRAME_OK);
    DUMP_REG(IEEE_R_CRC);
    DUMP_REG(IEEE_R_ALIGN);
    DUMP_REG(IEEE_R_MACERR);
    DUMP_REG(IEEE_R_FDXFC);
    DUMP_REG(IEEE_R_OCTETS_OK);
    /* IEEE 1588 timer registers */
    DUMP_REG(ATCR);
    DUMP_REG(ATVR);
    DUMP_REG(ATOFF);
    DUMP_REG(ATPER);
    DUMP_REG(ATCOR);
    DUMP_REG(ATINC);
    DUMP_REG(ATSTMP);
    DUMP_REG(TGSR);
}
/* Shell command: manually raise the ENET interrupt in the NVIC (debug aid). */
void enet_nvic_tog(void)
{
    NVIC_SetPendingIRQ(ENET_IRQn);
}
/* Shell command: print the accumulated RX/TX error statistics collected
 * in imxrt_eth_device.error_statistic by the RX error path. */
void enet_rx_stat(void)
{
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

/* print one counter field, name left-aligned */
#define DUMP_STAT(__VAR) \
    rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);

    DUMP_STAT(statsRxLenGreaterErr);
    DUMP_STAT(statsRxAlignErr);
    DUMP_STAT(statsRxFcsErr);
    DUMP_STAT(statsRxOverRunErr);
    DUMP_STAT(statsRxTruncateErr);
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    /* extended counters only exist with enhanced buffer descriptors */
    DUMP_STAT(statsRxProtocolChecksumErr);
    DUMP_STAT(statsRxIpHeadChecksumErr);
    DUMP_STAT(statsRxMacErr);
    DUMP_STAT(statsRxPhyErr);
    DUMP_STAT(statsRxCollisionErr);
    DUMP_STAT(statsTxErr);
    DUMP_STAT(statsTxFrameErr);
    DUMP_STAT(statsTxOverFlowErr);
    DUMP_STAT(statsTxLateCollisionErr);
    DUMP_STAT(statsTxExcessCollisionErr);
    DUMP_STAT(statsTxUnderFlowErr);
    DUMP_STAT(statsTxTsErr);
#endif
}
  774. void enet_buf_info(void)
  775. {
  776. int i = 0;
  777. for (i = 0; i < ENET_RXBD_NUM; i++)
  778. {
  779. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  780. i,
  781. g_rxBuffDescrip[i].length,
  782. g_rxBuffDescrip[i].control,
  783. g_rxBuffDescrip[i].buffer);
  784. }
  785. for (i = 0; i < ENET_TXBD_NUM; i++)
  786. {
  787. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  788. i,
  789. g_txBuffDescrip[i].length,
  790. g_txBuffDescrip[i].control,
  791. g_txBuffDescrip[i].buffer);
  792. }
  793. }
/* Export the debug commands above to the finsh shell. */
FINSH_FUNCTION_EXPORT(phy_read, read phy register);
FINSH_FUNCTION_EXPORT(phy_write, write phy register);
FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pendding bit);
FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistic);
FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet tx and tx buffer descripter);
  801. #endif