drv_eth.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2017-10-10 Tanek the first version
  9. * 2019-5-10 misonyo add DMA TX and RX function
  10. * 2020-10-14 wangqiang use phy device in phy monitor thread
  11. * 2022-08-29 xjy198903 add 1170 rgmii support
  12. */
  13. #include <rtthread.h>
  14. #include "board.h"
  15. #include <rtdevice.h>
  16. #ifdef RT_USING_FINSH
  17. #include <finsh.h>
  18. #endif
  19. #include "fsl_enet.h"
  20. #include "fsl_gpio.h"
  21. #include "fsl_cache.h"
  22. #include "fsl_iomuxc.h"
  23. #include "fsl_common.h"
  24. #ifdef RT_USING_LWIP
  25. #include <netif/ethernetif.h>
  26. #include "lwipopts.h"
  27. #define ENET_RXBD_NUM (5)
  28. #define ENET_TXBD_NUM (3)
  29. #define ENET_RXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  30. #define ENET_TXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  31. /* debug option */
  32. #define ETH_RX_DUMP
  33. #undef ETH_RX_DUMP
  34. #define ETH_TX_DUMP
  35. #undef ETH_TX_DUMP
  36. #define DBG_ENABLE
  37. #define DBG_SECTION_NAME "[ETH]"
  38. #define DBG_COLOR
  39. #define DBG_LEVEL DBG_INFO
  40. #include <rtdbg.h>
  41. #define RING_ID 0
  42. #define ENET_RING_NUM 1U
  43. #define MAX_ADDR_LEN 6
  44. //#ifdef SOC_IMXRT1170_SERIES
  45. typedef uint8_t rx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  46. typedef uint8_t tx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  47. #ifndef ENET_RXBUFF_NUM
  48. #define ENET_RXBUFF_NUM (ENET_RXBD_NUM * 2)
  49. #endif
  50. //#endif
  51. //#ifdef SOC_IMXRT1170_SERIES
/* Local mirror of lwIP's pbuf_custom machinery so RX buffers can be handed to
 * the stack zero-copy and reclaimed through a driver callback when freed. */
typedef void (*pbuf_free_custom_fn)(struct pbuf *p);
/** A custom pbuf: like a pbuf, but following a function pointer to free it. */
struct pbuf_custom
{
    /** The actual pbuf */
    struct pbuf pbuf;
    /** This function is called when pbuf_free deallocates this pbuf(_custom) */
    pbuf_free_custom_fn custom_free_function;
};
/* Ties a custom pbuf to the DMA buffer it wraps; buffer_used tracks whether
 * the buffer is currently owned by the ENET DMA / lwIP or free for reuse. */
typedef struct rx_pbuf_wrapper
{
    struct pbuf_custom p;      /*!< Pbuf wrapper. Has to be first. */
    void *buffer;              /*!< Original buffer wrapped by p. */
    volatile bool buffer_used; /*!< Wrapped buffer is used by ENET */
} rx_pbuf_wrapper_t;
  67. //#endif
/* Driver instance state: inherits the RT-Thread ethernet device and adds the
 * NXP SDK handle, buffer bookkeeping and link parameters. */
struct rt_imxrt_eth
{
    /* inherit from ethernet device */
    struct eth_device parent;
    enet_handle_t enet_handle;          /* NXP SDK driver handle */
    ENET_Type *enet_base;               /* ENET peripheral base address */
    enet_data_error_stats_t error_statistic; /* RX error counters (filled on frame error) */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];  /* hw address */
    rt_bool_t tx_is_waiting;            /* a sender is blocked on tx_wait */
    struct rt_semaphore tx_wait;        /* released by the TX-complete interrupt */
    struct rt_semaphore buff_wait;      /* guards RxPbufs[].buffer_used */
    enet_mii_speed_t speed;             /* negotiated link speed */
    enet_mii_duplex_t duplex;           /* negotiated duplex mode */
    enet_rx_bd_struct_t *RxBuffDescrip; /* RX buffer descriptor ring */
    enet_tx_bd_struct_t *TxBuffDescrip; /* TX buffer descriptor ring */
    rx_buffer_t *RxDataBuff;            /* RX DMA data buffers */
    tx_buffer_t *TxDataBuff;            /* TX DMA data buffers */
    rx_pbuf_wrapper_t RxPbufs[ENET_RXBUFF_NUM]; /* zero-copy RX pbuf wrappers */
};
  89. //#if defined(__ICCARM__) /* IAR Workbench */
  90. //#pragma location = "enet_mem_section"
  91. //ALIGN(ENET_BUFF_ALIGNMENT)
  92. //static enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM];
  93. //
  94. //ALIGN(ENET_BUFF_ALIGNMENT)
  95. //rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  96. //
  97. //#pragma location = "enet_mem_section"
  98. //ALIGN(ENET_BUFF_ALIGNMENT)
  99. //static enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM];
  100. //
  101. //ALIGN(ENET_BUFF_ALIGNMENT)
  102. //rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  103. //
  104. //#else
  105. AT_NONCACHEABLE_SECTION_ALIGN(static enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM], ENET_BUFF_ALIGNMENT);
  106. rt_align(ENET_BUFF_ALIGNMENT)
  107. rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  108. AT_NONCACHEABLE_SECTION_ALIGN(static enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM], ENET_BUFF_ALIGNMENT);
  109. rt_align(ENET_BUFF_ALIGNMENT)
  110. rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  111. //#endif
  112. static struct rt_imxrt_eth imxrt_eth_device;
  113. void _enet_rx_callback(struct rt_imxrt_eth *eth)
  114. {
  115. rt_err_t result;
  116. ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);
  117. result = eth_device_ready(&(eth->parent));
  118. if (result != RT_EOK)
  119. rt_kprintf("RX err =%d\n", result);
  120. }
  121. void _enet_tx_callback(struct rt_imxrt_eth *eth)
  122. {
  123. dbg_log(DBG_LOG, "_enet_tx_callback\n");
  124. if (eth->tx_is_waiting == RT_TRUE)
  125. {
  126. eth->tx_is_waiting = RT_FALSE;
  127. rt_sem_release(&eth->tx_wait);
  128. }
  129. }
  130. static void _enet_callback(ENET_Type *base,
  131. enet_handle_t *handle,
  132. #if FSL_FEATURE_ENET_QUEUE > 1
  133. uint32_t ringId,
  134. #endif /* FSL_FEATURE_ENET_QUEUE */
  135. enet_event_t event,
  136. enet_frame_info_t *frameInfo,
  137. void *userData)
  138. {
  139. switch (event)
  140. {
  141. case kENET_RxEvent:
  142. _enet_rx_callback((struct rt_imxrt_eth *)userData);
  143. break;
  144. case kENET_TxEvent:
  145. _enet_tx_callback((struct rt_imxrt_eth *)userData);
  146. break;
  147. case kENET_ErrEvent:
  148. dbg_log(DBG_LOG, "kENET_ErrEvent\n");
  149. break;
  150. case kENET_WakeUpEvent:
  151. dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
  152. break;
  153. case kENET_TimeStampEvent:
  154. dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
  155. break;
  156. case kENET_TimeStampAvailEvent:
  157. dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent \n");
  158. break;
  159. default:
  160. dbg_log(DBG_LOG, "unknow error\n");
  161. break;
  162. }
  163. }
  164. static void _enet_clk_init(void)
  165. {
  166. #ifdef SOC_IMXRT1170_SERIES
  167. #ifdef PHY_USING_RTL8211F
  168. const clock_sys_pll1_config_t sysPll1Config = {
  169. .pllDiv2En = true,
  170. };
  171. CLOCK_InitSysPll1(&sysPll1Config);
  172. clock_root_config_t rootCfg = {.mux = 4, .div = 4}; /* Generate 125M root clock. */
  173. CLOCK_SetRootClock(kCLOCK_Root_Enet2, &rootCfg);
  174. IOMUXC_GPR->GPR5 |= IOMUXC_GPR_GPR5_ENET1G_RGMII_EN_MASK; /* bit1:iomuxc_gpr_enet_clk_dir
  175. bit0:GPR_ENET_TX_CLK_SEL(internal or OSC) */
  176. #else
  177. const clock_sys_pll1_config_t sysPll1Config = {
  178. .pllDiv2En = true,
  179. };
  180. CLOCK_InitSysPll1(&sysPll1Config);
  181. clock_root_config_t rootCfg = {.mux = 4, .div = 10}; /* Generate 50M root clock. */
  182. CLOCK_SetRootClock(kCLOCK_Root_Enet1, &rootCfg);
  183. /* Select syspll2pfd3, 528*18/24 = 396M */
  184. CLOCK_InitPfd(kCLOCK_PllSys2, kCLOCK_Pfd3, 24);
  185. rootCfg.mux = 7;
  186. rootCfg.div = 2;
  187. CLOCK_SetRootClock(kCLOCK_Root_Bus, &rootCfg); /* Generate 198M bus clock. */
  188. IOMUXC_GPR->GPR4 |= 0x3;
  189. #endif
  190. #else
  191. // const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
  192. // CLOCK_InitEnetPll(&config);
  193. //
  194. // IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
  195. // IOMUXC_GPR->GPR1 |= 1 << 23;
  196. /* Set 50MHz output clock required by PHY. */
  197. const clock_enet_pll_config_t config = {.enableClkOutput = true, .loopDivider = 1};
  198. #if defined(SOC_IMXRT1020_SERIES)
  199. const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput500M = true, .loopDivider = 1};
  200. #endif
  201. CLOCK_InitEnetPll(&config);
  202. /* Output 50M clock to PHY. */
  203. IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
  204. #endif
  205. }
  206. //#ifdef SOC_IMXRT1170_SERIES
  207. static void *_enet_rx_alloc(ENET_Type *base, void *userData, uint8_t ringId)
  208. {
  209. void *buffer = NULL;
  210. int i;
  211. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  212. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  213. for (i = 0; i < ENET_RXBUFF_NUM; i++)
  214. {
  215. if (!imxrt_eth_device.RxPbufs[i].buffer_used)
  216. {
  217. imxrt_eth_device.RxPbufs[i].buffer_used = true;
  218. buffer = &imxrt_eth_device.RxDataBuff[i];
  219. break;
  220. }
  221. }
  222. rt_sem_release(&imxrt_eth_device.buff_wait);
  223. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  224. return buffer;
  225. }
  226. static void _enet_rx_free(ENET_Type *base, void *buffer, void *userData, uint8_t ringId)
  227. {
  228. int idx = ((rx_buffer_t *)buffer) - imxrt_eth_device.RxDataBuff;
  229. if (!((idx >= 0) && (idx < ENET_RXBUFF_NUM)))
  230. {
  231. LOG_E("Freed buffer out of range\r\n");
  232. }
  233. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  234. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  235. if (!(imxrt_eth_device.RxPbufs[idx].buffer_used))
  236. {
  237. LOG_E("_enet_rx_free: freeing unallocated buffer\r\n");
  238. }
  239. imxrt_eth_device.RxPbufs[idx].buffer_used = false;
  240. rt_sem_release(&imxrt_eth_device.buff_wait);
  241. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  242. }
  243. /**
  244. * Reclaims RX buffer held by the p after p is no longer used
  245. * by the application / lwIP.
  246. */
  247. static void _enet_rx_release(struct pbuf *p)
  248. {
  249. rx_pbuf_wrapper_t *wrapper = (rx_pbuf_wrapper_t *)p;
  250. _enet_rx_free(imxrt_eth_device.enet_base, wrapper->buffer, &imxrt_eth_device, 0);
  251. }
  252. //#endif
  253. static void _enet_config(void)
  254. {
  255. enet_config_t config;
  256. uint32_t sysClock;
  257. /* prepare the buffer configuration. */
  258. //#ifndef SOC_IMXRT1170_SERIES
  259. // enet_buffer_config_t buffConfig[] =
  260. // {
  261. // ENET_RXBD_NUM,
  262. // ENET_TXBD_NUM,
  263. // SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
  264. // SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
  265. // &g_rxBuffDescrip[0],
  266. // &g_txBuffDescrip[0],
  267. // &g_rxDataBuff[0][0],
  268. // &g_txDataBuff[0][0],
  269. // };
  270. // /* Get default configuration. */
  271. // /*
  272. // * config.miiMode = kENET_RmiiMode;
  273. // * config.miiSpeed = kENET_MiiSpeed100M;
  274. // * config.miiDuplex = kENET_MiiFullDuplex;
  275. // * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
  276. // */
  277. //
  278. // ENET_GetDefaultConfig(&config);
  279. // config.ringNum = ENET_RING_NUM;
  280. // config.miiSpeed = imxrt_eth_device.speed;
  281. // config.miiDuplex = imxrt_eth_device.duplex;
  282. //
  283. ////#ifdef PHY_USING_RTL8211F
  284. //// config.miiMode = kENET_RgmiiMode;
  285. //// EnableIRQ(ENET_1G_MAC0_Tx_Rx_1_IRQn);
  286. //// EnableIRQ(ENET_1G_MAC0_Tx_Rx_2_IRQn);
  287. ////#else
  288. //// config.miiMode = kENET_RmiiMode;
  289. ////#endif
  290. //
  291. //
  292. // config.interrupt |= kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
  293. // config.callback = _enet_callback;
  294. //
  295. //// ENET_GetDefaultConfig(&config);
  296. // config.ringNum = ENET_RING_NUM;
  297. //// config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
  298. // config.miiSpeed = imxrt_eth_device.speed;
  299. // config.miiDuplex = imxrt_eth_device.duplex;
  300. //
  301. // /* Set SMI to get PHY link status. */
  302. // sysClock = CLOCK_GetFreq(kCLOCK_IpgClk);
  303. //
  304. // dbg_log(DBG_LOG, "deinit\n");
  305. // ENET_Deinit(imxrt_eth_device.enet_base);
  306. // dbg_log(DBG_LOG, "init\n");
  307. // ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig[0], &imxrt_eth_device.dev_addr[0], sysClock);
  308. //// dbg_log(DBG_LOG, "set call back\n");
  309. //// ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
  310. // dbg_log(DBG_LOG, "active read\n");
  311. // ENET_ActiveRead(imxrt_eth_device.enet_base);
  312. //#else
  313. int i;
  314. enet_buffer_config_t buffConfig[ENET_RING_NUM];
  315. imxrt_eth_device.RxBuffDescrip = &g_rxBuffDescrip[0];
  316. imxrt_eth_device.TxBuffDescrip = &g_txBuffDescrip[0];
  317. imxrt_eth_device.RxDataBuff = &g_rxDataBuff[0];
  318. imxrt_eth_device.TxDataBuff = &g_txDataBuff[0];
  319. buffConfig[0].rxBdNumber = ENET_RXBD_NUM; /* Receive buffer descriptor number. */
  320. buffConfig[0].txBdNumber = ENET_TXBD_NUM; /* Transmit buffer descriptor number. */
  321. buffConfig[0].rxBuffSizeAlign = sizeof(rx_buffer_t); /* Aligned receive data buffer size. */
  322. buffConfig[0].txBuffSizeAlign = sizeof(tx_buffer_t); /* Aligned transmit data buffer size. */
  323. buffConfig[0].rxBdStartAddrAlign =
  324. &(imxrt_eth_device.RxBuffDescrip[0]); /* Aligned receive buffer descriptor start address. */
  325. buffConfig[0].txBdStartAddrAlign =
  326. &(imxrt_eth_device.TxBuffDescrip[0]); /* Aligned transmit buffer descriptor start address. */
  327. buffConfig[0].rxBufferAlign =
  328. NULL; /* Receive data buffer start address. NULL when buffers are allocated by callback for RX zero-copy. */
  329. buffConfig[0].txBufferAlign = &(imxrt_eth_device.TxDataBuff[0][0]); /* Transmit data buffer start address. */
  330. buffConfig[0].txFrameInfo = NULL; /* Transmit frame information start address. Set only if using zero-copy transmit. */
  331. buffConfig[0].rxMaintainEnable = true; /* Receive buffer cache maintain. */
  332. buffConfig[0].txMaintainEnable = true; /* Transmit buffer cache maintain. */
  333. /* Get default configuration. */
  334. /*
  335. * config.miiMode = kENET_RmiiMode;
  336. * config.miiSpeed = kENET_MiiSpeed100M;
  337. * config.miiDuplex = kENET_MiiFullDuplex;
  338. * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
  339. */
  340. ENET_GetDefaultConfig(&config);
  341. config.ringNum = ENET_RING_NUM;
  342. config.miiSpeed = imxrt_eth_device.speed;
  343. config.miiDuplex = imxrt_eth_device.duplex;
  344. #ifdef PHY_USING_RTL8211F
  345. config.miiMode = kENET_RgmiiMode;
  346. EnableIRQ(ENET_1G_MAC0_Tx_Rx_1_IRQn);
  347. EnableIRQ(ENET_1G_MAC0_Tx_Rx_2_IRQn);
  348. #else
  349. config.miiMode = kENET_RmiiMode;
  350. #endif
  351. config.rxBuffAlloc = _enet_rx_alloc;
  352. config.rxBuffFree = _enet_rx_free;
  353. config.userData = &imxrt_eth_device;
  354. #ifdef SOC_IMXRT1170_SERIES
  355. /* Set SMI to get PHY link status. */
  356. sysClock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus);
  357. #else
  358. sysClock = CLOCK_GetFreq(kCLOCK_IpgClk);
  359. #endif
  360. config.interrupt |= kENET_TxFrameInterrupt | kENET_RxFrameInterrupt | kENET_TxBufferInterrupt | kENET_LateCollisionInterrupt;
  361. config.callback = _enet_callback;
  362. for (i = 0; i < ENET_RXBUFF_NUM; i++)
  363. {
  364. imxrt_eth_device.RxPbufs[i].p.custom_free_function = _enet_rx_release;
  365. imxrt_eth_device.RxPbufs[i].buffer = &(imxrt_eth_device.RxDataBuff[i][0]);
  366. imxrt_eth_device.RxPbufs[i].buffer_used = false;
  367. }
  368. // dbg_log(DBG_LOG, "deinit\n");
  369. // ENET_Deinit(imxrt_eth_device.enet_base);
  370. dbg_log(DBG_LOG, "init\n");
  371. ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig[0], &imxrt_eth_device.dev_addr[0], sysClock);
  372. // dbg_log(DBG_LOG, "set call back\n");
  373. // ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
  374. dbg_log(DBG_LOG, "active read\n");
  375. ENET_ActiveRead(imxrt_eth_device.enet_base);
  376. //#endif
  377. }
  378. #if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
  379. static void packet_dump(const char *msg, const struct pbuf *p)
  380. {
  381. const struct pbuf *q;
  382. rt_uint32_t i, j;
  383. rt_uint8_t *ptr;
  384. rt_kprintf("%s %d byte\n", msg, p->tot_len);
  385. i = 0;
  386. for (q = p; q != RT_NULL; q = q->next)
  387. {
  388. ptr = q->payload;
  389. for (j = 0; j < q->len; j++)
  390. {
  391. if ((i % 8) == 0)
  392. {
  393. rt_kprintf(" ");
  394. }
  395. if ((i % 16) == 0)
  396. {
  397. rt_kprintf("\r\n");
  398. }
  399. rt_kprintf("%02x ", *ptr);
  400. i++;
  401. ptr++;
  402. }
  403. }
  404. rt_kprintf("\n\n");
  405. }
  406. #else
  407. #define packet_dump(...)
  408. #endif /* dump */
  409. /* initialize the interface */
  410. static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
  411. {
  412. dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
  413. _enet_config();
  414. return RT_EOK;
  415. }
  416. static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
  417. {
  418. dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
  419. return RT_EOK;
  420. }
  421. static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
  422. {
  423. dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
  424. return RT_EOK;
  425. }
  426. static rt_ssize_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
  427. {
  428. dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
  429. rt_set_errno(-RT_ENOSYS);
  430. return 0;
  431. }
  432. static rt_ssize_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
  433. {
  434. dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
  435. rt_set_errno(-RT_ENOSYS);
  436. return 0;
  437. }
  438. static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
  439. {
  440. dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
  441. switch (cmd)
  442. {
  443. case NIOCTL_GADDR:
  444. /* get mac address */
  445. if (args)
  446. rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
  447. else
  448. return -RT_ERROR;
  449. break;
  450. default:
  451. break;
  452. }
  453. return RT_EOK;
  454. }
  455. static bool _ENET_TxDirtyRingAvailable(enet_tx_dirty_ring_t *txDirtyRing)
  456. {
  457. return !txDirtyRing->isFull;
  458. }
  459. static uint16_t _ENET_IncreaseIndex(uint16_t index, uint16_t max)
  460. {
  461. assert(index < max);
  462. /* Increase the index. */
  463. index++;
  464. if (index >= max)
  465. {
  466. index = 0;
  467. }
  468. return index;
  469. }
/**
 * Notify the ENET DMA that a new TX descriptor is ready on @p ringId by
 * writing 0 to the ring's TDAR register. Applies the errata 007885
 * read-back delay when that feature macro is set.
 */
static void _ENET_ActiveSendRing(ENET_Type *base, uint8_t ringId)
{
    assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));

    volatile uint32_t *txDesActive = NULL;

    /* Ensure previous data update is completed with Data Synchronization Barrier before activing Tx BD. */
    __DSB();

    /* Select the TDAR register for the requested ring; rings 1/2 exist only
     * on multi-queue parts. Unknown ring ids fall back to ring 0's TDAR. */
    switch (ringId)
    {
    case 0:
        txDesActive = &(base->TDAR);
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case 1:
        txDesActive = &(base->TDAR1);
        break;
    case 2:
        txDesActive = &(base->TDAR2);
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        txDesActive = &(base->TDAR);
        break;
    }

#if defined(FSL_FEATURE_ENET_HAS_ERRATA_007885) && FSL_FEATURE_ENET_HAS_ERRATA_007885
    /* There is a TDAR race condition for mutliQ when the software sets TDAR
     * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
     * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
     * Software workaround: introduces a delay by reading the relevant ENET_TDARn_TDAR 4 times
     */
    for (uint8_t i = 0; i < 4U; i++)
    {
        if (*txDesActive == 0U)
        {
            break;
        }
    }
#endif

    /* Write to active tx descriptor */
    *txDesActive = 0;
}
/**
 * Copy a frame into the TX descriptor ring and hand it to the DMA.
 *
 * NOTE(review): despite the `const uint8_t *data` signature, the caller
 * (rt_imxrt_eth_tx) passes a `struct pbuf *` and the single-buffer path
 * copies it with pbuf_copy_partial(); the multi-buffer path treats `data`
 * as a flat byte array — confirm multi-descriptor frames are handled by
 * callers before relying on that path.
 *
 * @return kStatus_Success, kStatus_ENET_TxFrameOverLen when length exceeds
 *         the controller limit, or kStatus_ENET_TxFrameBusy when no free
 *         descriptor (or dirty-ring slot) is available.
 */
static status_t _ENET_SendFrame(ENET_Type *base,
                                enet_handle_t *handle,
                                const uint8_t *data,
                                uint32_t length,
                                uint8_t ringId,
                                bool tsFlag,
                                void *context)
{
    assert(handle != NULL);
    assert(data != NULL);
    assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
    assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));

    volatile enet_tx_bd_struct_t *curBuffDescrip;
    enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
    enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
    enet_frame_info_t *txDirty = NULL;
    uint32_t len = 0;          /* bytes already queued (multi-buffer path) */
    uint32_t sizeleft = 0;     /* bytes remaining (multi-buffer path) */
    uint32_t address;          /* CPU-visible address of the BD's buffer */
    status_t result = kStatus_Success;
    uint32_t src;
    uint32_t configVal;
    bool isReturn = false;
    uint32_t primask;

    /* Check the frame length. */
    if (length > ENET_FRAME_TX_LEN_LIMITATION(base))
    {
        result = kStatus_ENET_TxFrameOverLen;
    }
    else
    {
        /* Check if the transmit buffer is ready. */
        curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
        if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
        {
            result = kStatus_ENET_TxFrameBusy;
        }
        /* Check txDirtyRing if need frameinfo in tx interrupt callback. */
        else if ((handle->txReclaimEnable[ringId]) && !_ENET_TxDirtyRingAvailable(txDirtyRing))
        {
            result = kStatus_ENET_TxFrameBusy;
        }
        else
        {
            /* One transmit buffer is enough for one frame. */
            if (handle->txBuffSizeAlign[ringId] >= length)
            {
                /* Copy data to the buffer for uDMA transfer. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                /* `data` is really a pbuf chain here; flatten it into the
                 * DMA buffer. */
                pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                if (handle->txMaintainEnable[ringId])
                {
                    DCACHE_CleanByRange(address, length);
                }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                /* Set data length. */
                curBuffDescrip->length = (uint16_t)length;
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
                /* For enable the timestamp. */
                if (tsFlag)
                {
                    curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
                }
                else
                {
                    curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
                }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
                curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
                /* Increase the buffer descriptor address. */
                txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
                /* Add context to frame info ring */
                if (handle->txReclaimEnable[ringId])
                {
                    txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
                    txDirty->context = context;
                    txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
                    if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
                    {
                        txDirtyRing->isFull = true;
                    }
                    /* txDescUsed is shared with the TX ISR; guard the
                     * increment by masking interrupts. */
                    primask = DisableGlobalIRQ();
                    txBdRing->txDescUsed++;
                    EnableGlobalIRQ(primask);
                }
                /* Active the transmit buffer descriptor. */
                _ENET_ActiveSendRing(base, ringId);
            }
            else
            {
                /* One frame requires more than one transmit buffers. */
                do
                {
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
                    /* For enable the timestamp. */
                    if (tsFlag)
                    {
                        curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
                    }
                    else
                    {
                        curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
                    }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
                    /* Update the size left to be transmit. */
                    sizeleft = length - len;
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                    address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                    address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                    src = (uint32_t)data + len;
                    /* Increase the current software index of BD */
                    txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
                    if (sizeleft > handle->txBuffSizeAlign[ringId])
                    {
                        /* Data copy. */
                        (void)memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src,
                                     handle->txBuffSizeAlign[ringId]);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                        if (handle->txMaintainEnable[ringId])
                        {
                            /* Add the cache clean maintain. */
                            DCACHE_CleanByRange(address, handle->txBuffSizeAlign[ringId]);
                        }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                        /* Data length update. */
                        curBuffDescrip->length = handle->txBuffSizeAlign[ringId];
                        len += handle->txBuffSizeAlign[ringId];
                        /* Sets the control flag: READY but NOT LAST — more
                         * descriptors of this frame follow. */
                        configVal = (uint32_t)curBuffDescrip->control;
                        configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                        configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
                        curBuffDescrip->control = (uint16_t)configVal;
                        if (handle->txReclaimEnable[ringId])
                        {
                            primask = DisableGlobalIRQ();
                            txBdRing->txDescUsed++;
                            EnableGlobalIRQ(primask);
                        }
                        /* Active the transmit buffer descriptor*/
                        _ENET_ActiveSendRing(base, ringId);
                    }
                    else
                    {
                        /* Final chunk of the frame. */
                        (void)memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src, sizeleft);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                        if (handle->txMaintainEnable[ringId])
                        {
                            /* Add the cache clean maintain. */
                            DCACHE_CleanByRange(address, sizeleft);
                        }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                        curBuffDescrip->length = (uint16_t)sizeleft;
                        /* Set Last buffer wrap flag. */
                        curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                        if (handle->txReclaimEnable[ringId])
                        {
                            /* Add context to frame info ring */
                            txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
                            txDirty->context = context;
                            txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
                            if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
                            {
                                txDirtyRing->isFull = true;
                            }
                            primask = DisableGlobalIRQ();
                            txBdRing->txDescUsed++;
                            EnableGlobalIRQ(primask);
                        }
                        /* Active the transmit buffer descriptor. */
                        _ENET_ActiveSendRing(base, ringId);
                        isReturn = true;
                        break;
                    }
                    /* Update the buffer descriptor address. */
                    curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
                } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
                /* Ran out of free descriptors mid-frame. */
                if (isReturn == false)
                {
                    result = kStatus_ENET_TxFrameBusy;
                }
            }
        }
    }

    return result;
}
  702. /* ethernet device interface */
  703. /* transmit packet. */
  704. rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
  705. {
  706. rt_err_t result = RT_EOK;
  707. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  708. RT_ASSERT(p != NULL);
  709. RT_ASSERT(enet_handle != RT_NULL);
  710. dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);
  711. #ifdef ETH_TX_DUMP
  712. packet_dump("send", p);
  713. #endif
  714. do
  715. {
  716. result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len, RING_ID, false, NULL);
  717. if (result == kStatus_ENET_TxFrameBusy)
  718. {
  719. imxrt_eth_device.tx_is_waiting = RT_TRUE;
  720. rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
  721. }
  722. } while (result == kStatus_ENET_TxFrameBusy);
  723. return RT_EOK;
  724. }
  725. /* reception packet. */
  726. struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
  727. {
  728. uint32_t length = 0;
  729. status_t status;
  730. struct pbuf *p = RT_NULL;
  731. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  732. ENET_Type *enet_base = imxrt_eth_device.enet_base;
  733. enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
  734. /* Get the Frame size */
  735. status = ENET_GetRxFrameSize(enet_handle, &length, RING_ID);
  736. /* Call ENET_ReadFrame when there is a received frame. */
  737. if (length != 0)
  738. {
  739. /* Received valid frame. Deliver the rx buffer with the size equal to length. */
  740. p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
  741. if (p != NULL)
  742. {
  743. status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length, RING_ID, NULL);
  744. if (status == kStatus_Success)
  745. {
  746. #ifdef ETH_RX_DUMP
  747. packet_dump("recv", p);
  748. #endif
  749. return p;
  750. }
  751. else
  752. {
  753. dbg_log(DBG_LOG, " A frame read failed\n");
  754. pbuf_free(p);
  755. }
  756. }
  757. else
  758. {
  759. dbg_log(DBG_LOG, " pbuf_alloc faild\n");
  760. }
  761. }
  762. else if (status == kStatus_ENET_RxFrameError)
  763. {
  764. dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");
  765. /* Update the received buffer when error happened. */
  766. /* Get the error information of the received g_frame. */
  767. ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic, RING_ID);
  768. /* update the receive buffer. */
  769. ENET_ReadFrame(enet_base, enet_handle, NULL, 0, RING_ID, NULL);
  770. }
  771. ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);
  772. return NULL;
  773. }
  774. #ifdef BSP_USING_PHY
  775. static struct rt_phy_device *phy_dev = RT_NULL;
  776. static void phy_monitor_thread_entry(void *parameter)
  777. {
  778. rt_uint32_t speed;
  779. rt_uint32_t duplex;
  780. rt_bool_t link = RT_FALSE;
  781. #ifdef SOC_IMXRT1170_SERIES
  782. #ifdef PHY_USING_RTL8211F
  783. phy_dev = (struct rt_phy_device *)rt_device_find("rtl8211f");
  784. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  785. {
  786. // TODO print warning information
  787. LOG_E("Can not find phy device called \"rtl8211f\"");
  788. return;
  789. }
  790. #else
  791. phy_dev = (struct rt_phy_device *)rt_device_find("ksz8081");
  792. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  793. {
  794. // TODO print warning information
  795. LOG_E("Can not find phy device called \"ksz8081\"");
  796. return;
  797. }
  798. #endif
  799. #else
  800. phy_dev = (struct rt_phy_device *)rt_device_find("ksz8081");
  801. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  802. {
  803. // TODO print warning information
  804. LOG_E("Can not find phy device called \"rtt-phy\"");
  805. return;
  806. }
  807. #endif
  808. if (RT_NULL == phy_dev->ops->init)
  809. {
  810. LOG_E("phy driver error!");
  811. return;
  812. }
  813. #ifdef SOC_IMXRT1170_SERIES
  814. #ifdef PHY_USING_RTL8211F
  815. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_RTL8211F_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  816. #else
  817. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_KSZ8081_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  818. #endif
  819. #else
  820. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_KSZ8081_ADDRESS, CLOCK_GetFreq(kCLOCK_IpgClk));
  821. #endif
  822. if (PHY_STATUS_OK != status)
  823. {
  824. LOG_E("Phy device initialize unsuccessful!\n");
  825. return;
  826. }
  827. LOG_I("Phy device initialize successfully!\n");
  828. while (1)
  829. {
  830. rt_bool_t new_link = RT_FALSE;
  831. rt_phy_status status = phy_dev->ops->get_link_status(&new_link);
  832. if ((PHY_STATUS_OK == status) && (link != new_link))
  833. {
  834. link = new_link;
  835. if (link) // link up
  836. {
  837. phy_dev->ops->get_link_speed_duplex(&speed, &duplex);
  838. if (PHY_SPEED_10M == speed)
  839. {
  840. dbg_log(DBG_LOG, "10M\n");
  841. }
  842. else if (PHY_SPEED_100M == speed)
  843. {
  844. dbg_log(DBG_LOG, "100M\n");
  845. }
  846. else
  847. {
  848. dbg_log(DBG_LOG, "1000M\n");
  849. }
  850. if (PHY_HALF_DUPLEX == duplex)
  851. {
  852. dbg_log(DBG_LOG, "half dumplex\n");
  853. }
  854. else
  855. {
  856. dbg_log(DBG_LOG, "full dumplex\n");
  857. }
  858. if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed) || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
  859. {
  860. imxrt_eth_device.speed = (enet_mii_speed_t)speed;
  861. imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;
  862. dbg_log(DBG_LOG, "link up, and update eth mode.\n");
  863. rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
  864. }
  865. else
  866. {
  867. dbg_log(DBG_LOG, "link up, eth not need re-config.\n");
  868. }
  869. dbg_log(DBG_LOG, "link up.\n");
  870. eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
  871. }
  872. else
  873. {
  874. dbg_log(DBG_LOG, "link down.\n");
  875. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  876. }
  877. }
  878. rt_thread_delay(RT_TICK_PER_SECOND * 2);
  879. // rt_thread_mdelay(300);
  880. }
  881. }
  882. #endif
/* Driver entry point, registered via INIT_DEVICE_EXPORT: enables the ENET
 * clocks, fills in the (hard-coded, test-only) MAC address and MII defaults,
 * wires up the rt-device callbacks, creates the TX/buffer semaphores,
 * registers the device as "e0" with the lwIP glue, and finally spawns the
 * PHY monitor thread.
 * Returns the status of eth_device_init(). */
static int rt_hw_imxrt_eth_init(void)
{
rt_err_t state;
_enet_clk_init();
#ifdef PHY_USING_RTL8211F
/* NXP (Freescale) MAC OUI */
imxrt_eth_device.dev_addr[0] = 0x54;
imxrt_eth_device.dev_addr[1] = 0x27;
imxrt_eth_device.dev_addr[2] = 0x8d;
/* generate MAC addr from 96bit unique ID (only for test). */
imxrt_eth_device.dev_addr[3] = 0x11;
imxrt_eth_device.dev_addr[4] = 0x22;
imxrt_eth_device.dev_addr[5] = 0x33;
imxrt_eth_device.speed = kENET_MiiSpeed100M;// to support gigabit, assign kENET_MiiSpeed1000M here instead
imxrt_eth_device.duplex = kENET_MiiFullDuplex;
imxrt_eth_device.enet_base = ENET_1G;
#else
/* NXP (Freescale) MAC OUI */
imxrt_eth_device.dev_addr[0] = 0x54;
imxrt_eth_device.dev_addr[1] = 0x27;
imxrt_eth_device.dev_addr[2] = 0x8d;
/* generate MAC addr from 96bit unique ID (only for test). */
imxrt_eth_device.dev_addr[3] = 0x00;
imxrt_eth_device.dev_addr[4] = 0x00;
imxrt_eth_device.dev_addr[5] = 0x00;
imxrt_eth_device.speed = kENET_MiiSpeed100M;
imxrt_eth_device.duplex = kENET_MiiFullDuplex;
imxrt_eth_device.enet_base = ENET;
#endif
/* rt-device interface hooks. */
imxrt_eth_device.parent.parent.init = rt_imxrt_eth_init;
imxrt_eth_device.parent.parent.open = rt_imxrt_eth_open;
imxrt_eth_device.parent.parent.close = rt_imxrt_eth_close;
imxrt_eth_device.parent.parent.read = rt_imxrt_eth_read;
imxrt_eth_device.parent.parent.write = rt_imxrt_eth_write;
imxrt_eth_device.parent.parent.control = rt_imxrt_eth_control;
imxrt_eth_device.parent.parent.user_data = RT_NULL;
imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;
dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
/* init tx semaphore (taken when all TX descriptors are busy). */
rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);
dbg_log(DBG_LOG, "sem init: buff_wait\r\n");
/* init buffer-protection semaphore (binary: initial count 1).
 * NOTE(review): the original comment said "init tx semaphore" here,
 * but this guards buffer access, not TX. */
rt_sem_init(&imxrt_eth_device.buff_wait, "buff_wait", 1, RT_IPC_FLAG_FIFO);
/* register eth device */
dbg_log(DBG_LOG, "eth_device_init start\r\n");
state = eth_device_init(&(imxrt_eth_device.parent), "e0");
if (RT_EOK == state)
{
dbg_log(DBG_LOG, "eth_device_init success\r\n");
}
else
{
dbg_log(DBG_LOG, "eth_device_init faild: %d\r\n", state);
}
/* Report link-down until the PHY monitor observes a real link. */
eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
/* start phy monitor */
{
#ifdef BSP_USING_PHY
rt_thread_t tid;
tid = rt_thread_create("phy",
phy_monitor_thread_entry,
RT_NULL,
4096,
/*RT_THREAD_PRIORITY_MAX - 2,*/
15,
2);
if (tid != RT_NULL)
rt_thread_startup(tid);
#endif
}
return state;
}
  956. INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);
  957. #endif
  958. #if defined(RT_USING_FINSH) && defined(RT_USING_PHY)
  959. #include <finsh.h>
  960. void phy_read(rt_uint32_t phy_reg)
  961. {
  962. rt_uint32_t data;
  963. rt_phy_status status = phy_dev->ops->read(phy_reg, &data);
  964. if (PHY_STATUS_OK == status)
  965. {
  966. rt_kprintf("PHY_Read: %02X --> %08X", phy_reg, data);
  967. }
  968. else
  969. {
  970. rt_kprintf("PHY_Read: %02X --> faild", phy_reg);
  971. }
  972. }
  973. void phy_write(rt_uint32_t phy_reg, rt_uint32_t data)
  974. {
  975. rt_phy_status status = phy_dev->ops->write(phy_reg, data);
  976. if (PHY_STATUS_OK == status)
  977. {
  978. rt_kprintf("PHY_Write: %02X --> %08X\n", phy_reg, data);
  979. }
  980. else
  981. {
  982. rt_kprintf("PHY_Write: %02X --> faild\n", phy_reg);
  983. }
  984. }
  985. void phy_dump(void)
  986. {
  987. rt_uint32_t data;
  988. rt_phy_status status;
  989. int i;
  990. for (i = 0; i < 32; i++)
  991. {
  992. status = phy_dev->ops->read(i, &data);
  993. if (PHY_STATUS_OK != status)
  994. {
  995. rt_kprintf("phy_dump: %02X --> faild", i);
  996. break;
  997. }
  998. if (i % 8 == 7)
  999. {
  1000. rt_kprintf("%02X --> %08X ", i, data);
  1001. }
  1002. else
  1003. {
  1004. rt_kprintf("%02X --> %08X\n", i, data);
  1005. }
  1006. }
  1007. }
  1008. #endif
  1009. #if defined(RT_USING_FINSH) && defined(RT_USING_LWIP)
/* finsh command: dump every ENET MAC register (name, address, value)
 * for the active controller, including the RMON/IEEE statistics counters
 * and the IEEE 1588 timer block.
 * NOTE(review): the commented-out entries (RMON_T_DROP, IEEE_T_DROP,
 * RMON_R_RESVD_0) are presumably absent from this device's ENET_Type
 * definition -- confirm against the SDK header before re-enabling. */
void enet_reg_dump(void)
{
ENET_Type *enet_base = imxrt_eth_device.enet_base;
/* Print one register: symbolic name, absolute address, current value. */
#define DUMP_REG(__REG) \
rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)
DUMP_REG(EIR);
DUMP_REG(EIMR);
DUMP_REG(RDAR);
DUMP_REG(TDAR);
DUMP_REG(ECR);
DUMP_REG(MMFR);
DUMP_REG(MSCR);
DUMP_REG(MIBC);
DUMP_REG(RCR);
DUMP_REG(TCR);
DUMP_REG(PALR);
DUMP_REG(PAUR);
DUMP_REG(OPD);
DUMP_REG(TXIC);
DUMP_REG(RXIC);
DUMP_REG(IAUR);
DUMP_REG(IALR);
DUMP_REG(GAUR);
DUMP_REG(GALR);
DUMP_REG(TFWR);
DUMP_REG(RDSR);
DUMP_REG(TDSR);
DUMP_REG(MRBR);
DUMP_REG(RSFL);
DUMP_REG(RSEM);
DUMP_REG(RAEM);
DUMP_REG(RAFL);
DUMP_REG(TSEM);
DUMP_REG(TAEM);
DUMP_REG(TAFL);
DUMP_REG(TIPG);
DUMP_REG(FTRL);
DUMP_REG(TACC);
DUMP_REG(RACC);
// DUMP_REG(RMON_T_DROP);
DUMP_REG(RMON_T_PACKETS);
DUMP_REG(RMON_T_BC_PKT);
DUMP_REG(RMON_T_MC_PKT);
DUMP_REG(RMON_T_CRC_ALIGN);
DUMP_REG(RMON_T_UNDERSIZE);
DUMP_REG(RMON_T_OVERSIZE);
DUMP_REG(RMON_T_FRAG);
DUMP_REG(RMON_T_JAB);
DUMP_REG(RMON_T_COL);
DUMP_REG(RMON_T_P64);
DUMP_REG(RMON_T_P65TO127);
DUMP_REG(RMON_T_P128TO255);
DUMP_REG(RMON_T_P256TO511);
DUMP_REG(RMON_T_P512TO1023);
DUMP_REG(RMON_T_P1024TO2047);
DUMP_REG(RMON_T_P_GTE2048);
DUMP_REG(RMON_T_OCTETS);
// DUMP_REG(IEEE_T_DROP);
DUMP_REG(IEEE_T_FRAME_OK);
DUMP_REG(IEEE_T_1COL);
DUMP_REG(IEEE_T_MCOL);
DUMP_REG(IEEE_T_DEF);
DUMP_REG(IEEE_T_LCOL);
DUMP_REG(IEEE_T_EXCOL);
DUMP_REG(IEEE_T_MACERR);
DUMP_REG(IEEE_T_CSERR);
DUMP_REG(IEEE_T_SQE);
DUMP_REG(IEEE_T_FDXFC);
DUMP_REG(IEEE_T_OCTETS_OK);
DUMP_REG(RMON_R_PACKETS);
DUMP_REG(RMON_R_BC_PKT);
DUMP_REG(RMON_R_MC_PKT);
DUMP_REG(RMON_R_CRC_ALIGN);
DUMP_REG(RMON_R_UNDERSIZE);
DUMP_REG(RMON_R_OVERSIZE);
DUMP_REG(RMON_R_FRAG);
DUMP_REG(RMON_R_JAB);
// DUMP_REG(RMON_R_RESVD_0);
DUMP_REG(RMON_R_P64);
DUMP_REG(RMON_R_P65TO127);
DUMP_REG(RMON_R_P128TO255);
DUMP_REG(RMON_R_P256TO511);
DUMP_REG(RMON_R_P512TO1023);
DUMP_REG(RMON_R_P1024TO2047);
DUMP_REG(RMON_R_P_GTE2048);
DUMP_REG(RMON_R_OCTETS);
DUMP_REG(IEEE_R_DROP);
DUMP_REG(IEEE_R_FRAME_OK);
DUMP_REG(IEEE_R_CRC);
DUMP_REG(IEEE_R_ALIGN);
DUMP_REG(IEEE_R_MACERR);
DUMP_REG(IEEE_R_FDXFC);
DUMP_REG(IEEE_R_OCTETS_OK);
/* IEEE 1588 timer registers. */
DUMP_REG(ATCR);
DUMP_REG(ATVR);
DUMP_REG(ATOFF);
DUMP_REG(ATPER);
DUMP_REG(ATCOR);
DUMP_REG(ATINC);
DUMP_REG(ATSTMP);
DUMP_REG(TGSR);
}
/* finsh command: manually pend the ENET interrupt in the NVIC
 * (debug aid to force the ISR to run). */
void enet_nvic_tog(void)
{
NVIC_SetPendingIRQ(ENET_IRQn);
}
/* finsh command: dump the accumulated ENET error statistics collected by
 * rt_imxrt_eth_rx() via ENET_GetRxErrBeforeReadFrame(). The extended TX/RX
 * counters exist only when enhanced buffer descriptors are enabled. */
void enet_rx_stat(void)
{
enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
/* Print one counter: field name left-aligned, value in hex. */
#define DUMP_STAT(__VAR) \
rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);
DUMP_STAT(statsRxLenGreaterErr);
DUMP_STAT(statsRxAlignErr);
DUMP_STAT(statsRxFcsErr);
DUMP_STAT(statsRxOverRunErr);
DUMP_STAT(statsRxTruncateErr);
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
DUMP_STAT(statsRxProtocolChecksumErr);
DUMP_STAT(statsRxIpHeadChecksumErr);
DUMP_STAT(statsRxMacErr);
DUMP_STAT(statsRxPhyErr);
DUMP_STAT(statsRxCollisionErr);
DUMP_STAT(statsTxErr);
DUMP_STAT(statsTxFrameErr);
DUMP_STAT(statsTxOverFlowErr);
DUMP_STAT(statsTxLateCollisionErr);
DUMP_STAT(statsTxExcessCollisionErr);
DUMP_STAT(statsTxUnderFlowErr);
DUMP_STAT(statsTxTsErr);
#endif
}
  1141. void enet_buf_info(void)
  1142. {
  1143. int i = 0;
  1144. for (i = 0; i < ENET_RXBD_NUM; i++)
  1145. {
  1146. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1147. i,
  1148. g_rxBuffDescrip[i].length,
  1149. g_rxBuffDescrip[i].control,
  1150. g_rxBuffDescrip[i].buffer);
  1151. }
  1152. for (i = 0; i < ENET_TXBD_NUM; i++)
  1153. {
  1154. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1155. i,
  1156. g_txBuffDescrip[i].length,
  1157. g_txBuffDescrip[i].control,
  1158. g_txBuffDescrip[i].buffer);
  1159. }
  1160. }
/* Export the debug helpers above as finsh shell commands. */
FINSH_FUNCTION_EXPORT(phy_read, read phy register);
FINSH_FUNCTION_EXPORT(phy_write, write phy register);
FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pendding bit);
FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistic);
FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet tx and tx buffer descripter);
  1168. #endif