drv_eth.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2017-10-10 Tanek the first version
  9. * 2019-5-10 misonyo add DMA TX and RX function
  10. * 2020-10-14 wangqiang use phy device in phy monitor thread
  11. * 2022-08-29 xjy198903 add 1170 rgmii support
  12. */
  13. #include <rtthread.h>
  14. #include "board.h"
  15. #include <rtdevice.h>
  16. #ifdef RT_USING_FINSH
  17. #include <finsh.h>
  18. #endif
  19. #include "fsl_enet.h"
  20. #include "fsl_gpio.h"
  21. #include "fsl_cache.h"
  22. #include "fsl_iomuxc.h"
  23. #include "fsl_common.h"
  24. #ifdef RT_USING_LWIP
  25. #include <netif/ethernetif.h>
  26. #include "lwipopts.h"
  27. #ifdef SOC_IMXRT1170_SERIES
  28. #define ENET_RXBD_NUM (5)
  29. #define ENET_TXBD_NUM (3)
  30. #else
  31. #define ENET_RXBD_NUM (4)
  32. #define ENET_TXBD_NUM (4)
  33. #endif
  34. #define ENET_RXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  35. #define ENET_TXBUFF_SIZE (ENET_FRAME_MAX_FRAMELEN)
  36. /* debug option */
  37. #define ETH_RX_DUMP
  38. #undef ETH_RX_DUMP
  39. #define ETH_TX_DUMP
  40. #undef ETH_TX_DUMP
  41. #define DBG_ENABLE
  42. #define DBG_SECTION_NAME "[ETH]"
  43. #define DBG_COLOR
  44. #define DBG_LEVEL DBG_INFO
  45. #include <rtdbg.h>
  46. #define MAX_ADDR_LEN 6
  47. #ifdef SOC_IMXRT1170_SERIES
  48. #define ENET_RING_NUM 1U
  49. #define RING_ID 0
  50. typedef uint8_t rx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  51. typedef uint8_t tx_buffer_t[RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
  52. #ifndef ENET_RXBUFF_NUM
  53. #define ENET_RXBUFF_NUM (ENET_RXBD_NUM * 2)
  54. #endif
  55. #endif
  56. #ifdef SOC_IMXRT1170_SERIES
/* Signature of the hook lwIP invokes when pbuf_free() releases a custom pbuf. */
typedef void (*pbuf_free_custom_fn)(struct pbuf *p);
/** A custom pbuf: like a pbuf, but following a function pointer to free it. */
struct pbuf_custom
{
    /** The actual pbuf */
    struct pbuf pbuf;
    /** This function is called when pbuf_free deallocates this pbuf(_custom) */
    pbuf_free_custom_fn custom_free_function;
};
/* Ties one RX DMA buffer to the custom pbuf handed up to lwIP so the buffer
 * can be returned to the driver pool once the stack has consumed it. */
typedef struct rx_pbuf_wrapper
{
    struct pbuf_custom p;      /*!< Pbuf wrapper. Has to be first. */
    void *buffer;              /*!< Original buffer wrapped by p. */
    volatile bool buffer_used; /*!< Wrapped buffer is used by ENET */
} rx_pbuf_wrapper_t;
  72. #endif
/* Per-controller driver state; a single instance (imxrt_eth_device) exists. */
struct rt_imxrt_eth
{
    /* inherit from ethernet device */
    struct eth_device parent;
    enet_handle_t enet_handle;               /* SDK driver handle for this MAC */
    ENET_Type *enet_base;                    /* ENET peripheral base address */
    enet_data_error_stats_t error_statistic; /* accumulated RX/TX error counters */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];       /* hw address */
    rt_bool_t tx_is_waiting;                 /* a sender is blocked on tx_wait */
    struct rt_semaphore tx_wait;             /* released from the TX-complete callback */
    struct rt_semaphore buff_wait;           /* guards RxPbufs[].buffer_used bookkeeping */
    enet_mii_speed_t speed;                  /* MII speed programmed into the MAC */
    enet_mii_duplex_t duplex;                /* MII duplex programmed into the MAC */
#ifdef SOC_IMXRT1170_SERIES
    enet_rx_bd_struct_t *RxBuffDescrip;      /* RX buffer-descriptor ring */
    enet_tx_bd_struct_t *TxBuffDescrip;      /* TX buffer-descriptor ring */
    rx_buffer_t *RxDataBuff;                 /* RX data buffer pool (zero-copy) */
    tx_buffer_t *TxDataBuff;                 /* TX data buffers, one per TX BD */
    rx_pbuf_wrapper_t RxPbufs[ENET_RXBUFF_NUM]; /* custom-pbuf wrappers for RX buffers */
#endif
};
/* Buffer descriptors are read/written by the ENET DMA, so they live in a
 * non-cacheable section; data buffers are cache-maintained explicitly. */
AT_NONCACHEABLE_SECTION_ALIGN(static enet_tx_bd_struct_t g_txBuffDescrip[ENET_TXBD_NUM], ENET_BUFF_ALIGNMENT);
rt_align(ENET_BUFF_ALIGNMENT)
rt_uint8_t g_txDataBuff[ENET_TXBD_NUM][RT_ALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
AT_NONCACHEABLE_SECTION_ALIGN(static enet_rx_bd_struct_t g_rxBuffDescrip[ENET_RXBD_NUM], ENET_BUFF_ALIGNMENT);
rt_align(ENET_BUFF_ALIGNMENT)
rt_uint8_t g_rxDataBuff[ENET_RXBD_NUM][RT_ALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT)];
/* Singleton driver instance registered with the RT-Thread device framework. */
static struct rt_imxrt_eth imxrt_eth_device;
  101. void _enet_rx_callback(struct rt_imxrt_eth *eth)
  102. {
  103. rt_err_t result;
  104. ENET_DisableInterrupts(eth->enet_base, kENET_RxFrameInterrupt);
  105. result = eth_device_ready(&(eth->parent));
  106. if (result != RT_EOK)
  107. rt_kprintf("RX err =%d\n", result);
  108. }
  109. void _enet_tx_callback(struct rt_imxrt_eth *eth)
  110. {
  111. dbg_log(DBG_LOG, "_enet_tx_callback\n");
  112. if (eth->tx_is_waiting == RT_TRUE)
  113. {
  114. eth->tx_is_waiting = RT_FALSE;
  115. rt_sem_release(&eth->tx_wait);
  116. }
  117. }
  118. #ifdef SOC_IMXRT1170_SERIES
  119. static void _enet_callback(ENET_Type *base,
  120. enet_handle_t *handle,
  121. #if FSL_FEATURE_ENET_QUEUE > 1
  122. uint32_t ringId,
  123. #endif /* FSL_FEATURE_ENET_QUEUE */
  124. enet_event_t event,
  125. enet_frame_info_t *frameInfo,
  126. void *userData)
  127. #else
  128. void _enet_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *userData)
  129. #endif
  130. {
  131. switch (event)
  132. {
  133. case kENET_RxEvent:
  134. _enet_rx_callback((struct rt_imxrt_eth *)userData);
  135. break;
  136. case kENET_TxEvent:
  137. _enet_tx_callback((struct rt_imxrt_eth *)userData);
  138. break;
  139. case kENET_ErrEvent:
  140. dbg_log(DBG_LOG, "kENET_ErrEvent\n");
  141. break;
  142. case kENET_WakeUpEvent:
  143. dbg_log(DBG_LOG, "kENET_WakeUpEvent\n");
  144. break;
  145. case kENET_TimeStampEvent:
  146. dbg_log(DBG_LOG, "kENET_TimeStampEvent\n");
  147. break;
  148. case kENET_TimeStampAvailEvent:
  149. dbg_log(DBG_LOG, "kENET_TimeStampAvailEvent \n");
  150. break;
  151. default:
  152. dbg_log(DBG_LOG, "unknow error\n");
  153. break;
  154. }
  155. }
/* Set up the clock tree feeding the ENET MAC (board/PHY specific):
 *  - RT1170 + RTL8211F: 125 MHz root clock to ENET_2 and RGMII mode enable.
 *  - RT1170 RMII:       50 MHz root clock to ENET_1 plus 198 MHz bus clock.
 *  - other SoCs:        ENET PLL with the ref clock driven out to the PHY. */
static void _enet_clk_init(void)
{
#ifdef SOC_IMXRT1170_SERIES
#ifdef PHY_USING_RTL8211F
    const clock_sys_pll1_config_t sysPll1Config = {
        .pllDiv2En = true,
    };
    CLOCK_InitSysPll1(&sysPll1Config);
    clock_root_config_t rootCfg = {.mux = 4, .div = 4}; /* Generate 125M root clock. */
    CLOCK_SetRootClock(kCLOCK_Root_Enet2, &rootCfg);
    IOMUXC_GPR->GPR5 |= IOMUXC_GPR_GPR5_ENET1G_RGMII_EN_MASK; /* bit1:iomuxc_gpr_enet_clk_dir
                                                                 bit0:GPR_ENET_TX_CLK_SEL(internal or OSC) */
#else
    const clock_sys_pll1_config_t sysPll1Config = {
        .pllDiv2En = true,
    };
    CLOCK_InitSysPll1(&sysPll1Config);
    clock_root_config_t rootCfg = {.mux = 4, .div = 10}; /* Generate 50M root clock. */
    CLOCK_SetRootClock(kCLOCK_Root_Enet1, &rootCfg);
    /* Select syspll2pfd3, 528*18/24 = 396M */
    CLOCK_InitPfd(kCLOCK_PllSys2, kCLOCK_Pfd3, 24);
    rootCfg.mux = 7;
    rootCfg.div = 2;
    CLOCK_SetRootClock(kCLOCK_Root_Bus, &rootCfg); /* Generate 198M bus clock. */
    /* Direct the 50 MHz ref clock out to the PHY and select it as TX clock. */
    IOMUXC_GPR->GPR4 |= 0x3;
#endif
#else
    const clock_enet_pll_config_t config = {.enableClkOutput = true, .enableClkOutput25M = false, .loopDivider = 1};
    CLOCK_InitEnetPll(&config);
    IOMUXC_EnableMode(IOMUXC_GPR, kIOMUXC_GPR_ENET1TxClkOutputDir, true);
    /* GPR1 bit 23: ENET1 ref clock output enable towards the PHY. */
    IOMUXC_GPR->GPR1 |= 1 << 23;
#endif
}
  189. #ifdef SOC_IMXRT1170_SERIES
  190. static void *_enet_rx_alloc(ENET_Type *base, void *userData, uint8_t ringId)
  191. {
  192. void *buffer = NULL;
  193. int i;
  194. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  195. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  196. for (i = 0; i < ENET_RXBUFF_NUM; i++)
  197. {
  198. if (!imxrt_eth_device.RxPbufs[i].buffer_used)
  199. {
  200. imxrt_eth_device.RxPbufs[i].buffer_used = true;
  201. buffer = &imxrt_eth_device.RxDataBuff[i];
  202. break;
  203. }
  204. }
  205. rt_sem_release(&imxrt_eth_device.buff_wait);
  206. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  207. return buffer;
  208. }
  209. static void _enet_rx_free(ENET_Type *base, void *buffer, void *userData, uint8_t ringId)
  210. {
  211. int idx = ((rx_buffer_t *)buffer) - imxrt_eth_device.RxDataBuff;
  212. if (!((idx >= 0) && (idx < ENET_RXBUFF_NUM)))
  213. {
  214. LOG_E("Freed buffer out of range\r\n");
  215. }
  216. // dbg_log(DBG_LOG, "get buff_wait sem in %d\r\n", __LINE__);
  217. rt_sem_take(&imxrt_eth_device.buff_wait, RT_WAITING_FOREVER);
  218. if (!(imxrt_eth_device.RxPbufs[idx].buffer_used))
  219. {
  220. LOG_E("_enet_rx_free: freeing unallocated buffer\r\n");
  221. }
  222. imxrt_eth_device.RxPbufs[idx].buffer_used = false;
  223. rt_sem_release(&imxrt_eth_device.buff_wait);
  224. // dbg_log(DBG_LOG, "release buff_wait sem in %d\r\n", __LINE__);
  225. }
  226. /**
  227. * Reclaims RX buffer held by the p after p is no longer used
  228. * by the application / lwIP.
  229. */
  230. static void _enet_rx_release(struct pbuf *p)
  231. {
  232. rx_pbuf_wrapper_t *wrapper = (rx_pbuf_wrapper_t *)p;
  233. _enet_rx_free(imxrt_eth_device.enet_base, wrapper->buffer, &imxrt_eth_device, 0);
  234. }
  235. #endif
/* Program the ENET MAC: descriptor rings, buffer pools, MII speed/duplex,
 * interrupts and callbacks, then start reception. Two builds:
 *  - non-1170: classic static buffer config, SetCallback + ActiveRead.
 *  - 1170: ring-based config with RX zero-copy via alloc/free hooks and the
 *    custom-pbuf wrappers initialized here. */
static void _enet_config(void)
{
    enet_config_t config;
    uint32_t sysClock;
    /* prepare the buffer configuration. */
#ifndef SOC_IMXRT1170_SERIES
    enet_buffer_config_t buffConfig =
    {
        ENET_RXBD_NUM,
        ENET_TXBD_NUM,
        SDK_SIZEALIGN(ENET_RXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        SDK_SIZEALIGN(ENET_TXBUFF_SIZE, ENET_BUFF_ALIGNMENT),
        &g_rxBuffDescrip[0],
        &g_txBuffDescrip[0],
        &g_rxDataBuff[0][0],
        &g_txDataBuff[0][0],
    };
    /* Get default configuration. */
    /*
     * config.miiMode = kENET_RmiiMode;
     * config.miiSpeed = kENET_MiiSpeed100M;
     * config.miiDuplex = kENET_MiiFullDuplex;
     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
     */
    ENET_GetDefaultConfig(&config);
    config.interrupt = kENET_TxFrameInterrupt | kENET_RxFrameInterrupt;
    /* speed/duplex were filled in by the PHY monitor before we get here. */
    config.miiSpeed = imxrt_eth_device.speed;
    config.miiDuplex = imxrt_eth_device.duplex;
    /* Set SMI to get PHY link status. */
    sysClock = CLOCK_GetFreq(kCLOCK_AhbClk);
    dbg_log(DBG_LOG, "deinit\n");
    ENET_Deinit(imxrt_eth_device.enet_base);
    dbg_log(DBG_LOG, "init\n");
    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig, &imxrt_eth_device.dev_addr[0], sysClock);
    dbg_log(DBG_LOG, "set call back\n");
    ENET_SetCallback(&imxrt_eth_device.enet_handle, _enet_callback, &imxrt_eth_device);
    dbg_log(DBG_LOG, "active read\n");
    ENET_ActiveRead(imxrt_eth_device.enet_base);
#else
    int i;
    enet_buffer_config_t buffConfig[ENET_RING_NUM];
    imxrt_eth_device.RxBuffDescrip = &g_rxBuffDescrip[0];
    imxrt_eth_device.TxBuffDescrip = &g_txBuffDescrip[0];
    imxrt_eth_device.RxDataBuff = &g_rxDataBuff[0];
    imxrt_eth_device.TxDataBuff = &g_txDataBuff[0];
    buffConfig[0].rxBdNumber = ENET_RXBD_NUM; /* Receive buffer descriptor number. */
    buffConfig[0].txBdNumber = ENET_TXBD_NUM; /* Transmit buffer descriptor number. */
    buffConfig[0].rxBuffSizeAlign = sizeof(rx_buffer_t); /* Aligned receive data buffer size. */
    buffConfig[0].txBuffSizeAlign = sizeof(tx_buffer_t); /* Aligned transmit data buffer size. */
    buffConfig[0].rxBdStartAddrAlign =
        &(imxrt_eth_device.RxBuffDescrip[0]); /* Aligned receive buffer descriptor start address. */
    buffConfig[0].txBdStartAddrAlign =
        &(imxrt_eth_device.TxBuffDescrip[0]); /* Aligned transmit buffer descriptor start address. */
    buffConfig[0].rxBufferAlign =
        NULL; /* Receive data buffer start address. NULL when buffers are allocated by callback for RX zero-copy. */
    buffConfig[0].txBufferAlign = &(imxrt_eth_device.TxDataBuff[0][0]); /* Transmit data buffer start address. */
    buffConfig[0].txFrameInfo = NULL; /* Transmit frame information start address. Set only if using zero-copy transmit. */
    buffConfig[0].rxMaintainEnable = true; /* Receive buffer cache maintain. */
    buffConfig[0].txMaintainEnable = true; /* Transmit buffer cache maintain. */
    /* Get default configuration. */
    /*
     * config.miiMode = kENET_RmiiMode;
     * config.miiSpeed = kENET_MiiSpeed100M;
     * config.miiDuplex = kENET_MiiFullDuplex;
     * config.rxMaxFrameLen = ENET_FRAME_MAX_FRAMELEN;
     */
    ENET_GetDefaultConfig(&config);
    config.ringNum = ENET_RING_NUM;
    config.miiSpeed = imxrt_eth_device.speed;
    config.miiDuplex = imxrt_eth_device.duplex;
#ifdef PHY_USING_RTL8211F
    config.miiMode = kENET_RgmiiMode;
    EnableIRQ(ENET_1G_MAC0_Tx_Rx_1_IRQn);
    EnableIRQ(ENET_1G_MAC0_Tx_Rx_2_IRQn);
#else
    config.miiMode = kENET_RmiiMode;
#endif
    /* RX zero-copy: the driver supplies/reclaims buffers via these hooks. */
    config.rxBuffAlloc = _enet_rx_alloc;
    config.rxBuffFree = _enet_rx_free;
    config.userData = &imxrt_eth_device;
    /* Set SMI to get PHY link status. */
    sysClock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus);
    config.interrupt |= kENET_TxFrameInterrupt | kENET_RxFrameInterrupt | kENET_TxBufferInterrupt | kENET_LateCollisionInterrupt;
    config.callback = _enet_callback;
    /* Initialize the pbuf wrappers: each RX buffer gets a free hook that
     * returns it to the pool when lwIP releases the pbuf. */
    for (i = 0; i < ENET_RXBUFF_NUM; i++)
    {
        imxrt_eth_device.RxPbufs[i].p.custom_free_function = _enet_rx_release;
        imxrt_eth_device.RxPbufs[i].buffer = &(imxrt_eth_device.RxDataBuff[i][0]);
        imxrt_eth_device.RxPbufs[i].buffer_used = false;
    }
    dbg_log(DBG_LOG, "init\n");
    ENET_Init(imxrt_eth_device.enet_base, &imxrt_eth_device.enet_handle, &config, &buffConfig[0], &imxrt_eth_device.dev_addr[0], sysClock);
    dbg_log(DBG_LOG, "active read\n");
    ENET_ActiveRead(imxrt_eth_device.enet_base);
#endif
}
  336. #if defined(ETH_RX_DUMP) || defined(ETH_TX_DUMP)
  337. static void packet_dump(const char *msg, const struct pbuf *p)
  338. {
  339. const struct pbuf *q;
  340. rt_uint32_t i, j;
  341. rt_uint8_t *ptr;
  342. rt_kprintf("%s %d byte\n", msg, p->tot_len);
  343. i = 0;
  344. for (q = p; q != RT_NULL; q = q->next)
  345. {
  346. ptr = q->payload;
  347. for (j = 0; j < q->len; j++)
  348. {
  349. if ((i % 8) == 0)
  350. {
  351. rt_kprintf(" ");
  352. }
  353. if ((i % 16) == 0)
  354. {
  355. rt_kprintf("\r\n");
  356. }
  357. rt_kprintf("%02x ", *ptr);
  358. i++;
  359. ptr++;
  360. }
  361. }
  362. rt_kprintf("\n\n");
  363. }
  364. #else
  365. #define packet_dump(...)
  366. #endif /* dump */
  367. /* initialize the interface */
/* initialize the interface: rt_device init hook; programs the MAC,
 * descriptor rings and callbacks via _enet_config(). Always returns RT_EOK. */
static rt_err_t rt_imxrt_eth_init(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_init...\n");
    _enet_config();
    return RT_EOK;
}
/* rt_device open hook: nothing to do, the MAC is fully set up in init. */
static rt_err_t rt_imxrt_eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_open...\n");
    return RT_EOK;
}
/* rt_device close hook: no teardown needed; the interface stays configured. */
static rt_err_t rt_imxrt_eth_close(rt_device_t dev)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_close...\n");
    return RT_EOK;
}
/* rt_device read hook: stream-style reads are not supported on an ethernet
 * device (frames flow through the lwIP netif); reports -RT_ENOSYS. */
static rt_size_t rt_imxrt_eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_read...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
/* rt_device write hook: stream-style writes are not supported on an ethernet
 * device (transmission goes through the netif TX path); reports -RT_ENOSYS. */
static rt_size_t rt_imxrt_eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    dbg_log(DBG_LOG, "rt_imxrt_eth_write...\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
  396. static rt_err_t rt_imxrt_eth_control(rt_device_t dev, int cmd, void *args)
  397. {
  398. dbg_log(DBG_LOG, "rt_imxrt_eth_control...\n");
  399. switch (cmd)
  400. {
  401. case NIOCTL_GADDR:
  402. /* get mac address */
  403. if (args)
  404. rt_memcpy(args, imxrt_eth_device.dev_addr, 6);
  405. else
  406. return -RT_ERROR;
  407. break;
  408. default:
  409. break;
  410. }
  411. return RT_EOK;
  412. }
  413. #ifdef SOC_IMXRT1170_SERIES
  414. static bool _ENET_TxDirtyRingAvailable(enet_tx_dirty_ring_t *txDirtyRing)
  415. {
  416. return !txDirtyRing->isFull;
  417. }
  418. static uint16_t _ENET_IncreaseIndex(uint16_t index, uint16_t max)
  419. {
  420. assert(index < max);
  421. /* Increase the index. */
  422. index++;
  423. if (index >= max)
  424. {
  425. index = 0;
  426. }
  427. return index;
  428. }
/* Kick the hardware to (re)scan TX ring @ringId by writing its TDARn
 * register, with the errata-007885 delay workaround where applicable. */
static void _ENET_ActiveSendRing(ENET_Type *base, uint8_t ringId)
{
    assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
    volatile uint32_t *txDesActive = NULL;
    /* Ensure previous data update is completed with Data Synchronization Barrier before activing Tx BD. */
    __DSB();
    /* Select the TX-descriptor-active register for the requested ring. */
    switch (ringId)
    {
    case 0:
        txDesActive = &(base->TDAR);
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case 1:
        txDesActive = &(base->TDAR1);
        break;
    case 2:
        txDesActive = &(base->TDAR2);
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        txDesActive = &(base->TDAR);
        break;
    }
#if defined(FSL_FEATURE_ENET_HAS_ERRATA_007885) && FSL_FEATURE_ENET_HAS_ERRATA_007885
    /* There is a TDAR race condition for mutliQ when the software sets TDAR
     * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
     * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
     * Software workaround: introduces a delay by reading the relevant ENET_TDARn_TDAR 4 times
     */
    for (uint8_t i = 0; i < 4U; i++)
    {
        if (*txDesActive == 0U)
        {
            break;
        }
    }
#endif
    /* Write to active tx descriptor */
    *txDesActive = 0;
}
  469. #else
/* Non-1170 variant: kick the hardware to (re)scan TX ring @ringId by
 * writing the TDAR mask into the matching TDARn register. */
static void _ENET_ActiveSend(ENET_Type *base, uint32_t ringId)
{
    assert(ringId < FSL_FEATURE_ENET_QUEUE);
    switch (ringId)
    {
    case 0:
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
#if FSL_FEATURE_ENET_QUEUE > 1
    case 1:
        base->TDAR1 = ENET_TDAR1_TDAR_MASK;
        break;
    case 2:
        base->TDAR2 = ENET_TDAR2_TDAR_MASK;
        break;
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
    default:
        /* Unknown ring: fall back to ring 0's register. */
        base->TDAR = ENET_TDAR_TDAR_MASK;
        break;
    }
}
  491. #endif
  492. #ifdef SOC_IMXRT1170_SERIES
/**
 * Copy a frame into the TX descriptor ring and hand it to the DMA engine
 * (RT1170 variant, adapted from the SDK's ENET_SendFrame).
 *
 * NOTE: @data is actually a const struct pbuf * despite the uint8_t* type —
 * the single-buffer path flattens the chain with pbuf_copy_partial(); the
 * multi-buffer path treats it as raw bytes (assumes a contiguous payload —
 * TODO confirm against the caller).
 *
 * @param base    ENET peripheral base address
 * @param handle  SDK driver handle holding the ring state
 * @param data    frame to send (see NOTE above)
 * @param length  total frame length in bytes
 * @param ringId  TX ring to use
 * @param tsFlag  request an IEEE1588 timestamp on this frame
 * @param context stored in the frame-info ring when TX reclaim is enabled
 * @return kStatus_Success, kStatus_ENET_TxFrameOverLen, or
 *         kStatus_ENET_TxFrameBusy when no descriptor is free
 */
static status_t _ENET_SendFrame(ENET_Type *base,
                                enet_handle_t *handle,
                                const uint8_t *data,
                                uint32_t length,
                                uint8_t ringId,
                                bool tsFlag,
                                void *context)
{
    assert(handle != NULL);
    assert(data != NULL);
    assert(FSL_FEATURE_ENET_INSTANCE_QUEUEn(base) != -1);
    assert(ringId < (uint8_t)FSL_FEATURE_ENET_INSTANCE_QUEUEn(base));
    volatile enet_tx_bd_struct_t *curBuffDescrip;
    enet_tx_bd_ring_t *txBdRing = &handle->txBdRing[ringId];
    enet_tx_dirty_ring_t *txDirtyRing = &handle->txDirtyRing[ringId];
    enet_frame_info_t *txDirty = NULL;
    uint32_t len = 0;
    uint32_t sizeleft = 0;
    uint32_t address;
    status_t result = kStatus_Success;
    uint32_t src;
    uint32_t configVal;
    bool isReturn = false;
    uint32_t primask;
    /* Check the frame length. */
    if (length > ENET_FRAME_TX_LEN_LIMITATION(base))
    {
        result = kStatus_ENET_TxFrameOverLen;
    }
    else
    {
        /* Check if the transmit buffer is ready. */
        curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
        if (0U != (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))
        {
            result = kStatus_ENET_TxFrameBusy;
        }
        /* Check txDirtyRing if need frameinfo in tx interrupt callback. */
        else if ((handle->txReclaimEnable[ringId]) && !_ENET_TxDirtyRingAvailable(txDirtyRing))
        {
            result = kStatus_ENET_TxFrameBusy;
        }
        else
        {
            /* One transmit buffer is enough for one frame. */
            if (handle->txBuffSizeAlign[ringId] >= length)
            {
                /* Copy data to the buffer for uDMA transfer. */
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                if (handle->txMaintainEnable[ringId])
                {
                    /* Flush the copied frame out of the data cache before DMA reads it. */
                    DCACHE_CleanByRange(address, length);
                }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                /* Set data length. */
                curBuffDescrip->length = (uint16_t)length;
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
                /* For enable the timestamp. */
                if (tsFlag)
                {
                    curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
                }
                else
                {
                    curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
                }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
                curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
                /* Increase the buffer descriptor address. */
                txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
                /* Add context to frame info ring */
                if (handle->txReclaimEnable[ringId])
                {
                    txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
                    txDirty->context = context;
                    txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
                    if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
                    {
                        txDirtyRing->isFull = true;
                    }
                    /* txDescUsed is also updated from the TX ISR, hence the IRQ guard. */
                    primask = DisableGlobalIRQ();
                    txBdRing->txDescUsed++;
                    EnableGlobalIRQ(primask);
                }
                /* Active the transmit buffer descriptor. */
                _ENET_ActiveSendRing(base, ringId);
            }
            else
            {
                /* One frame requires more than one transmit buffers. */
                do
                {
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
                    /* For enable the timestamp. */
                    if (tsFlag)
                    {
                        curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
                    }
                    else
                    {
                        curBuffDescrip->controlExtend1 &= (uint16_t)(~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK);
                    }
#endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
                    /* Update the size left to be transmit. */
                    sizeleft = length - len;
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                    address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
#else
                    address = (uint32_t)curBuffDescrip->buffer;
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                    src = (uint32_t)data + len;
                    /* Increase the current software index of BD */
                    txBdRing->txGenIdx = _ENET_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
                    if (sizeleft > handle->txBuffSizeAlign[ringId])
                    {
                        /* Data copy. */
                        (void)rt_memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src,
                                        handle->txBuffSizeAlign[ringId]);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                        if (handle->txMaintainEnable[ringId])
                        {
                            /* Add the cache clean maintain. */
                            DCACHE_CleanByRange(address, handle->txBuffSizeAlign[ringId]);
                        }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                        /* Data length update. */
                        curBuffDescrip->length = handle->txBuffSizeAlign[ringId];
                        len += handle->txBuffSizeAlign[ringId];
                        /* Sets the control flag: READY but not LAST — more buffers follow. */
                        configVal = (uint32_t)curBuffDescrip->control;
                        configVal &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                        configVal |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
                        curBuffDescrip->control = (uint16_t)configVal;
                        if (handle->txReclaimEnable[ringId])
                        {
                            primask = DisableGlobalIRQ();
                            txBdRing->txDescUsed++;
                            EnableGlobalIRQ(primask);
                        }
                        /* Active the transmit buffer descriptor*/
                        _ENET_ActiveSendRing(base, ringId);
                    }
                    else
                    {
                        /* Final fragment of the frame. */
                        (void)rt_memcpy((void *)(uint32_t *)address, (void *)(uint32_t *)src, sizeleft);
#if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
                        if (handle->txMaintainEnable[ringId])
                        {
                            /* Add the cache clean maintain. */
                            DCACHE_CleanByRange(address, sizeleft);
                        }
#endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
                        curBuffDescrip->length = (uint16_t)sizeleft;
                        /* Set Last buffer wrap flag. */
                        curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
                        if (handle->txReclaimEnable[ringId])
                        {
                            /* Add context to frame info ring */
                            txDirty = txDirtyRing->txDirtyBase + txDirtyRing->txGenIdx;
                            txDirty->context = context;
                            txDirtyRing->txGenIdx = _ENET_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
                            if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
                            {
                                txDirtyRing->isFull = true;
                            }
                            primask = DisableGlobalIRQ();
                            txBdRing->txDescUsed++;
                            EnableGlobalIRQ(primask);
                        }
                        /* Active the transmit buffer descriptor. */
                        _ENET_ActiveSendRing(base, ringId);
                        isReturn = true;
                        break;
                    }
                    /* Update the buffer descriptor address. */
                    curBuffDescrip = txBdRing->txBdBase + txBdRing->txGenIdx;
                } while (0U == (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
                if (isReturn == false)
                {
                    /* Ran out of ready descriptors before the whole frame was queued. */
                    result = kStatus_ENET_TxFrameBusy;
                }
            }
        }
    }
    return result;
}
  685. #else
  686. static status_t _ENET_SendFrame(ENET_Type *base, enet_handle_t *handle, const uint8_t *data, uint32_t length)
  687. {
  688. assert(handle);
  689. assert(data);
  690. volatile enet_tx_bd_struct_t *curBuffDescrip;
  691. uint32_t len = 0;
  692. uint32_t sizeleft = 0;
  693. uint32_t address;
  694. /* Check the frame length. */
  695. if (length > ENET_FRAME_MAX_FRAMELEN)
  696. {
  697. return kStatus_ENET_TxFrameOverLen;
  698. }
  699. /* Check if the transmit buffer is ready. */
  700. curBuffDescrip = handle->txBdCurrent[0];
  701. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
  702. {
  703. return kStatus_ENET_TxFrameBusy;
  704. }
  705. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  706. bool isPtpEventMessage = false;
  707. /* Check PTP message with the PTP header. */
  708. isPtpEventMessage = ENET_Ptp1588ParseFrame(data, NULL, true);
  709. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  710. /* One transmit buffer is enough for one frame. */
  711. if (handle->txBuffSizeAlign[0] >= length)
  712. {
  713. /* Copy data to the buffer for uDMA transfer. */
  714. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  715. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  716. #else
  717. address = (uint32_t)curBuffDescrip->buffer;
  718. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  719. pbuf_copy_partial((const struct pbuf *)data, (void *)address, length, 0);
  720. /* Set data length. */
  721. curBuffDescrip->length = length;
  722. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  723. /* For enable the timestamp. */
  724. if (isPtpEventMessage)
  725. {
  726. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  727. }
  728. else
  729. {
  730. curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  731. }
  732. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  733. curBuffDescrip->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
  734. /* Increase the buffer descriptor address. */
  735. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
  736. {
  737. handle->txBdCurrent[0] = handle->txBdBase[0];
  738. }
  739. else
  740. {
  741. handle->txBdCurrent[0]++;
  742. }
  743. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  744. /* Add the cache clean maintain. */
  745. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  746. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  747. #else
  748. address = (uint32_t)curBuffDescrip->buffer;
  749. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  750. DCACHE_CleanByRange(address, length);
  751. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  752. /* Active the transmit buffer descriptor. */
  753. _ENET_ActiveSend(base, 0);
  754. return kStatus_Success;
  755. }
  756. else
  757. {
  758. /* One frame requires more than one transmit buffers. */
  759. do
  760. {
  761. #ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
  762. /* For enable the timestamp. */
  763. if (isPtpEventMessage)
  764. {
  765. curBuffDescrip->controlExtend1 |= ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  766. }
  767. else
  768. {
  769. curBuffDescrip->controlExtend1 &= ~ENET_BUFFDESCRIPTOR_TX_TIMESTAMP_MASK;
  770. }
  771. #endif /* ENET_ENHANCEDBUFFERDESCRIPTOR_MODE */
  772. /* Increase the buffer descriptor address. */
  773. if (curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
  774. {
  775. handle->txBdCurrent[0] = handle->txBdBase[0];
  776. }
  777. else
  778. {
  779. handle->txBdCurrent[0]++;
  780. }
  781. /* update the size left to be transmit. */
  782. sizeleft = length - len;
  783. if (sizeleft > handle->txBuffSizeAlign[0])
  784. {
  785. /* Data copy. */
  786. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  787. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  788. #else
  789. address = (uint32_t)curBuffDescrip->buffer;
  790. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  791. rt_memcpy((void *)address, data + len, handle->txBuffSizeAlign[0]);
  792. /* Data length update. */
  793. curBuffDescrip->length = handle->txBuffSizeAlign[0];
  794. len += handle->txBuffSizeAlign[0];
  795. /* Sets the control flag. */
  796. curBuffDescrip->control &= ~ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  797. curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK;
  798. /* Active the transmit buffer descriptor*/
  799. _ENET_ActiveSend(base, 0);
  800. }
  801. else
  802. {
  803. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  804. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  805. #else
  806. address = (uint32_t)curBuffDescrip->buffer;
  807. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  808. rt_memcpy((void *)address, data + len, sizeleft);
  809. curBuffDescrip->length = sizeleft;
  810. /* Set Last buffer wrap flag. */
  811. curBuffDescrip->control |= ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK;
  812. #if defined(FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL) && FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL
  813. /* Add the cache clean maintain. */
  814. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  815. address = MEMORY_ConvertMemoryMapAddress((uint32_t)curBuffDescrip->buffer, kMEMORY_DMA2Local);
  816. #else
  817. address = (uint32_t)curBuffDescrip->buffer;
  818. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
  819. DCACHE_CleanByRange(address, handle->txBuffSizeAlign[0]);
  820. #endif /* FSL_SDK_ENABLE_DRIVER_CACHE_CONTROL */
  821. /* Active the transmit buffer descriptor. */
  822. _ENET_ActiveSend(base, 0);
  823. return kStatus_Success;
  824. }
  825. /* Get the current buffer descriptor address. */
  826. curBuffDescrip = handle->txBdCurrent[0];
  827. } while (!(curBuffDescrip->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK));
  828. return kStatus_ENET_TxFrameBusy;
  829. }
  830. }
  831. #endif
  832. /* ethernet device interface */
  833. /* transmit packet. */
  834. rt_err_t rt_imxrt_eth_tx(rt_device_t dev, struct pbuf *p)
  835. {
  836. rt_err_t result = RT_EOK;
  837. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  838. RT_ASSERT(p != NULL);
  839. RT_ASSERT(enet_handle != RT_NULL);
  840. dbg_log(DBG_LOG, "rt_imxrt_eth_tx: %d\n", p->len);
  841. #ifdef ETH_TX_DUMP
  842. packet_dump("send", p);
  843. #endif
  844. do
  845. {
  846. #ifdef SOC_IMXRT1170_SERIES
  847. result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len, RING_ID, false, NULL);
  848. #else
  849. result = _ENET_SendFrame(imxrt_eth_device.enet_base, enet_handle, (const uint8_t *)p, p->tot_len);
  850. #endif
  851. if (result == kStatus_ENET_TxFrameBusy)
  852. {
  853. imxrt_eth_device.tx_is_waiting = RT_TRUE;
  854. rt_sem_take(&imxrt_eth_device.tx_wait, RT_WAITING_FOREVER);
  855. }
  856. } while (result == kStatus_ENET_TxFrameBusy);
  857. return RT_EOK;
  858. }
  859. /* reception packet. */
  860. struct pbuf *rt_imxrt_eth_rx(rt_device_t dev)
  861. {
  862. uint32_t length = 0;
  863. status_t status;
  864. struct pbuf *p = RT_NULL;
  865. enet_handle_t *enet_handle = &imxrt_eth_device.enet_handle;
  866. ENET_Type *enet_base = imxrt_eth_device.enet_base;
  867. enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;
  868. /* Get the Frame size */
  869. #ifdef SOC_IMXRT1170_SERIES
  870. status = ENET_GetRxFrameSize(enet_handle, &length, RING_ID);
  871. #else
  872. status = ENET_GetRxFrameSize(enet_handle, &length);
  873. #endif
  874. /* Call ENET_ReadFrame when there is a received frame. */
  875. if (length != 0)
  876. {
  877. /* Received valid frame. Deliver the rx buffer with the size equal to length. */
  878. p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
  879. if (p != NULL)
  880. {
  881. #ifdef SOC_IMXRT1170_SERIES
  882. status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length, RING_ID, NULL);
  883. #else
  884. status = ENET_ReadFrame(enet_base, enet_handle, p->payload, length);
  885. #endif
  886. if (status == kStatus_Success)
  887. {
  888. #ifdef ETH_RX_DUMP
  889. packet_dump("recv", p);
  890. #endif
  891. return p;
  892. }
  893. else
  894. {
  895. dbg_log(DBG_LOG, " A frame read failed\n");
  896. pbuf_free(p);
  897. }
  898. }
  899. else
  900. {
  901. dbg_log(DBG_LOG, " pbuf_alloc faild\n");
  902. }
  903. }
  904. else if (status == kStatus_ENET_RxFrameError)
  905. {
  906. dbg_log(DBG_WARNING, "ENET_GetRxFrameSize: kStatus_ENET_RxFrameError\n");
  907. /* Update the received buffer when error happened. */
  908. #ifdef SOC_IMXRT1170_SERIES
  909. /* Get the error information of the received g_frame. */
  910. ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic, RING_ID);
  911. /* update the receive buffer. */
  912. ENET_ReadFrame(enet_base, enet_handle, NULL, 0, RING_ID, NULL);
  913. #else
  914. /* Get the error information of the received g_frame. */
  915. ENET_GetRxErrBeforeReadFrame(enet_handle, error_statistic);
  916. /* update the receive buffer. */
  917. ENET_ReadFrame(enet_base, enet_handle, NULL, 0);
  918. #endif
  919. }
  920. ENET_EnableInterrupts(enet_base, kENET_RxFrameInterrupt);
  921. return NULL;
  922. }
  923. #ifdef BSP_USING_PHY
  924. static struct rt_phy_device *phy_dev = RT_NULL;
  925. static void phy_monitor_thread_entry(void *parameter)
  926. {
  927. rt_uint32_t speed;
  928. rt_uint32_t duplex;
  929. rt_bool_t link = RT_FALSE;
  930. #ifdef SOC_IMXRT1170_SERIES
  931. #ifdef PHY_USING_RTL8211F
  932. phy_dev = (struct rt_phy_device *)rt_device_find("rtl8211f");
  933. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  934. {
  935. // TODO print warning information
  936. LOG_E("Can not find phy device called \"rtl8211f\"");
  937. return;
  938. }
  939. #else
  940. phy_dev = (struct rt_phy_device *)rt_device_find("ksz8081");
  941. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  942. {
  943. // TODO print warning information
  944. LOG_E("Can not find phy device called \"ksz8081\"");
  945. return;
  946. }
  947. #endif
  948. #else
  949. phy_dev = (struct rt_phy_device *)rt_device_find("rtt-phy");
  950. if ((RT_NULL == phy_dev) || (RT_NULL == phy_dev->ops))
  951. {
  952. // TODO print warning information
  953. LOG_E("Can not find phy device called \"rtt-phy\"");
  954. return;
  955. }
  956. #endif
  957. if (RT_NULL == phy_dev->ops->init)
  958. {
  959. LOG_E("phy driver error!");
  960. return;
  961. }
  962. #ifdef SOC_IMXRT1170_SERIES
  963. #ifdef PHY_USING_RTL8211F
  964. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_RTL8211F_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  965. #else
  966. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_KSZ8081_ADDRESS, CLOCK_GetRootClockFreq(kCLOCK_Root_Bus));
  967. #endif
  968. #else
  969. rt_phy_status status = phy_dev->ops->init(imxrt_eth_device.enet_base, PHY_DEVICE_ADDRESS, CLOCK_GetFreq(kCLOCK_AhbClk));
  970. #endif
  971. if (PHY_STATUS_OK != status)
  972. {
  973. LOG_E("Phy device initialize unsuccessful!\n");
  974. return;
  975. }
  976. LOG_I("Phy device initialize successfully!\n");
  977. while (1)
  978. {
  979. rt_bool_t new_link = RT_FALSE;
  980. rt_phy_status status = phy_dev->ops->get_link_status(&new_link);
  981. if ((PHY_STATUS_OK == status) && (link != new_link))
  982. {
  983. link = new_link;
  984. if (link) // link up
  985. {
  986. phy_dev->ops->get_link_speed_duplex(&speed, &duplex);
  987. if (PHY_SPEED_10M == speed)
  988. {
  989. dbg_log(DBG_LOG, "10M\n");
  990. }
  991. else if (PHY_SPEED_100M == speed)
  992. {
  993. dbg_log(DBG_LOG, "100M\n");
  994. }
  995. else
  996. {
  997. dbg_log(DBG_LOG, "1000M\n");
  998. }
  999. if (PHY_HALF_DUPLEX == duplex)
  1000. {
  1001. dbg_log(DBG_LOG, "half dumplex\n");
  1002. }
  1003. else
  1004. {
  1005. dbg_log(DBG_LOG, "full dumplex\n");
  1006. }
  1007. if ((imxrt_eth_device.speed != (enet_mii_speed_t)speed) || (imxrt_eth_device.duplex != (enet_mii_duplex_t)duplex))
  1008. {
  1009. imxrt_eth_device.speed = (enet_mii_speed_t)speed;
  1010. imxrt_eth_device.duplex = (enet_mii_duplex_t)duplex;
  1011. dbg_log(DBG_LOG, "link up, and update eth mode.\n");
  1012. rt_imxrt_eth_init((rt_device_t)&imxrt_eth_device);
  1013. }
  1014. else
  1015. {
  1016. dbg_log(DBG_LOG, "link up, eth not need re-config.\n");
  1017. }
  1018. dbg_log(DBG_LOG, "link up.\n");
  1019. eth_device_linkchange(&imxrt_eth_device.parent, RT_TRUE);
  1020. }
  1021. else
  1022. {
  1023. dbg_log(DBG_LOG, "link down.\n");
  1024. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  1025. }
  1026. }
  1027. rt_thread_delay(RT_TICK_PER_SECOND * 2);
  1028. }
  1029. }
  1030. #endif
  1031. static int rt_hw_imxrt_eth_init(void)
  1032. {
  1033. rt_err_t state;
  1034. _enet_clk_init();
  1035. #ifdef PHY_USING_RTL8211F
  1036. /* NXP (Freescale) MAC OUI */
  1037. imxrt_eth_device.dev_addr[0] = 0x54;
  1038. imxrt_eth_device.dev_addr[1] = 0x27;
  1039. imxrt_eth_device.dev_addr[2] = 0x8d;
  1040. /* generate MAC addr from 96bit unique ID (only for test). */
  1041. imxrt_eth_device.dev_addr[3] = 0x11;
  1042. imxrt_eth_device.dev_addr[4] = 0x22;
  1043. imxrt_eth_device.dev_addr[5] = 0x33;
  1044. imxrt_eth_device.speed = kENET_MiiSpeed1000M;
  1045. imxrt_eth_device.duplex = kENET_MiiFullDuplex;
  1046. imxrt_eth_device.enet_base = ENET_1G;
  1047. #else
  1048. /* NXP (Freescale) MAC OUI */
  1049. imxrt_eth_device.dev_addr[0] = 0x00;
  1050. imxrt_eth_device.dev_addr[1] = 0x04;
  1051. imxrt_eth_device.dev_addr[2] = 0x9F;
  1052. /* generate MAC addr from 96bit unique ID (only for test). */
  1053. imxrt_eth_device.dev_addr[3] = 0x05;
  1054. imxrt_eth_device.dev_addr[4] = 0x44;
  1055. imxrt_eth_device.dev_addr[5] = 0xE5;
  1056. imxrt_eth_device.speed = kENET_MiiSpeed100M;
  1057. imxrt_eth_device.duplex = kENET_MiiFullDuplex;
  1058. imxrt_eth_device.enet_base = ENET;
  1059. #endif
  1060. imxrt_eth_device.parent.parent.init = rt_imxrt_eth_init;
  1061. imxrt_eth_device.parent.parent.open = rt_imxrt_eth_open;
  1062. imxrt_eth_device.parent.parent.close = rt_imxrt_eth_close;
  1063. imxrt_eth_device.parent.parent.read = rt_imxrt_eth_read;
  1064. imxrt_eth_device.parent.parent.write = rt_imxrt_eth_write;
  1065. imxrt_eth_device.parent.parent.control = rt_imxrt_eth_control;
  1066. imxrt_eth_device.parent.parent.user_data = RT_NULL;
  1067. imxrt_eth_device.parent.eth_rx = rt_imxrt_eth_rx;
  1068. imxrt_eth_device.parent.eth_tx = rt_imxrt_eth_tx;
  1069. dbg_log(DBG_LOG, "sem init: tx_wait\r\n");
  1070. /* init tx semaphore */
  1071. rt_sem_init(&imxrt_eth_device.tx_wait, "tx_wait", 0, RT_IPC_FLAG_FIFO);
  1072. dbg_log(DBG_LOG, "sem init: buff_wait\r\n");
  1073. /* init tx semaphore */
  1074. rt_sem_init(&imxrt_eth_device.buff_wait, "buff_wait", 1, RT_IPC_FLAG_FIFO);
  1075. /* register eth device */
  1076. dbg_log(DBG_LOG, "eth_device_init start\r\n");
  1077. state = eth_device_init(&(imxrt_eth_device.parent), "e0");
  1078. if (RT_EOK == state)
  1079. {
  1080. dbg_log(DBG_LOG, "eth_device_init success\r\n");
  1081. }
  1082. else
  1083. {
  1084. dbg_log(DBG_LOG, "eth_device_init faild: %d\r\n", state);
  1085. }
  1086. eth_device_linkchange(&imxrt_eth_device.parent, RT_FALSE);
  1087. /* start phy monitor */
  1088. {
  1089. #ifdef BSP_USING_PHY
  1090. rt_thread_t tid;
  1091. tid = rt_thread_create("phy",
  1092. phy_monitor_thread_entry,
  1093. RT_NULL,
  1094. 4096,
  1095. RT_THREAD_PRIORITY_MAX - 2,
  1096. 2);
  1097. if (tid != RT_NULL)
  1098. rt_thread_startup(tid);
  1099. #endif
  1100. }
  1101. return state;
  1102. }
  1103. INIT_DEVICE_EXPORT(rt_hw_imxrt_eth_init);
  1104. #endif
  1105. #if defined(RT_USING_FINSH) && defined(RT_USING_PHY)
  1106. #include <finsh.h>
  1107. void phy_read(rt_uint32_t phy_reg)
  1108. {
  1109. rt_uint32_t data;
  1110. rt_phy_status status = phy_dev->ops->read(phy_reg, &data);
  1111. if (PHY_STATUS_OK == status)
  1112. {
  1113. rt_kprintf("PHY_Read: %02X --> %08X", phy_reg, data);
  1114. }
  1115. else
  1116. {
  1117. rt_kprintf("PHY_Read: %02X --> faild", phy_reg);
  1118. }
  1119. }
  1120. void phy_write(rt_uint32_t phy_reg, rt_uint32_t data)
  1121. {
  1122. rt_phy_status status = phy_dev->ops->write(phy_reg, data);
  1123. if (PHY_STATUS_OK == status)
  1124. {
  1125. rt_kprintf("PHY_Write: %02X --> %08X\n", phy_reg, data);
  1126. }
  1127. else
  1128. {
  1129. rt_kprintf("PHY_Write: %02X --> faild\n", phy_reg);
  1130. }
  1131. }
  1132. void phy_dump(void)
  1133. {
  1134. rt_uint32_t data;
  1135. rt_phy_status status;
  1136. int i;
  1137. for (i = 0; i < 32; i++)
  1138. {
  1139. status = phy_dev->ops->read(i, &data);
  1140. if (PHY_STATUS_OK != status)
  1141. {
  1142. rt_kprintf("phy_dump: %02X --> faild", i);
  1143. break;
  1144. }
  1145. if (i % 8 == 7)
  1146. {
  1147. rt_kprintf("%02X --> %08X ", i, data);
  1148. }
  1149. else
  1150. {
  1151. rt_kprintf("%02X --> %08X\n", i, data);
  1152. }
  1153. }
  1154. }
  1155. #endif
  1156. #if defined(RT_USING_FINSH) && defined(RT_USING_LWIP)
/* FinSH helper: print every ENET peripheral register as "NAME(address): value".
 * Registers are grouped below as: core control/status, MAC addressing,
 * FIFO thresholds, RMON/IEEE tx counters, RMON/IEEE rx counters, and the
 * IEEE 1588 timer block. */
void enet_reg_dump(void)
{
    ENET_Type *enet_base = imxrt_eth_device.enet_base;

/* Print one register: name, its memory-mapped address, and current value. */
#define DUMP_REG(__REG) \
    rt_kprintf("%s(%08X): %08X\n", #__REG, (uint32_t)&enet_base->__REG, enet_base->__REG)

    /* Core control / status. */
    DUMP_REG(EIR);
    DUMP_REG(EIMR);
    DUMP_REG(RDAR);
    DUMP_REG(TDAR);
    DUMP_REG(ECR);
    DUMP_REG(MMFR);
    DUMP_REG(MSCR);
    DUMP_REG(MIBC);
    DUMP_REG(RCR);
    DUMP_REG(TCR);
    /* MAC address / pause / interrupt coalescing. */
    DUMP_REG(PALR);
    DUMP_REG(PAUR);
    DUMP_REG(OPD);
    DUMP_REG(TXIC);
    DUMP_REG(RXIC);
    DUMP_REG(IAUR);
    DUMP_REG(IALR);
    DUMP_REG(GAUR);
    DUMP_REG(GALR);
    /* FIFO / descriptor-ring configuration. */
    DUMP_REG(TFWR);
    DUMP_REG(RDSR);
    DUMP_REG(TDSR);
    DUMP_REG(MRBR);
    DUMP_REG(RSFL);
    DUMP_REG(RSEM);
    DUMP_REG(RAEM);
    DUMP_REG(RAFL);
    DUMP_REG(TSEM);
    DUMP_REG(TAEM);
    DUMP_REG(TAFL);
    DUMP_REG(TIPG);
    DUMP_REG(FTRL);
    DUMP_REG(TACC);
    DUMP_REG(RACC);
    /* Transmit statistics counters. */
    // DUMP_REG(RMON_T_DROP);
    DUMP_REG(RMON_T_PACKETS);
    DUMP_REG(RMON_T_BC_PKT);
    DUMP_REG(RMON_T_MC_PKT);
    DUMP_REG(RMON_T_CRC_ALIGN);
    DUMP_REG(RMON_T_UNDERSIZE);
    DUMP_REG(RMON_T_OVERSIZE);
    DUMP_REG(RMON_T_FRAG);
    DUMP_REG(RMON_T_JAB);
    DUMP_REG(RMON_T_COL);
    DUMP_REG(RMON_T_P64);
    DUMP_REG(RMON_T_P65TO127);
    DUMP_REG(RMON_T_P128TO255);
    DUMP_REG(RMON_T_P256TO511);
    DUMP_REG(RMON_T_P512TO1023);
    DUMP_REG(RMON_T_P1024TO2047);
    DUMP_REG(RMON_T_P_GTE2048);
    DUMP_REG(RMON_T_OCTETS);
    DUMP_REG(IEEE_T_DROP);
    DUMP_REG(IEEE_T_FRAME_OK);
    DUMP_REG(IEEE_T_1COL);
    DUMP_REG(IEEE_T_MCOL);
    DUMP_REG(IEEE_T_DEF);
    DUMP_REG(IEEE_T_LCOL);
    DUMP_REG(IEEE_T_EXCOL);
    DUMP_REG(IEEE_T_MACERR);
    DUMP_REG(IEEE_T_CSERR);
    DUMP_REG(IEEE_T_SQE);
    DUMP_REG(IEEE_T_FDXFC);
    DUMP_REG(IEEE_T_OCTETS_OK);
    /* Receive statistics counters. */
    DUMP_REG(RMON_R_PACKETS);
    DUMP_REG(RMON_R_BC_PKT);
    DUMP_REG(RMON_R_MC_PKT);
    DUMP_REG(RMON_R_CRC_ALIGN);
    DUMP_REG(RMON_R_UNDERSIZE);
    DUMP_REG(RMON_R_OVERSIZE);
    DUMP_REG(RMON_R_FRAG);
    DUMP_REG(RMON_R_JAB);
    // DUMP_REG(RMON_R_RESVD_0);
    DUMP_REG(RMON_R_P64);
    DUMP_REG(RMON_R_P65TO127);
    DUMP_REG(RMON_R_P128TO255);
    DUMP_REG(RMON_R_P256TO511);
    DUMP_REG(RMON_R_P512TO1023);
    DUMP_REG(RMON_R_P1024TO2047);
    DUMP_REG(RMON_R_P_GTE2048);
    DUMP_REG(RMON_R_OCTETS);
    DUMP_REG(IEEE_R_DROP);
    DUMP_REG(IEEE_R_FRAME_OK);
    DUMP_REG(IEEE_R_CRC);
    DUMP_REG(IEEE_R_ALIGN);
    DUMP_REG(IEEE_R_MACERR);
    DUMP_REG(IEEE_R_FDXFC);
    DUMP_REG(IEEE_R_OCTETS_OK);
    /* IEEE 1588 adjustable timer block. */
    DUMP_REG(ATCR);
    DUMP_REG(ATVR);
    DUMP_REG(ATOFF);
    DUMP_REG(ATPER);
    DUMP_REG(ATCOR);
    DUMP_REG(ATINC);
    DUMP_REG(ATSTMP);
    DUMP_REG(TGSR);
}
/* FinSH debug helper: manually set the ENET interrupt pending in the NVIC,
 * forcing the ENET ISR to run (e.g. to unstick a missed interrupt). */
void enet_nvic_tog(void)
{
    NVIC_SetPendingIRQ(ENET_IRQn);
}
/* FinSH helper: print the driver's accumulated ENET error counters
 * (filled in by ENET_GetRxErrBeforeReadFrame in the rx path). Despite the
 * name it also covers tx error counters when enhanced buffer descriptors
 * are enabled. */
void enet_rx_stat(void)
{
    enet_data_error_stats_t *error_statistic = &imxrt_eth_device.error_statistic;

/* Print one counter per line as "name : hex value". */
#define DUMP_STAT(__VAR) \
    rt_kprintf("%-25s: %08X\n", #__VAR, error_statistic->__VAR);

    DUMP_STAT(statsRxLenGreaterErr);
    DUMP_STAT(statsRxAlignErr);
    DUMP_STAT(statsRxFcsErr);
    DUMP_STAT(statsRxOverRunErr);
    DUMP_STAT(statsRxTruncateErr);
#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE
    /* Extended counters only exist in enhanced buffer-descriptor mode. */
    DUMP_STAT(statsRxProtocolChecksumErr);
    DUMP_STAT(statsRxIpHeadChecksumErr);
    DUMP_STAT(statsRxMacErr);
    DUMP_STAT(statsRxPhyErr);
    DUMP_STAT(statsRxCollisionErr);
    DUMP_STAT(statsTxErr);
    DUMP_STAT(statsTxFrameErr);
    DUMP_STAT(statsTxOverFlowErr);
    DUMP_STAT(statsTxLateCollisionErr);
    DUMP_STAT(statsTxExcessCollisionErr);
    DUMP_STAT(statsTxUnderFlowErr);
    DUMP_STAT(statsTxTsErr);
#endif
}
  1288. void enet_buf_info(void)
  1289. {
  1290. int i = 0;
  1291. for (i = 0; i < ENET_RXBD_NUM; i++)
  1292. {
  1293. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1294. i,
  1295. g_rxBuffDescrip[i].length,
  1296. g_rxBuffDescrip[i].control,
  1297. g_rxBuffDescrip[i].buffer);
  1298. }
  1299. for (i = 0; i < ENET_TXBD_NUM; i++)
  1300. {
  1301. rt_kprintf("%d: length: %-8d, control: %04X, buffer:%p\n",
  1302. i,
  1303. g_txBuffDescrip[i].length,
  1304. g_txBuffDescrip[i].control,
  1305. g_txBuffDescrip[i].buffer);
  1306. }
  1307. }
  1308. FINSH_FUNCTION_EXPORT(phy_read, read phy register);
  1309. FINSH_FUNCTION_EXPORT(phy_write, write phy register);
  1310. FINSH_FUNCTION_EXPORT(phy_dump, dump phy registers);
  1311. FINSH_FUNCTION_EXPORT(enet_reg_dump, dump enet registers);
  1312. FINSH_FUNCTION_EXPORT(enet_nvic_tog, toggle enet nvic pendding bit);
  1313. FINSH_FUNCTION_EXPORT(enet_rx_stat, dump enet rx statistic);
  1314. FINSH_FUNCTION_EXPORT(enet_buf_info, dump enet tx and tx buffer descripter);
  1315. #endif