/* drv_enet.c */
  1. /*
  2. * Copyright (c) 2021-2024 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-01-11 HPMicro First version
  9. * 2022-07-10 HPMicro Driver optimization for multiple instances
  10. */
  11. #include <rtdevice.h>
  12. #ifdef BSP_USING_ETH
  13. #include <rtdbg.h>
  14. #include "drv_enet.h"
  15. #include "hpm_otp_drv.h"
#ifdef BSP_USING_ETH0
/* ENET0 DMA descriptor rings: placed in non-cacheable memory so the CPU and
 * the DMA engine always observe coherent descriptor contents. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet0_dma_rx_desc_tab[ENET0_RX_BUFF_COUNT]; /* Ethernet0 Rx DMA Descriptor */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet0_dma_tx_desc_tab[ENET0_TX_BUFF_COUNT]; /* Ethernet0 Tx DMA Descriptor */

/* ENET0 frame buffers, placed in the ".fast_ram" section. */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_rx_buff[ENET0_RX_BUFF_COUNT][ENET0_RX_BUFF_SIZE]; /* Ethernet0 Receive Buffer */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_tx_buff[ENET0_TX_BUFF_COUNT][ENET0_TX_BUFF_SIZE]; /* Ethernet0 Transmit Buffer */

/* RT-Thread device object and driver state for ENET0. */
struct eth_device eth0_dev;
static enet_device enet0_dev;

/* Rx/Tx buffer geometry handed to the SDK descriptor initialization. */
static enet_buff_config_t enet0_rx_buff_cfg = {.buffer = (uint32_t)enet0_rx_buff,
                                               .count = ENET0_RX_BUFF_COUNT,
                                               .size = ENET0_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet0_tx_buff_cfg = {.buffer = (uint32_t)enet0_tx_buff,
                                               .count = ENET0_TX_BUFF_COUNT,
                                               .size = ENET0_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
/* Initial PTP timestamp {seconds, nanoseconds} and PTP engine configuration
 * (digital rollover, fine update method, addend start value). */
static enet_ptp_ts_update_t ptp_timestamp0 = {0, 0};
static enet_ptp_config_t ptp_config0 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate ENET0 instance record consumed by rt_hw_eth_init(). */
static hpm_enet_t enet0 = {.name = "E0",
                           .base = HPM_ENET0,
                           .irq_num = IRQn_ENET0,
                           .inf = BOARD_ENET0_INF,
                           .eth_dev = &eth0_dev,
                           .enet_dev = &enet0_dev,
                           .rx_buff_cfg = &enet0_rx_buff_cfg,
                           .tx_buff_cfg = &enet0_tx_buff_cfg,
                           .dma_rx_desc_tab = enet0_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet0_dma_tx_desc_tab,
#if !BOARD_ENET0_INF
                           /* RMII interface: reference-clock selection. */
                           .int_refclk = BOARD_ENET0_INT_REF_CLK,
#else
                           /* RGMII interface: per-board TX/RX clock delays. */
                           .tx_delay = BOARD_ENET0_TX_DLY,
                           .rx_delay = BOARD_ENET0_RX_DLY,
#endif
#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET0_PTP_CLOCK,
                           .ptp_config = &ptp_config0,
                           .ptp_timestamp = &ptp_timestamp0
#endif
                          };
#endif
/* Fallback MAC addresses (one entry per controller), taken from the
 * MACx_ADDRx macro definitions; used when the OTP holds no valid address. */
mac_init_t mac_init[] = {
    {MAC0_ADDR0, MAC0_ADDR1, MAC0_ADDR2, MAC0_ADDR3, MAC0_ADDR4, MAC0_ADDR5},
    {MAC1_ADDR0, MAC1_ADDR1, MAC1_ADDR2, MAC1_ADDR3, MAC1_ADDR4, MAC1_ADDR5}
};
#ifdef BSP_USING_ETH1
/* ENET1 DMA descriptor rings: placed in non-cacheable memory so the CPU and
 * the DMA engine always observe coherent descriptor contents. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet1_dma_rx_desc_tab[ENET1_RX_BUFF_COUNT]; /* Ethernet1 Rx DMA Descriptor */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet1_dma_tx_desc_tab[ENET1_TX_BUFF_COUNT]; /* Ethernet1 Tx DMA Descriptor */

/* ENET1 frame buffers, placed in the ".fast_ram" section. */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_rx_buff[ENET1_RX_BUFF_COUNT][ENET1_RX_BUFF_SIZE]; /* Ethernet1 Receive Buffer */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_tx_buff[ENET1_TX_BUFF_COUNT][ENET1_TX_BUFF_SIZE]; /* Ethernet1 Transmit Buffer */

/* RT-Thread device object and driver state for ENET1. */
struct eth_device eth1_dev;
static enet_device enet1_dev;

/* Rx/Tx buffer geometry handed to the SDK descriptor initialization. */
static enet_buff_config_t enet1_rx_buff_cfg = {.buffer = (uint32_t)enet1_rx_buff,
                                               .count = ENET1_RX_BUFF_COUNT,
                                               .size = ENET1_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet1_tx_buff_cfg = {.buffer = (uint32_t)enet1_tx_buff,
                                               .count = ENET1_TX_BUFF_COUNT,
                                               .size = ENET1_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
/* Initial PTP timestamp {seconds, nanoseconds} and PTP engine configuration
 * (digital rollover, fine update method, addend start value). */
static enet_ptp_ts_update_t ptp_timestamp1 = {0, 0};
static enet_ptp_config_t ptp_config1 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate ENET1 instance record consumed by rt_hw_eth_init(). */
static hpm_enet_t enet1 = {.name = "E1",
                           .base = HPM_ENET1,
                           .irq_num = IRQn_ENET1,
                           .inf = BOARD_ENET1_INF,
                           .eth_dev = &eth1_dev,
                           .enet_dev = &enet1_dev,
                           .rx_buff_cfg = &enet1_rx_buff_cfg,
                           .tx_buff_cfg = &enet1_tx_buff_cfg,
                           .dma_rx_desc_tab = enet1_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet1_dma_tx_desc_tab,
#if !BOARD_ENET1_INF
                           /* RMII interface: reference-clock selection. */
                           .int_refclk = BOARD_ENET1_INT_REF_CLK,
#else
                           /* RGMII interface: per-board TX/RX clock delays. */
                           .tx_delay = BOARD_ENET1_TX_DLY,
                           .rx_delay = BOARD_ENET1_RX_DLY,
#endif
#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET1_PTP_CLOCK,
                           .ptp_config = &ptp_config1,
                           .ptp_timestamp = &ptp_timestamp1
#endif
                          };
#endif
/* Table of all compiled-in ENET instances; rt_hw_eth_init() iterates it. */
static hpm_enet_t *s_geths[] = {
#ifdef BSP_USING_ETH0
    &enet0,
#endif
#ifdef BSP_USING_ETH1
    &enet1
#endif
};
/**
 * Resolve the MAC address for an ENET controller.
 *
 * The OTP shadow registers are consulted first; if the value there is
 * invalid (per IS_MAC_INVALID), the compile-time address from mac_init[]
 * is used instead. Declared ATTR_WEAK so a board/application can override
 * the whole policy.
 *
 * @param ptr controller base: HPM_ENET0 selects slot 0, anything else slot 1
 * @param mac out: buffer receiving the 6-byte MAC address
 * @return ENET_MAC_ADDR_FROM_OTP_MAC when the OTP address was used,
 *         ENET_MAC_ADDR_FROM_MACRO when the macro fallback was used,
 *         ENET_MAC_ADDR_PARA_ERROR when mac is NULL
 */
ATTR_WEAK uint8_t enet_get_mac_address(ENET_Type *ptr, uint8_t *mac)
{
    uint32_t macl, mach;
    uint8_t i;

    /* Instance index: 0 for ENET0, 1 for everything else. */
    i = (ptr == HPM_ENET0) ? 0 : 1;

    if (mac == NULL) {
        return ENET_MAC_ADDR_PARA_ERROR;
    }

    /* load mac address from OTP MAC area */
    /* NOTE(review): the two 6-byte MACs appear packed into three consecutive
     * 32-bit OTP words starting at OTP_SOC_MAC0_IDX (MAC1 begins at byte 2 of
     * the second word) — confirm against the SoC OTP memory map. */
    if (i == 0) {
        macl = otp_read_from_shadow(OTP_SOC_MAC0_IDX);
        mach = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 1);
        mac[0] = (macl >> 0) & 0xff;
        mac[1] = (macl >> 8) & 0xff;
        mac[2] = (macl >> 16) & 0xff;
        mac[3] = (macl >> 24) & 0xff;
        mac[4] = (mach >> 0) & 0xff;
        mac[5] = (mach >> 8) & 0xff;
    } else {
        macl = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 1);
        mach = otp_read_from_shadow(OTP_SOC_MAC0_IDX + 2);
        mac[0] = (macl >> 16) & 0xff;
        mac[1] = (macl >> 24) & 0xff;
        mac[2] = (mach >> 0) & 0xff;
        mac[3] = (mach >> 8) & 0xff;
        mac[4] = (mach >> 16) & 0xff;
        mac[5] = (mach >> 24) & 0xff;
    }

    /* Accept the OTP address only when it passes validity screening. */
    if (!IS_MAC_INVALID(mac)) {
        return ENET_MAC_ADDR_FROM_OTP_MAC;
    }

    /* load MAC address from MACRO definitions */
    memcpy(mac, &mac_init[i], ENET_MAC);

    return ENET_MAC_ADDR_FROM_MACRO;
}
/**
 * Bring up one ENET controller: interface clocking, MAC/DMA controller,
 * optional PTP module, then enable the interrupt line.
 *
 * The call order is significant — the RMII/RGMII clock setup must precede
 * enet_controller_init().
 *
 * @param init fully populated driver state (instance, descriptors, configs)
 * @return RT_EOK (the SDK calls used here report no failure)
 */
static rt_err_t hpm_enet_init(enet_device *init)
{
    if (init->media_interface == enet_inf_rmii)
    {
        /* Initialize reference clock */
        board_init_enet_rmii_reference_clock(init->instance, init->int_refclk);
    }

#if ENET_SOC_RGMII_EN
    /* Set RGMII clock delay */
    if (init->media_interface == enet_inf_rgmii)
    {
        enet_rgmii_enable_clock(init->instance);
        enet_rgmii_set_clock_delay(init->instance, init->tx_delay, init->rx_delay);
    }
#endif

    /* Get the default interrupt config */
    enet_get_default_interrupt_config(init->instance, &init->int_config);

    /* Initialize eth controller */
    enet_controller_init(init->instance, init->media_interface, &init->desc, &init->mac_config, &init->int_config);

    /* Disable LPI interrupt */
    enet_disable_lpi_interrupt(init->instance);

#if __USE_ENET_PTP
    /* initialize PTP Clock */
    board_init_enet_ptp_clock(init->instance);

    /* initialize Ethernet PTP Module: sub-second increment derived from the
     * actual PTP clock frequency. */
    init->ptp_config.ssinc = ENET_ONE_SEC_IN_NANOSEC / clock_get_frequency(init->ptp_clk_src);
    enet_init_ptp(init->instance, &init->ptp_config);

    /* set the initial timestamp */
    enet_set_ptp_timestamp(init->instance, &init->ptp_timestamp);
#endif

    /* enable irq */
    intc_m_enable_irq(init->irq_number);

    return RT_EOK;
}
  195. static rt_err_t rt_hpm_eth_init(rt_device_t dev)
  196. {
  197. uint8_t mac[ENET_MAC];
  198. enet_device *enet_dev = (enet_device *)dev->user_data;
  199. /* Initialize GPIOs */
  200. board_init_enet_pins(enet_dev->instance);
  201. /* Reset an enet PHY */
  202. board_reset_enet_phy(enet_dev->instance);
  203. /* Get MAC address */
  204. enet_get_mac_address(enet_dev->instance, mac);
  205. /* Set mac0 address */
  206. enet_dev->mac_config.mac_addr_high[0] = mac[5] << 8 | mac[4];
  207. enet_dev->mac_config.mac_addr_low[0] = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
  208. enet_dev->mac_config.valid_max_count = 1;
  209. /* Initialize MAC and DMA */
  210. if (hpm_enet_init(enet_dev) == 0)
  211. {
  212. LOG_D("Ethernet control initialize successfully\n");
  213. return RT_EOK;
  214. }
  215. else
  216. {
  217. LOG_D("Ethernet control initialize unsuccessfully\n");
  218. return -RT_ERROR;
  219. }
  220. }
  221. static rt_err_t rt_hpm_eth_open(rt_device_t dev, rt_uint16_t oflag)
  222. {
  223. return RT_EOK;
  224. }
  225. static rt_err_t rt_hpm_eth_close(rt_device_t dev)
  226. {
  227. return RT_EOK;
  228. }
  229. static rt_ssize_t rt_hpm_eth_read(rt_device_t dev, rt_off_t pos, void * buffer, rt_size_t size)
  230. {
  231. return 0;
  232. }
  233. static rt_ssize_t rt_hpm_eth_write(rt_device_t dev, rt_off_t pos, const void * buffer, rt_size_t size)
  234. {
  235. return 0;
  236. }
  237. static rt_err_t rt_hpm_eth_control(rt_device_t dev, int cmd, void * args)
  238. {
  239. uint8_t *mac = (uint8_t *)args;
  240. enet_device *enet_dev = (enet_device *)dev->user_data;
  241. switch (cmd)
  242. {
  243. case NIOCTL_GADDR:
  244. if (args != NULL)
  245. {
  246. enet_get_mac_address(enet_dev->instance, (uint8_t *)mac);
  247. SMEMCPY(args, mac, ENET_MAC);
  248. }
  249. else
  250. {
  251. return -RT_ERROR;
  252. }
  253. break;
  254. default:
  255. break;
  256. }
  257. return RT_EOK;
  258. }
  259. static rt_err_t rt_hpm_eth_tx(rt_device_t dev, struct pbuf * p)
  260. {
  261. rt_err_t ret = RT_ERROR;
  262. uint32_t status;
  263. enet_device *enet_dev = (enet_device *)dev->user_data;
  264. uint32_t tx_buff_size = enet_dev->desc.tx_buff_cfg.size;
  265. struct pbuf *q;
  266. uint8_t *buffer;
  267. __IO enet_tx_desc_t *dma_tx_desc;
  268. uint32_t frame_length = 0;
  269. uint32_t buffer_offset = 0;
  270. uint32_t bytes_left_to_copy = 0;
  271. uint32_t payload_offset = 0;
  272. enet_tx_desc_t *tx_desc_list_cur = enet_dev->desc.tx_desc_list_cur;
  273. dma_tx_desc = tx_desc_list_cur;
  274. buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);
  275. buffer_offset = 0;
  276. rt_tick_t t_start;
  277. /* copy frame from pbufs to driver buffers */
  278. for (q = p; q != NULL; q = q->next)
  279. {
  280. /* Get bytes in current lwIP buffer */
  281. bytes_left_to_copy = q->len;
  282. payload_offset = 0;
  283. /* Check if the length of data to copy is bigger than Tx buffer size*/
  284. while ((bytes_left_to_copy + buffer_offset) > tx_buff_size)
  285. {
  286. /* check DMA own status within timeout */
  287. t_start = rt_tick_get();
  288. while (dma_tx_desc->tdes0_bm.own)
  289. {
  290. if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
  291. {
  292. return ERR_TIMEOUT;
  293. }
  294. }
  295. /* Copy data to Tx buffer*/
  296. SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
  297. (uint8_t *)((uint8_t *)q->payload + payload_offset),
  298. tx_buff_size - buffer_offset);
  299. /* Point to next descriptor */
  300. dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);
  301. /* Check if the buffer is available */
  302. if (dma_tx_desc->tdes0_bm.own != 0)
  303. {
  304. LOG_E("DMA tx desc buffer is not valid\n");
  305. return ERR_BUF;
  306. }
  307. buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);
  308. bytes_left_to_copy = bytes_left_to_copy - (tx_buff_size - buffer_offset);
  309. payload_offset = payload_offset + (tx_buff_size - buffer_offset);
  310. frame_length = frame_length + (tx_buff_size - buffer_offset);
  311. buffer_offset = 0;
  312. }
  313. /* check DMA own status within timeout */
  314. t_start = rt_tick_get();
  315. while (dma_tx_desc->tdes0_bm.own)
  316. {
  317. if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
  318. {
  319. return ERR_TIMEOUT;
  320. }
  321. }
  322. /* Copy the remaining bytes */
  323. buffer = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
  324. SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
  325. (uint8_t *)((uint8_t *)q->payload + payload_offset),
  326. bytes_left_to_copy);
  327. buffer_offset = buffer_offset + bytes_left_to_copy;
  328. frame_length = frame_length + bytes_left_to_copy;
  329. }
  330. /* Prepare transmit descriptors to give to DMA */
  331. LOG_D("The length of the transmitted frame: %d\n", frame_length);
  332. frame_length += 4;
  333. status = enet_prepare_transmission_descriptors(enet_dev->instance, &enet_dev->desc.tx_desc_list_cur, frame_length, enet_dev->desc.tx_buff_cfg.size);
  334. if (status != ENET_SUCCESS)
  335. {
  336. LOG_E("Ethernet controller transmit unsuccessfully: %d\n", status);
  337. }
  338. return ERR_OK;
  339. }
/**
 * lwIP receive hook: fetch one received frame from the DMA ring, copy it
 * into a freshly allocated pbuf chain, return the descriptors to the DMA,
 * and resume the receive process if it was suspended.
 *
 * @param dev RT-Thread ethernet device; dev->user_data holds the enet_device
 * @return pbuf chain containing the frame, or NULL when no frame is
 *         available or pbuf allocation failed
 */
static struct pbuf *rt_hpm_eth_rx(rt_device_t dev)
{
    struct pbuf *p = NULL, *q = NULL;
    enet_device *enet_dev = (enet_device *)dev->user_data;
    uint32_t rx_buff_size = enet_dev->desc.rx_buff_cfg.size;
    uint16_t len = 0;
    uint8_t *buffer;
    enet_frame_t frame = {0, 0, 0};
    enet_rx_desc_t *dma_rx_desc;
    uint32_t buffer_offset = 0;
    uint32_t payload_offset = 0;
    uint32_t bytes_left_to_copy = 0;
    uint32_t i = 0;

    /* Get a received frame */
    frame = enet_get_received_frame_interrupt(&enet_dev->desc.rx_desc_list_cur,
                                              &enet_dev->desc.rx_frame_info,
                                              enet_dev->desc.rx_buff_cfg.count);

    /* Obtain the size of the packet and put it into the "len" variable. */
    len = frame.length;
    buffer = (uint8_t *)frame.buffer;

    LOG_D("The current received frame length : %d\n", len);

    if (len > 0)
    {
        /* allocate a pbuf chain of pbufs from the Lwip buffer pool */
        p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);

        if (p != NULL)
        {
            dma_rx_desc = frame.rx_desc;
            buffer_offset = 0;

            for (q = p; q != NULL; q = q->next)
            {
                bytes_left_to_copy = q->len;
                payload_offset = 0;

                /* Check if the length of bytes to copy in current pbuf is bigger than Rx buffer size*/
                while ((bytes_left_to_copy + buffer_offset) > rx_buff_size)
                {
                    /* Copy data to pbuf */
                    SMEMCPY((uint8_t *)((uint8_t *)q->payload + payload_offset), (uint8_t *)((uint8_t *)buffer + buffer_offset), (rx_buff_size - buffer_offset));

                    /* Point to next descriptor */
                    dma_rx_desc = (enet_rx_desc_t *)(dma_rx_desc->rdes3_bm.next_desc);
                    buffer = (uint8_t *)(dma_rx_desc->rdes2_bm.buffer1);

                    bytes_left_to_copy = bytes_left_to_copy - (rx_buff_size - buffer_offset);
                    payload_offset = payload_offset + (rx_buff_size - buffer_offset);
                    buffer_offset = 0;
                }

                /* Copy remaining data in pbuf */
                /* NOTE(review): this rebinds q->payload to the driver's Rx
                 * buffer (core-local view) instead of copying into the pbuf,
                 * yet the descriptors are returned to the DMA below — confirm
                 * this zero-copy hand-off is intentional and safe. */
                q->payload = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
                buffer_offset = buffer_offset + bytes_left_to_copy;
            }
        }

        /* Release descriptors to DMA */
        /* Point to first descriptor */
        dma_rx_desc = frame.rx_desc;

        /* Set Own bit in Rx descriptors: gives the buffers back to DMA */
        for (i = 0; i < enet_dev->desc.rx_frame_info.seg_count; i++)
        {
            dma_rx_desc->rdes0_bm.own = 1;
            dma_rx_desc = (enet_rx_desc_t*)(dma_rx_desc->rdes3_bm.next_desc);
        }

        /* Clear Segment_Count */
        enet_dev->desc.rx_frame_info.seg_count = 0;
    }

    /* Resume Rx Process: if the receiver was suspended for lack of buffers,
     * clear the RU status and issue a poll demand. */
    if (ENET_DMA_STATUS_RU_GET(enet_dev->instance->DMA_STATUS))
    {
        enet_dev->instance->DMA_STATUS = ENET_DMA_STATUS_RU_MASK;
        enet_dev->instance->DMA_RX_POLL_DEMAND = 1;
    }

    return p;
}
  410. static void eth_rx_callback(struct eth_device* dev)
  411. {
  412. rt_err_t result;
  413. result = eth_device_ready(dev);
  414. if (result != RT_EOK)
  415. {
  416. LOG_I("Receive callback error = %d\n", result);
  417. }
  418. }
/* Common ENET interrupt service routine shared by all instances:
 * clears the LPI status and dispatches receive interrupts. */
void isr_enet(hpm_enet_t *obj)
{
    uint32_t status;

    status = obj->base->DMA_STATUS;

    if (ENET_DMA_STATUS_GLPII_GET(status)) {
        /* Reading LPI_CSR acknowledges the LPI interrupt (read-to-clear). */
        obj->base->LPI_CSR;
    }

    if (ENET_DMA_STATUS_RI_GET(status)) {
        /* Acknowledge the receive interrupt.
         * NOTE(review): if DMA_STATUS is write-1-to-clear, this |= read-
         * modify-write may also clear other pending bits — confirm against
         * the SoC reference manual. */
        obj->base->DMA_STATUS |= ENET_DMA_STATUS_RI_SET(ENET_DMA_STATUS_RI_GET(status));
        eth_rx_callback(obj->eth_dev);
    }
}
#ifdef BSP_USING_ETH0
/* ENET0 vector entry: delegate to the shared handler with instance context. */
void isr_enet0(void)
{
    isr_enet(&enet0);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET0, isr_enet0)
#endif
#ifdef BSP_USING_ETH1
/* ENET1 vector entry: delegate to the shared handler with instance context. */
void isr_enet1(void)
{
    isr_enet(&enet1);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET1, isr_enet1)
#endif
  445. int rt_hw_eth_init(void)
  446. {
  447. rt_err_t err = RT_ERROR;
  448. for (uint32_t i = 0; i < ARRAY_SIZE(s_geths); i++)
  449. {
  450. /* Clear memory */
  451. memset((uint8_t *)s_geths[i]->dma_rx_desc_tab, 0x00, sizeof(enet_rx_desc_t) * s_geths[i]->rx_buff_cfg->count);
  452. memset((uint8_t *)s_geths[i]->dma_tx_desc_tab, 0x00, sizeof(enet_tx_desc_t) * s_geths[i]->tx_buff_cfg->count);
  453. memset((uint8_t *)s_geths[i]->rx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->rx_buff_cfg->size));
  454. memset((uint8_t *)s_geths[i]->tx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->tx_buff_cfg->size));
  455. /* Set list heads */
  456. s_geths[i]->enet_dev->desc.tx_desc_list_head = (enet_tx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_tx_desc_tab);
  457. s_geths[i]->enet_dev->desc.rx_desc_list_head = (enet_rx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_rx_desc_tab);
  458. s_geths[i]->enet_dev->desc.tx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->tx_buff_cfg->buffer);
  459. s_geths[i]->enet_dev->desc.tx_buff_cfg.count = s_geths[i]->tx_buff_cfg->count;
  460. s_geths[i]->enet_dev->desc.tx_buff_cfg.size = s_geths[i]->tx_buff_cfg->size;
  461. s_geths[i]->enet_dev->desc.rx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->rx_buff_cfg->buffer);
  462. s_geths[i]->enet_dev->desc.rx_buff_cfg.count = s_geths[i]->rx_buff_cfg->count;
  463. s_geths[i]->enet_dev->desc.rx_buff_cfg.size = s_geths[i]->rx_buff_cfg->size;
  464. /* Set DMA PBL */
  465. s_geths[i]->enet_dev->mac_config.dma_pbl = board_get_enet_dma_pbl(s_geths[i]->base);
  466. /* Set instance */
  467. s_geths[i]->enet_dev->instance = s_geths[i]->base;
  468. /* Set media interface */
  469. s_geths[i]->enet_dev->media_interface = s_geths[i]->inf ? enet_inf_rgmii : enet_inf_rmii;
  470. if (s_geths[i]->enet_dev->media_interface == enet_inf_rmii)
  471. {
  472. /* Set refclk */
  473. s_geths[i]->enet_dev->int_refclk = s_geths[i]->int_refclk;
  474. } else {
  475. /* Set TX/RX delay */
  476. s_geths[i]->enet_dev->tx_delay = s_geths[i]->tx_delay;
  477. s_geths[i]->enet_dev->rx_delay = s_geths[i]->rx_delay;
  478. }
  479. #if __USE_ENET_PTP
  480. /* Set PTP function */
  481. s_geths[i]->enet_dev->ptp_clk_src = s_geths[i]->ptp_clk_src;
  482. s_geths[i]->enet_dev->ptp_config = *s_geths[i]->ptp_config;
  483. s_geths[i]->enet_dev->ptp_timestamp = *s_geths[i]->ptp_timestamp;
  484. #endif
  485. /* Set the irq number */
  486. s_geths[i]->enet_dev->irq_number = s_geths[i]->irq_num;
  487. /* Set the parent parameters */
  488. s_geths[i]->eth_dev->parent.init = rt_hpm_eth_init;
  489. s_geths[i]->eth_dev->parent.open = rt_hpm_eth_open;
  490. s_geths[i]->eth_dev->parent.close = rt_hpm_eth_close;
  491. s_geths[i]->eth_dev->parent.read = rt_hpm_eth_read;
  492. s_geths[i]->eth_dev->parent.write = rt_hpm_eth_write;
  493. s_geths[i]->eth_dev->parent.control = rt_hpm_eth_control;
  494. s_geths[i]->eth_dev->parent.user_data = s_geths[i]->enet_dev;
  495. s_geths[i]->eth_dev->eth_rx = rt_hpm_eth_rx;
  496. s_geths[i]->eth_dev->eth_tx = rt_hpm_eth_tx;
  497. err = eth_device_init(s_geths[i]->eth_dev, s_geths[i]->name);
  498. if (RT_EOK == err)
  499. {
  500. LOG_D("Ethernet device %d initialize successfully!\n", i);
  501. }
  502. else
  503. {
  504. LOG_D("Ethernet device %d initialize unsuccessfully!\n");
  505. return err;
  506. }
  507. }
  508. return err;
  509. }
  510. INIT_DEVICE_EXPORT(rt_hw_eth_init);
  511. #endif /* BSP_USING_ETH */