drv_enet.c 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594
  1. /*
  2. * Copyright (c) 2021 - 2023 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-01-11 HPMicro First version
  9. * 2022-07-10 HPMicro Driver optimization for multiple instances
  10. */
  11. #include <rtdevice.h>
  12. #ifdef BSP_USING_ETH
  13. #include <rtdbg.h>
  14. #include "drv_enet.h"
  15. #include "hpm_otp_drv.h"
#ifdef BSP_USING_ETH0
/* Ethernet0 DMA descriptor rings: placed in non-cacheable memory so the CPU
 * and the ENET DMA engine stay coherent without explicit cache maintenance. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet0_dma_rx_desc_tab[ENET0_RX_BUFF_COUNT]; /* Ethernet0 Rx DMA Descriptor */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet0_dma_tx_desc_tab[ENET0_TX_BUFF_COUNT]; /* Ethernet0 Tx DMA Descriptor */

/* Ethernet0 frame buffers, kept in the ".fast_ram" section. */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_rx_buff[ENET0_RX_BUFF_COUNT][ENET0_RX_BUFF_SIZE]; /* Ethernet0 Receive Buffer */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet0_tx_buff[ENET0_TX_BUFF_COUNT][ENET0_TX_BUFF_SIZE]; /* Ethernet0 Transmit Buffer */

/* RT-Thread device wrapper and low-level driver state for ETH0. */
struct eth_device eth0_dev;
static enet_device enet0_dev;

/* Rx/Tx buffer geometry handed to the SDK driver at init time. */
static enet_buff_config_t enet0_rx_buff_cfg = {.buffer = (uint32_t)enet0_rx_buff,
                                               .count = ENET0_RX_BUFF_COUNT,
                                               .size = ENET0_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet0_tx_buff_cfg = {.buffer = (uint32_t)enet0_tx_buff,
                                               .count = ENET0_TX_BUFF_COUNT,
                                               .size = ENET0_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
/* Initial PTP timestamp (seconds, nanoseconds). */
static enet_ptp_ts_update_t ptp_timestamp0 = {0, 0};
/* PTP engine setup; .ssinc is computed later in hpm_enet_init() from the
 * PTP clock frequency. */
static enet_ptp_config_t ptp_config0 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate descriptor tying all ETH0 resources together; consumed by
 * rt_hw_eth_init() through the s_geths[] table. */
static hpm_enet_t enet0 = {.name = "ETH0",
                           .base = HPM_ENET0,
                           .irq_num = IRQn_ENET0,
                           .inf = BOARD_ENET0_INF,  /* 0 = RMII, non-zero = RGMII (see rt_hw_eth_init) */
                           .eth_dev = &eth0_dev,
                           .enet_dev = &enet0_dev,
                           .rx_buff_cfg = &enet0_rx_buff_cfg,
                           .tx_buff_cfg = &enet0_tx_buff_cfg,
                           .dma_rx_desc_tab = enet0_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet0_dma_tx_desc_tab,
#if !BOARD_ENET0_INF
                           .int_refclk = BOARD_ENET0_INT_REF_CLK,  /* RMII reference clock selection */
#else
                           .tx_delay = BOARD_ENET0_TX_DLY,         /* RGMII clock delays */
                           .rx_delay = BOARD_ENET0_RX_DLY,
#endif
#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET0_PTP_CLOCK,
                           .ptp_config = &ptp_config0,
                           .ptp_timestamp = &ptp_timestamp0
#endif
                          };
#endif
#ifdef BSP_USING_ETH1
/* Ethernet1 DMA descriptor rings: placed in non-cacheable memory so the CPU
 * and the ENET DMA engine stay coherent without explicit cache maintenance. */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_rx_desc_t enet1_dma_rx_desc_tab[ENET1_RX_BUFF_COUNT]; /* Ethernet1 Rx DMA Descriptor */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(ENET_SOC_DESC_ADDR_ALIGNMENT)
__RW enet_tx_desc_t enet1_dma_tx_desc_tab[ENET1_TX_BUFF_COUNT]; /* Ethernet1 Tx DMA Descriptor */

/* Ethernet1 frame buffers, kept in the ".fast_ram" section. */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_rx_buff[ENET1_RX_BUFF_COUNT][ENET1_RX_BUFF_SIZE]; /* Ethernet1 Receive Buffer */
ATTR_PLACE_AT_WITH_ALIGNMENT(".fast_ram", ENET_SOC_BUFF_ADDR_ALIGNMENT)
__RW uint8_t enet1_tx_buff[ENET1_TX_BUFF_COUNT][ENET1_TX_BUFF_SIZE]; /* Ethernet1 Transmit Buffer */

/* RT-Thread device wrapper and low-level driver state for ETH1. */
struct eth_device eth1_dev;
static enet_device enet1_dev;

/* Rx/Tx buffer geometry handed to the SDK driver at init time. */
static enet_buff_config_t enet1_rx_buff_cfg = {.buffer = (uint32_t)enet1_rx_buff,
                                               .count = ENET1_RX_BUFF_COUNT,
                                               .size = ENET1_RX_BUFF_SIZE
                                              };

static enet_buff_config_t enet1_tx_buff_cfg = {.buffer = (uint32_t)enet1_tx_buff,
                                               .count = ENET1_TX_BUFF_COUNT,
                                               .size = ENET1_TX_BUFF_SIZE
                                              };

#if __USE_ENET_PTP
/* Initial PTP timestamp (seconds, nanoseconds). */
static enet_ptp_ts_update_t ptp_timestamp1 = {0, 0};
/* PTP engine setup; .ssinc is computed later in hpm_enet_init() from the
 * PTP clock frequency. */
static enet_ptp_config_t ptp_config1 = {.timestamp_rollover_mode = enet_ts_dig_rollover_control,
                                        .update_method = enet_ptp_time_fine_update,
                                        .addend = 0xffffffff,
                                       };
#endif

/* Aggregate descriptor tying all ETH1 resources together; consumed by
 * rt_hw_eth_init() through the s_geths[] table. */
static hpm_enet_t enet1 = {.name = "ETH1",
                           .base = HPM_ENET1,
                           .irq_num = IRQn_ENET1,
                           .inf = BOARD_ENET1_INF,  /* 0 = RMII, non-zero = RGMII (see rt_hw_eth_init) */
                           .eth_dev = &eth1_dev,
                           .enet_dev = &enet1_dev,
                           .rx_buff_cfg = &enet1_rx_buff_cfg,
                           .tx_buff_cfg = &enet1_tx_buff_cfg,
                           .dma_rx_desc_tab = enet1_dma_rx_desc_tab,
                           .dma_tx_desc_tab = enet1_dma_tx_desc_tab,
#if !BOARD_ENET1_INF
                           .int_refclk = BOARD_ENET1_INT_REF_CLK,  /* RMII reference clock selection */
#else
                           .tx_delay = BOARD_ENET1_TX_DLY,         /* RGMII clock delays */
                           .rx_delay = BOARD_ENET1_RX_DLY,
#endif
#if __USE_ENET_PTP
                           .ptp_clk_src = BOARD_ENET1_PTP_CLOCK,
                           .ptp_config = &ptp_config1,
                           .ptp_timestamp = &ptp_timestamp1
#endif
                          };
#endif
/* Table of all enabled Ethernet instances; rt_hw_eth_init() iterates it to
 * register each one with the RT-Thread network stack. */
static hpm_enet_t *s_geths[] = {
#ifdef BSP_USING_ETH0
    &enet0,
#endif
#ifdef BSP_USING_ETH1
    &enet1
#endif
};
  122. ATTR_WEAK void enet_get_mac_address(uint8_t *mac)
  123. {
  124. uint32_t uuid[OTP_SOC_UUID_LEN / sizeof(uint32_t)];
  125. for (int i = 0; i < ARRAY_SIZE(uuid); i++) {
  126. uuid[i] = otp_read_from_shadow(OTP_SOC_UUID_IDX + i);
  127. }
  128. if (!IS_UUID_INVALID(uuid)) {
  129. uuid[0] &= 0xfc;
  130. memcpy(mac, &uuid, ENET_MAC);
  131. } else {
  132. mac[0] = MAC_ADDR0;
  133. mac[1] = MAC_ADDR1;
  134. mac[2] = MAC_ADDR2;
  135. mac[3] = MAC_ADDR3;
  136. mac[4] = MAC_ADDR4;
  137. mac[5] = MAC_ADDR5;
  138. }
  139. }
/**
 * Bring up one ENET controller: MAC/DMA initialization, media-interface
 * clocking (RMII reference clock or RGMII clock delays), optional PTP setup,
 * and finally IRQ enable.
 *
 * @param init fully populated enet_device (see rt_hw_eth_init)
 * @return always RT_EOK
 */
static rt_err_t hpm_enet_init(enet_device *init)
{
    /* Initialize eth controller */
    enet_controller_init(init->instance, init->media_interface, &init->desc, &init->mac_config, &init->int_config);

    if (init->media_interface == enet_inf_rmii)
    {
        /* Initialize reference clock */
        board_init_enet_rmii_reference_clock(init->instance, init->int_refclk);
        enet_rmii_enable_clock(init->instance, init->int_refclk);
    }

#if ENET_SOC_RGMII_EN
    /* Set RGMII clock delay */
    if (init->media_interface == enet_inf_rgmii)
    {
        enet_rgmii_enable_clock(init->instance);
        enet_rgmii_set_clock_delay(init->instance, init->tx_delay, init->rx_delay);
    }
#endif

#if __USE_ENET_PTP
    /* initialize PTP Clock */
    board_init_enet_ptp_clock(init->instance);

    /* initialize Ethernet PTP Module: sub-second increment derived from the
     * PTP clock frequency so the timestamp counter tracks nanoseconds */
    init->ptp_config.ssinc = ENET_ONE_SEC_IN_NANOSEC / clock_get_frequency(init->ptp_clk_src);
    enet_init_ptp(init->instance, &init->ptp_config);

    /* set the initial timestamp */
    enet_set_ptp_timestamp(init->instance, &init->ptp_timestamp);
#endif

    /* enable irq */
    intc_m_enable_irq(init->irq_number);

    return RT_EOK;
}
  171. static rt_err_t rt_hpm_eth_init(rt_device_t dev)
  172. {
  173. uint8_t mac[ENET_MAC];
  174. enet_device *enet_dev = (enet_device *)dev->user_data;
  175. /* Initialize GPIOs */
  176. board_init_enet_pins(enet_dev->instance);
  177. /* Reset an enet PHY */
  178. board_reset_enet_phy(enet_dev->instance);
  179. /* Get MAC address */
  180. enet_get_mac_address(mac);
  181. /* Set mac0 address */
  182. enet_dev->mac_config.mac_addr_high[0] = mac[5] << 8 | mac[4];
  183. enet_dev->mac_config.mac_addr_low[0] = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
  184. enet_dev->mac_config.valid_max_count = 1;
  185. /* Initialize MAC and DMA */
  186. if (hpm_enet_init(enet_dev) == 0)
  187. {
  188. LOG_D("Ethernet control initialize successfully\n");
  189. return RT_EOK;
  190. }
  191. else
  192. {
  193. LOG_D("Ethernet control initialize unsuccessfully\n");
  194. return RT_ERROR;
  195. }
  196. }
  197. static rt_err_t rt_hpm_eth_open(rt_device_t dev, rt_uint16_t oflag)
  198. {
  199. return RT_EOK;
  200. }
  201. static rt_err_t rt_hpm_eth_close(rt_device_t dev)
  202. {
  203. return RT_EOK;
  204. }
  205. static rt_ssize_t rt_hpm_eth_read(rt_device_t dev, rt_off_t pos, void * buffer, rt_size_t size)
  206. {
  207. return 0;
  208. }
  209. static rt_ssize_t rt_hpm_eth_write(rt_device_t dev, rt_off_t pos, const void * buffer, rt_size_t size)
  210. {
  211. return 0;
  212. }
  213. static rt_err_t rt_hpm_eth_control(rt_device_t dev, int cmd, void * args)
  214. {
  215. uint8_t *mac = (uint8_t *)args;
  216. switch (cmd)
  217. {
  218. case NIOCTL_GADDR:
  219. if (args != NULL)
  220. {
  221. enet_get_mac_address((uint8_t *)mac);
  222. SMEMCPY(args, mac, ENET_MAC);
  223. }
  224. else
  225. {
  226. return -RT_ERROR;
  227. }
  228. break;
  229. default:
  230. break;
  231. }
  232. return RT_EOK;
  233. }
/**
 * lwIP transmit hook: copy a pbuf chain into the DMA Tx ring buffers and
 * hand the descriptors to the DMA engine.
 *
 * @param dev RT-Thread device whose user_data is the enet_device
 * @param p   pbuf chain to transmit
 * @return ERR_OK on success (descriptor-preparation failures are only
 *         logged), ERR_TIMEOUT if a descriptor stays DMA-owned for more
 *         than RT_TICK_PER_SECOND/100 ticks (~10 ms), ERR_BUF if the next
 *         descriptor in the ring is still busy
 *
 * NOTE(review): the declared return type is rt_err_t but the values
 * returned are lwIP err_t codes (ERR_OK/ERR_TIMEOUT/ERR_BUF) — callers in
 * the eth glue appear to expect err_t; confirm.
 */
static rt_err_t rt_hpm_eth_tx(rt_device_t dev, struct pbuf * p)
{
    rt_err_t ret = RT_ERROR;   /* NOTE(review): never read — candidate for removal */
    uint32_t status;
    enet_device *enet_dev = (enet_device *)dev->user_data;
    uint32_t tx_buff_size = enet_dev->desc.tx_buff_cfg.size;
    struct pbuf *q;
    uint8_t *buffer;
    __IO enet_tx_desc_t *dma_tx_desc;
    uint32_t frame_length = 0;
    uint32_t buffer_offset = 0;
    uint32_t bytes_left_to_copy = 0;
    uint32_t payload_offset = 0;
    enet_tx_desc_t *tx_desc_list_cur = enet_dev->desc.tx_desc_list_cur;

    /* Start at the current Tx descriptor and its attached buffer. */
    dma_tx_desc = tx_desc_list_cur;
    buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);
    buffer_offset = 0;
    rt_tick_t t_start;

    /* copy frame from pbufs to driver buffers */
    for (q = p; q != NULL; q = q->next)
    {
        /* Get bytes in current lwIP buffer */
        bytes_left_to_copy = q->len;
        payload_offset = 0;

        /* Check if the length of data to copy is bigger than Tx buffer size*/
        while ((bytes_left_to_copy + buffer_offset) > tx_buff_size)
        {
            /* check DMA own status within timeout: wait until the DMA
             * engine has released this descriptor back to software */
            t_start = rt_tick_get();
            while (dma_tx_desc->tdes0_bm.own)
            {
                if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
                {
                    return ERR_TIMEOUT;
                }
            }

            /* Copy data to Tx buffer*/
            /* NOTE(review): unlike the tail copy below, this path does not
             * run `buffer` through sys_address_to_core_local_mem() before
             * writing — looks inconsistent; confirm whether descriptor
             * buffer addresses are already core-local here. */
            SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
                    (uint8_t *)((uint8_t *)q->payload + payload_offset),
                    tx_buff_size - buffer_offset);

            /* Point to next descriptor */
            dma_tx_desc = (enet_tx_desc_t *)(dma_tx_desc->tdes3_bm.next_desc);

            /* Check if the buffer is available */
            if (dma_tx_desc->tdes0_bm.own != 0)
            {
                LOG_E("DMA tx desc buffer is not valid\n");
                return ERR_BUF;
            }

            buffer = (uint8_t *)(dma_tx_desc->tdes2_bm.buffer1);

            bytes_left_to_copy = bytes_left_to_copy - (tx_buff_size - buffer_offset);
            payload_offset = payload_offset + (tx_buff_size - buffer_offset);
            frame_length = frame_length + (tx_buff_size - buffer_offset);
            buffer_offset = 0;
        }

        /* check DMA own status within timeout */
        t_start = rt_tick_get();
        while (dma_tx_desc->tdes0_bm.own)
        {
            if (rt_tick_get() - t_start > RT_TICK_PER_SECOND / 100)
            {
                return ERR_TIMEOUT;
            }
        }

        /* Copy the remaining bytes (descriptor holds a system address;
         * translate it to this core's local view before the CPU writes) */
        buffer = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
        SMEMCPY((uint8_t *)((uint8_t *)buffer + buffer_offset),
                (uint8_t *)((uint8_t *)q->payload + payload_offset),
                bytes_left_to_copy);
        buffer_offset = buffer_offset + bytes_left_to_copy;
        frame_length = frame_length + bytes_left_to_copy;
    }

    /* Prepare transmit descriptors to give to DMA */
    LOG_D("The length of the transmitted frame: %d\n", frame_length);

    /* +4 presumably accounts for the frame check sequence (CRC) appended by
     * the MAC — TODO confirm against enet_prepare_transmission_descriptors */
    frame_length += 4;
    status = enet_prepare_transmission_descriptors(enet_dev->instance, &enet_dev->desc.tx_desc_list_cur, frame_length, enet_dev->desc.tx_buff_cfg.size);
    if (status != ENET_SUCCESS)
    {
        LOG_E("Ethernet controller transmit unsuccessfully: %d\n", status);
    }

    return ERR_OK;
}
  315. static struct pbuf *rt_hpm_eth_rx(rt_device_t dev)
  316. {
  317. struct pbuf *p = NULL, *q = NULL;
  318. enet_device *enet_dev = (enet_device *)dev->user_data;
  319. uint32_t rx_buff_size = enet_dev->desc.rx_buff_cfg.size;
  320. uint16_t len = 0;
  321. uint8_t *buffer;
  322. enet_frame_t frame = {0, 0, 0};
  323. enet_rx_desc_t *dma_rx_desc;
  324. uint32_t buffer_offset = 0;
  325. uint32_t payload_offset = 0;
  326. uint32_t bytes_left_to_copy = 0;
  327. uint32_t i = 0;
  328. /* Get a received frame */
  329. frame = enet_get_received_frame_interrupt(&enet_dev->desc.rx_desc_list_cur,
  330. &enet_dev->desc.rx_frame_info,
  331. enet_dev->desc.rx_buff_cfg.count);
  332. /* Obtain the size of the packet and put it into the "len" variable. */
  333. len = frame.length;
  334. buffer = (uint8_t *)frame.buffer;
  335. LOG_D("The current received frame length : %d\n", len);
  336. if (len > 0)
  337. {
  338. /* allocate a pbuf chain of pbufs from the Lwip buffer pool */
  339. p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
  340. }
  341. if (p != NULL)
  342. {
  343. dma_rx_desc = frame.rx_desc;
  344. buffer_offset = 0;
  345. for (q = p; q != NULL; q = q->next)
  346. {
  347. bytes_left_to_copy = q->len;
  348. payload_offset = 0;
  349. /* Check if the length of bytes to copy in current pbuf is bigger than Rx buffer size*/
  350. while ((bytes_left_to_copy + buffer_offset) > rx_buff_size)
  351. {
  352. /* Copy data to pbuf */
  353. SMEMCPY((uint8_t *)((uint8_t *)q->payload + payload_offset), (uint8_t *)((uint8_t *)buffer + buffer_offset), (rx_buff_size - buffer_offset));
  354. /* Point to next descriptor */
  355. dma_rx_desc = (enet_rx_desc_t *)(dma_rx_desc->rdes3_bm.next_desc);
  356. buffer = (uint8_t *)(dma_rx_desc->rdes2_bm.buffer1);
  357. bytes_left_to_copy = bytes_left_to_copy - (rx_buff_size - buffer_offset);
  358. payload_offset = payload_offset + (rx_buff_size - buffer_offset);
  359. buffer_offset = 0;
  360. }
  361. /* Copy remaining data in pbuf */
  362. q->payload = (void *)sys_address_to_core_local_mem(0, (uint32_t)buffer);
  363. buffer_offset = buffer_offset + bytes_left_to_copy;
  364. }
  365. }
  366. else
  367. {
  368. return NULL;
  369. }
  370. /* Release descriptors to DMA */
  371. /* Point to first descriptor */
  372. dma_rx_desc = frame.rx_desc;
  373. /* Set Own bit in Rx descriptors: gives the buffers back to DMA */
  374. for (i = 0; i < enet_dev->desc.rx_frame_info.seg_count; i++)
  375. {
  376. dma_rx_desc->rdes0_bm.own = 1;
  377. dma_rx_desc = (enet_rx_desc_t*)(dma_rx_desc->rdes3_bm.next_desc);
  378. }
  379. /* Clear Segment_Count */
  380. enet_dev->desc.rx_frame_info.seg_count = 0;
  381. return p;
  382. }
  383. static void eth_rx_callback(struct eth_device* dev)
  384. {
  385. rt_err_t result;
  386. result = eth_device_ready(dev);
  387. if (result != RT_EOK)
  388. {
  389. LOG_I("Receive callback error = %d\n", result);
  390. }
  391. }
/* Shared ENET interrupt handler: services the LPI summary interrupt and the
 * receive-complete interrupt, forwarding the latter to the network stack. */
void isr_enet(hpm_enet_t *obj)
{
    uint32_t status;

    status = obj->base->DMA_STATUS;

    if (ENET_DMA_STATUS_GLPII_GET(status)) {
        /* Dummy read of LPI_CSR — presumably read-to-clear for the LPI
         * interrupt status; TODO confirm against the reference manual. */
        obj->base->LPI_CSR;
    }

    if (ENET_DMA_STATUS_RI_GET(status)) {
        /* Acknowledge the Receive Interrupt bit, then wake the eth thread */
        obj->base->DMA_STATUS |= ENET_DMA_STATUS_RI_SET(ENET_DMA_STATUS_RI_GET(status));
        eth_rx_callback(obj->eth_dev);
    }
}
#ifdef BSP_USING_ETH0
/* ETH0 vector entry: forwards into the shared handler with ETH0's state. */
void isr_enet0(void)
{
    isr_enet(&enet0);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET0, isr_enet0)
#endif

#ifdef BSP_USING_ETH1
/* ETH1 vector entry: forwards into the shared handler with ETH1's state. */
void isr_enet1(void)
{
    isr_enet(&enet1);
}
SDK_DECLARE_EXT_ISR_M(IRQn_ENET1, isr_enet1)
#endif
  418. int rt_hw_eth_init(void)
  419. {
  420. rt_err_t err = RT_ERROR;
  421. for (uint32_t i = 0; i < ARRAY_SIZE(s_geths); i++)
  422. {
  423. /* Clear memory */
  424. memset((uint8_t *)s_geths[i]->dma_rx_desc_tab, 0x00, sizeof(enet_rx_desc_t) * s_geths[i]->rx_buff_cfg->count);
  425. memset((uint8_t *)s_geths[i]->dma_tx_desc_tab, 0x00, sizeof(enet_tx_desc_t) * s_geths[i]->tx_buff_cfg->count);
  426. memset((uint8_t *)s_geths[i]->rx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->rx_buff_cfg->size));
  427. memset((uint8_t *)s_geths[i]->tx_buff_cfg->buffer, 0x00, sizeof(s_geths[i]->tx_buff_cfg->size));
  428. /* Set list heads */
  429. s_geths[i]->enet_dev->desc.tx_desc_list_head = (enet_tx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_tx_desc_tab);
  430. s_geths[i]->enet_dev->desc.rx_desc_list_head = (enet_rx_desc_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)s_geths[i]->dma_rx_desc_tab);
  431. s_geths[i]->enet_dev->desc.tx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->tx_buff_cfg->buffer);
  432. s_geths[i]->enet_dev->desc.tx_buff_cfg.count = s_geths[i]->tx_buff_cfg->count;
  433. s_geths[i]->enet_dev->desc.tx_buff_cfg.size = s_geths[i]->tx_buff_cfg->size;
  434. s_geths[i]->enet_dev->desc.rx_buff_cfg.buffer = core_local_mem_to_sys_address(BOARD_RUNNING_CORE, s_geths[i]->rx_buff_cfg->buffer);
  435. s_geths[i]->enet_dev->desc.rx_buff_cfg.count = s_geths[i]->rx_buff_cfg->count;
  436. s_geths[i]->enet_dev->desc.rx_buff_cfg.size = s_geths[i]->rx_buff_cfg->size;
  437. /* Set DMA PBL */
  438. s_geths[i]->enet_dev->mac_config.dma_pbl = board_enet_get_dma_pbl(s_geths[i]->base);
  439. /* Set instance */
  440. s_geths[i]->enet_dev->instance = s_geths[i]->base;
  441. /* Set media interface */
  442. s_geths[i]->enet_dev->media_interface = s_geths[i]->inf ? enet_inf_rgmii : enet_inf_rmii;
  443. if (s_geths[i]->enet_dev->media_interface == enet_inf_rmii)
  444. {
  445. /* Set refclk */
  446. s_geths[i]->enet_dev->int_refclk = s_geths[i]->int_refclk;
  447. } else {
  448. /* Set TX/RX delay */
  449. s_geths[i]->enet_dev->tx_delay = s_geths[i]->tx_delay;
  450. s_geths[i]->enet_dev->rx_delay = s_geths[i]->rx_delay;
  451. }
  452. #if __USE_ENET_PTP
  453. /* Set PTP function */
  454. s_geths[i]->enet_dev->ptp_clk_src = s_geths[i]->ptp_clk_src;
  455. s_geths[i]->enet_dev->ptp_config = *s_geths[i]->ptp_config;
  456. s_geths[i]->enet_dev->ptp_timestamp = *s_geths[i]->ptp_timestamp;
  457. #endif
  458. /* Set the interrupt enable mask */
  459. s_geths[i]->enet_dev->int_config.int_enable = enet_normal_int_sum_en /* Enable normal interrupt summary */
  460. | enet_receive_int_en; /* Enable receive interrupt */
  461. /* Set the interrupt disable mask */
  462. s_geths[i]->enet_dev->int_config.int_mask = enet_rgsmii_int_mask;
  463. /* Set the irq number */
  464. s_geths[i]->enet_dev->irq_number = s_geths[i]->irq_num;
  465. /* Set the parent parameters */
  466. s_geths[i]->eth_dev->parent.init = rt_hpm_eth_init;
  467. s_geths[i]->eth_dev->parent.open = rt_hpm_eth_open;
  468. s_geths[i]->eth_dev->parent.close = rt_hpm_eth_close;
  469. s_geths[i]->eth_dev->parent.read = rt_hpm_eth_read;
  470. s_geths[i]->eth_dev->parent.write = rt_hpm_eth_write;
  471. s_geths[i]->eth_dev->parent.control = rt_hpm_eth_control;
  472. s_geths[i]->eth_dev->parent.user_data = s_geths[i]->enet_dev;
  473. s_geths[i]->eth_dev->eth_rx = rt_hpm_eth_rx;
  474. s_geths[i]->eth_dev->eth_tx = rt_hpm_eth_tx;
  475. err = eth_device_init(s_geths[i]->eth_dev, s_geths[i]->name);
  476. if (RT_EOK == err)
  477. {
  478. LOG_D("Ethernet device initialize successfully!\n");
  479. }
  480. else
  481. {
  482. LOG_D("Ethernet device initialize unsuccessfully!\n");
  483. return err;
  484. }
  485. }
  486. return err;
  487. }
  488. INIT_DEVICE_EXPORT(rt_hw_eth_init);
  489. #endif /* BSP_USING_ETH */