drv_emac.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-05-16 shelton first version
  9. * 2022-07-11 shelton optimize code to improve network throughput
  10. * performance
  11. * 2022-10-15 shelton optimize code
  12. * 2023-10-18 shelton optimize code
  13. * 2024-09-02 shelton add support phy lan8720 and yt8512
  14. */
  15. #include "drv_emac.h"
  16. #include <netif/ethernetif.h>
  17. #include <lwipopts.h>
  18. /* debug option */
  19. //#define EMAC_RX_DUMP
  20. //#define EMAC_TX_DUMP
  21. //#define DRV_DEBUG
  22. #define LOG_TAG "drv.emac"
  23. #include <drv_log.h>
  24. #define CRYSTAL_ON_PHY 0
  25. /* emac memory buffer configuration */
  26. #define EMAC_NUM_RX_BUF 5 /* rx (5 * 1500) */
  27. #define EMAC_NUM_TX_BUF 5 /* tx (5 * 1500) */
  28. #define MAX_ADDR_LEN 6
  29. #define DMARXDESC_FRAMELENGTH_SHIFT 16
/* at32 emac device object, extending the generic rt-thread ethernet device */
struct rt_at32_emac
{
    /* inherit from ethernet device; must stay first so the object can be
       passed to the eth_device_* APIs */
    struct eth_device parent;
#ifndef PHY_USING_INTERRUPT_MODE
    /* periodic timer that polls the phy for link state changes */
    rt_timer_t poll_link_timer;
#endif
    /* interface address info, hw address */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];
    /* emac_speed: last speed reported by the phy (10/100 mbit/s) */
    emac_speed_type emac_speed;
    /* emac_duplex_mode: last duplex mode reported by the phy */
    emac_duplex_type emac_mode;
};
/* bookkeeping for one received frame that may span several rx dma descriptors */
typedef struct {
    rt_uint32_t length;                /* frame length in bytes (4-byte crc already stripped) */
    rt_uint32_t buffer;                /* buffer address of the first segment */
    emac_dma_desc_type *descriptor;    /* scratch descriptor pointer */
    emac_dma_desc_type *rx_fs_desc;    /* descriptor carrying the first segment */
    emac_dma_desc_type *rx_ls_desc;    /* descriptor carrying the last segment */
    rt_uint8_t g_seg_count;            /* number of segments collected so far */
} frame_type;
/* rx/tx dma descriptor rings, allocated in rt_hw_at32_emac_init() */
static emac_dma_desc_type *dma_rx_dscr_tab, *dma_tx_dscr_tab;
/* current rx/tx descriptor cursors, defined by the at32 emac library */
extern emac_dma_desc_type *dma_rx_desc_to_get, *dma_tx_desc_to_set;
/* state of the frame currently being reassembled from rx descriptors */
frame_type rx_frame;
/* rx/tx packet buffers, allocated in rt_hw_at32_emac_init() */
static rt_uint8_t *rx_buff, *tx_buff;
/* the singleton emac device instance */
static struct rt_at32_emac at32_emac_device;
/* mdio address of the detected phy; 0xFF means "not detected yet" */
static uint8_t phy_addr = 0xFF;
  58. #if defined(EMAC_RX_DUMP) || defined(EMAC_TX_DUMP)
  59. #define __is_print(ch) ((unsigned int)((ch) - ' ') < 127u - ' ')
  60. static void dump_hex(const rt_uint8_t *ptr, rt_size_t buflen)
  61. {
  62. unsigned char *buf = (unsigned char *)ptr;
  63. int i, j;
  64. for (i = 0; i < buflen; i += 16)
  65. {
  66. rt_kprintf("%08X: ", i);
  67. for (j = 0; j < 16; j++)
  68. if (i + j < buflen)
  69. rt_kprintf("%02X ", buf[i + j]);
  70. else
  71. rt_kprintf(" ");
  72. rt_kprintf(" ");
  73. for (j = 0; j < 16; j++)
  74. if (i + j < buflen)
  75. rt_kprintf("%c", __is_print(buf[i + j]) ? buf[i + j] : '.');
  76. rt_kprintf("\n");
  77. }
  78. }
  79. #endif
/**
 * @brief phy reset
 *        drive the phy hardware reset pin(s) low for ~2 ms, release the
 *        reset, then wait 2 s for the phy to finish its internal start-up.
 *        The pin assignments are board specific.
 */
static void phy_reset(void)
{
    gpio_init_type gpio_init_struct;
#if defined (SOC_SERIES_AT32F437)
    /* pe15 and pg15 are both pulled low; only pe15 is driven high again --
       NOTE(review): confirm against the board schematic that pg15 is
       intentionally left low */
    crm_periph_clock_enable(CRM_GPIOE_PERIPH_CLOCK, TRUE);
    crm_periph_clock_enable(CRM_GPIOG_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOE, &gpio_init_struct);
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOG, &gpio_init_struct);
    gpio_bits_reset(GPIOE, GPIO_PINS_15);
    gpio_bits_reset(GPIOG, GPIO_PINS_15);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOE, GPIO_PINS_15);
#endif
#if defined (SOC_SERIES_AT32F407)
    /* reset pin is pc8 on the at32f407 board */
    crm_periph_clock_enable(CRM_GPIOC_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOC, &gpio_init_struct);
    gpio_bits_reset(GPIOC, GPIO_PINS_8);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOC, GPIO_PINS_8);
#endif
    /* give the phy time to come out of reset before any mdio access */
    rt_thread_mdelay(2000);
}
/**
 * @brief phy clock config
 *        when the phy has no own crystal (CRYSTAL_ON_PHY == 0), output the
 *        reference clock to the phy on mcu pin pa8 (clkout), with a divider
 *        chosen per phy model (25 MHz for dm9162/lan8720/yt8512-class setups,
 *        50 MHz for dp83848 per the comments below)
 */
static void phy_clock_config(void)
{
#if (CRYSTAL_ON_PHY == 0)
    /* if CRYSTAL_NO_PHY, output clock with pa8 of mcu */
    gpio_init_type gpio_init_struct;
    crm_periph_clock_enable(CRM_GPIOA_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    /* pa8 as mux (clkout) push-pull output */
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_MUX;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOA, &gpio_init_struct);
    /* 9162 clkout output 25 mhz */
    /* 83848 clkout output 50 mhz */
#if defined (SOC_SERIES_AT32F407)
    /* clkout source is the system clock; divider depends on the phy model */
    crm_clock_out_set(CRM_CLKOUT_SCLK);
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720) || \
    defined (PHY_USING_YT8512)
    crm_clkout_div_set(CRM_CLKOUT_DIV_8);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_DIV_4);
#endif
#endif
#if defined (SOC_SERIES_AT32F437)
    /* clkout1 source is the pll; two-stage divider depends on the phy model */
    crm_clock_out1_set(CRM_CLKOUT1_PLL);
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720) || \
    defined (PHY_USING_YT8512)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_2);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_1);
#endif
#endif
#endif
}
  156. /**
  157. * @brief reset phy register
  158. */
  159. static error_status emac_phy_register_reset(void)
  160. {
  161. uint16_t data = 0;
  162. uint32_t timeout = 0;
  163. uint32_t i = 0;
  164. if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_RESET_BIT) == ERROR)
  165. {
  166. return ERROR;
  167. }
  168. for(i = 0; i < 0x000FFFFF; i++);
  169. do
  170. {
  171. timeout++;
  172. if(emac_phy_register_read(phy_addr, PHY_CONTROL_REG, &data) == ERROR)
  173. {
  174. return ERROR;
  175. }
  176. } while((data & PHY_RESET_BIT) && (timeout < PHY_TIMEOUT));
  177. for(i = 0; i < 0x00FFFFF; i++);
  178. if(timeout == PHY_TIMEOUT)
  179. {
  180. return ERROR;
  181. }
  182. return SUCCESS;
  183. }
/**
 * @brief set mac speed related parameters
 * @param nego  EMAC_AUTO_NEGOTIATION_ON to negotiate speed/duplex with the
 *              link partner, anything else forces the given mode/speed
 * @param mode  duplex mode to force when auto-negotiation is off
 * @param speed line speed to force when auto-negotiation is off
 * @retval SUCCESS, or ERROR on mdio access failure or negotiation timeout
 */
static error_status emac_speed_config(emac_auto_negotiation_type nego, emac_duplex_type mode, emac_speed_type speed)
{
    uint16_t data = 0;
    uint32_t timeout = 0;
    if(nego == EMAC_AUTO_NEGOTIATION_ON)
    {
        /* wait until the phy reports link up */
        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_LINKED_STATUS_BIT) && (timeout < PHY_TIMEOUT));
        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }
        timeout = 0;
        /* start auto-negotiation */
        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_AUTO_NEGOTIATION_BIT) == ERROR)
        {
            return ERROR;
        }
        /* wait until the phy reports negotiation complete */
        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_NEGO_COMPLETE_BIT) && (timeout < PHY_TIMEOUT));
        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }
        /* read the negotiated result from the phy-specific status register */
        if(emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, &data) == ERROR)
        {
            return ERROR;
        }
#if defined (PHY_USING_DM9162) || defined (PHY_USING_LAN8720)
        /* dm9162/lan8720 report one bit per speed/duplex combination */
        if(data & PHY_FULL_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        else if(data & PHY_FULL_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
#endif
#if defined (PHY_USING_DP83848)
        /* dp83848: separate duplex and speed bits; speed bit set means 10 mbit/s */
        if(data & PHY_DUPLEX_MODE)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        if(data & PHY_SPEED_MODE)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
#endif
#if defined (PHY_USING_YT8512)
        /* yt8512: separate duplex and speed bits; speed bit set means 100 mbit/s */
        if(data & PHY_DUPLEX_MODE)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        if(data & PHY_SPEED_MODE)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
#endif
    }
    else
    {
        /* force mode/speed: control register bit 8 = duplex, bit 13 = speed */
        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, (uint16_t)((mode << 8) | (speed << 13))) == ERROR)
        {
            return ERROR;
        }
        if(speed == EMAC_SPEED_100MBPS)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
        if(mode == EMAC_FULL_DUPLEX)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
    }
    return SUCCESS;
}
  310. /**
  311. * @brief initialize emac phy
  312. */
  313. static error_status emac_phy_init(emac_control_config_type *control_para)
  314. {
  315. emac_clock_range_set();
  316. if(emac_phy_register_reset() == ERROR)
  317. {
  318. return ERROR;
  319. }
  320. if(emac_speed_config(control_para->auto_nego, control_para->duplex_mode, control_para->fast_ethernet_speed) == ERROR)
  321. {
  322. return ERROR;
  323. }
  324. emac_control_config(control_para);
  325. return SUCCESS;
  326. }
/**
 * @brief emac initialization function (rt-thread device init hook)
 * @param dev device handle (unused, the driver operates on the singleton)
 * @retval RT_EOK on success, -RT_ERROR when the phy could not be initialized
 */
static rt_err_t rt_at32_emac_init(rt_device_t dev)
{
    emac_control_config_type mac_control_para;
    emac_dma_config_type dma_control_para;
    /* check till phy detected */
    /* NOTE(review): blocks forever if the phy monitor thread never finds a
       phy (phy_addr stays 0xFF) -- confirm this is the intended behavior */
    while(phy_addr == 0xFF)
    {
        rt_thread_mdelay(1000);
    }
    /* emac reset */
    emac_reset();
    /* software reset emac dma */
    emac_dma_software_reset_set();
    /* busy-wait until the dma core reports the reset is complete */
    while(emac_dma_software_reset_get() == SET);
    emac_control_para_init(&mac_control_para);
    mac_control_para.auto_nego = EMAC_AUTO_NEGOTIATION_ON;
    if(emac_phy_init(&mac_control_para) == ERROR)
    {
        LOG_E("emac hardware init failed");
        return -RT_ERROR;
    }
    else
    {
        LOG_D("emac hardware init success");
    }
    emac_transmit_flow_control_enable(TRUE);
    emac_zero_quanta_pause_disable(TRUE);
    /* set mac address */
    emac_local_address_set(at32_emac_device.dev_addr);
    /* set emac dma rx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_RECEIVE, dma_rx_dscr_tab, rx_buff, EMAC_NUM_RX_BUF);
    /* set emac dma tx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_TRANSMIT, dma_tx_dscr_tab, tx_buff, EMAC_NUM_TX_BUF);
    emac_dma_para_init(&dma_control_para);
    /* store-and-forward both directions, address-aligned beats, separate
       pbl, fixed burst, 32-beat bursts, rx-priority 2:1 */
    dma_control_para.rsf_enable = TRUE;
    dma_control_para.tsf_enable = TRUE;
    dma_control_para.osf_enable = TRUE;
    dma_control_para.aab_enable = TRUE;
    dma_control_para.usp_enable = TRUE;
    dma_control_para.fb_enable = TRUE;
    dma_control_para.flush_rx_disable = TRUE;
    dma_control_para.rx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.tx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.priority_ratio = EMAC_DMA_2_RX_1_TX;
    emac_dma_config(&dma_control_para);
    /* emac interrupt init: normal summary + rx interrupts only */
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_NORMAL_SUMMARY, TRUE);
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_RX, TRUE);
    nvic_irq_enable(EMAC_IRQn, 0x07, 0);
    /* enable emac */
    emac_start();
    return RT_EOK;
}
/* open the emac device; nothing to configure, always succeeds */
static rt_err_t rt_at32_emac_open(rt_device_t dev, rt_uint16_t oflag)
{
    LOG_D("emac open");
    return RT_EOK;
}
/* close the emac device; nothing to tear down, always succeeds */
static rt_err_t rt_at32_emac_close(rt_device_t dev)
{
    LOG_D("emac close");
    return RT_EOK;
}
/* byte-oriented read is not supported on a netif device; report ENOSYS */
static rt_ssize_t rt_at32_emac_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    LOG_D("emac read");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
/* byte-oriented write is not supported on a netif device; report ENOSYS */
static rt_ssize_t rt_at32_emac_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    LOG_D("emac write");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
  405. static rt_err_t rt_at32_emac_control(rt_device_t dev, int cmd, void *args)
  406. {
  407. switch (cmd)
  408. {
  409. case NIOCTL_GADDR:
  410. /* get mac address */
  411. if (args)
  412. {
  413. SMEMCPY(args, at32_emac_device.dev_addr, 6);
  414. }
  415. else
  416. {
  417. return -RT_ERROR;
  418. }
  419. break;
  420. default :
  421. break;
  422. }
  423. return RT_EOK;
  424. }
/**
 * @brief emac txpkt chainmode
 *        hand the already-filled tx descriptor buffers over to the dma,
 *        splitting the frame across descriptors when it exceeds
 *        EMAC_MAX_PACKET_LENGTH
 * @param frame_length total number of bytes previously copied into the tx
 *        descriptor buffers by rt_at32_emac_tx()
 * @retval RT_EOK when the descriptors were given to the dma, -RT_ERROR when
 *         the length is zero or the first descriptor is still owned by the dma
 */
rt_err_t emac_txpkt_chainmode(rt_uint32_t frame_length)
{
    rt_uint32_t buf_cnt = 0, index = 0;
    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_tx_desc_to_set->status & EMAC_DMATXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return -RT_ERROR;
    }
    if(frame_length == 0)
    {
        return -RT_ERROR;
    }
    /* number of descriptors needed for this frame */
    if(frame_length > EMAC_MAX_PACKET_LENGTH)
    {
        buf_cnt = frame_length / EMAC_MAX_PACKET_LENGTH;
        if(frame_length % EMAC_MAX_PACKET_LENGTH)
        {
            buf_cnt += 1;
        }
    }
    else
    {
        buf_cnt = 1;
    }
    if(buf_cnt == 1)
    {
        /* setting the last segment and first segment bits (in this case a frame is transmitted in one descriptor) */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS;
        /* setting the frame length: bits[12:0] */
        dma_tx_desc_to_set->controlsize = (frame_length & EMAC_DMATXDESC_TBS1);
        /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;
        /* selects the next dma tx descriptor list for next buffer to send */
        dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
    }
    else
    {
        for(index = 0; index < buf_cnt; index ++)
        {
            /* clear first and last segments */
            dma_tx_desc_to_set->status &= ~(EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS);
            /* set first segments */
            if(index == 0)
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_FS;
            }
            /* set size */
            dma_tx_desc_to_set->controlsize = (EMAC_MAX_PACKET_LENGTH & EMAC_DMATXDESC_TBS1);
            /* set last segments: the final descriptor carries the remainder */
            if(index == (buf_cnt - 1))
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS;
                dma_tx_desc_to_set->controlsize = ((frame_length - ((buf_cnt-1) * EMAC_MAX_PACKET_LENGTH)) & EMAC_DMATXDESC_TBS1);
            }
            /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
            dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;
            /* selects the next dma tx descriptor list for next buffer to send */
            dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
        }
    }
    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission*/
        EMAC_DMA->tpd_bit.tpd = 0;
    }
    return RT_EOK;
}
/**
 * @brief transmit data
 *        copy an lwip pbuf chain into the tx descriptor buffers, then hand
 *        the descriptors to the dma via emac_txpkt_chainmode()
 * @param dev device handle (unused)
 * @param p   pbuf chain holding the outgoing frame
 * @retval RT_EOK -- NOTE(review): RT_EOK is also returned when a descriptor
 *         is still owned by the dma and the frame is silently dropped;
 *         confirm that reporting success on drop is intentional
 */
rt_err_t rt_at32_emac_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t ret = -RT_ERROR;
    struct pbuf *q;
    rt_uint32_t length = 0;
    rt_uint32_t buffer_offset = 0, payload_offset = 0, copy_count = 0;
    emac_dma_desc_type *dma_tx_desc;
    rt_uint8_t *buffer;
    dma_tx_desc = dma_tx_desc_to_set;
    buffer = (uint8_t *)(dma_tx_desc_to_set->buf1addr);
    /* copy data to buffer */
    for(q = p; q != NULL; q = q->next)
    {
        /* descriptor still owned by the dma: drop the frame */
        if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
        {
            ret = RT_EOK;
            goto _error;
        }
        copy_count = q->len;
        payload_offset = 0;
        /* the current pbuf may span several descriptor buffers */
        while((copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH)
        {
            rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));
            dma_tx_desc = (emac_dma_desc_type*)dma_tx_desc->buf2nextdescaddr;
            if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
            {
                ret = RT_EOK;
                goto _error;
            }
            buffer = (uint8_t *)dma_tx_desc->buf1addr;
            copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            length = length + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            buffer_offset = 0;
        }
        /* copy the remainder of this pbuf */
        rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, copy_count);
        buffer_offset = buffer_offset + copy_count;
        length = length + copy_count;
    }
    /* hand the filled descriptors to the dma */
    emac_txpkt_chainmode(length);
    ret = RT_EOK;
_error:
    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission*/
        EMAC_DMA->tpd_bit.tpd = 0;
    }
    return ret;
}
/**
 * @brief emac rxpkt chainmode
 *        examine the current rx descriptor and accumulate frame segments
 *        into the global rx_frame state
 * @retval RT_EOK when a complete frame (last-segment descriptor) has been
 *         collected and rx_frame describes it, -RT_ERROR otherwise (no frame
 *         ready yet, or descriptor still owned by the dma)
 */
rt_err_t emac_rxpkt_chainmode(void)
{
    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return -RT_ERROR;
    }
    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_LS) != (u32)RESET)
    {
        /* last segment: the frame is complete */
        rx_frame.g_seg_count ++;
        if(rx_frame.g_seg_count == 1)
        {
            /* single-descriptor frame: this descriptor is also the first segment */
            rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        }
        rx_frame.rx_ls_desc = dma_rx_desc_to_get;
        /* frame length field includes the 4-byte crc: strip it */
        rx_frame.length = ((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FL) >> DMARXDESC_FRAMELENGTH_SHIFT) - 4;
        rx_frame.buffer = rx_frame.rx_fs_desc->buf1addr;
        /* Selects the next DMA Rx descriptor list for next buffer to read */
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
        return RT_EOK;
    }
    else if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FS) != (u32)RESET)
    {
        /* first segment of a multi-descriptor frame: restart the collection */
        rx_frame.g_seg_count = 1;
        rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        rx_frame.rx_ls_desc = NULL;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }
    else
    {
        /* intermediate segment: just count it and advance */
        rx_frame.g_seg_count ++;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }
    return -RT_ERROR;
}
/**
 * @brief receive data
 *        pull one complete frame out of the rx descriptor ring, copy it into
 *        a freshly allocated pbuf chain, then give the descriptors back to
 *        the dma
 * @param dev device handle (unused)
 * @retval pbuf chain holding the frame, or NULL when no complete frame is
 *         available (or pbuf allocation failed -- the frame is then dropped
 *         but its descriptors are still returned to the dma)
 */
struct pbuf *rt_at32_emac_rx(rt_device_t dev)
{
    struct pbuf *p = NULL;
    struct pbuf *q = NULL;
    rt_uint16_t len = 0;
    rt_uint8_t *buffer;
    emac_dma_desc_type *dma_rx_desc;
    rt_uint32_t buffer_offset, payload_offset = 0, copy_count = 0;
    rt_uint32_t index = 0;
    if(emac_rxpkt_chainmode() != RT_EOK)
    {
        return NULL;
    }
    /* obtain the size of the packet and put it into the "len"
    variable. */
    len = rx_frame.length;
    buffer = (uint8_t *)rx_frame.buffer;
    /* we allocate a pbuf chain of pbufs from the pool. */
    if(len > 0)
    {
        p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
    }
    if(p != NULL)
    {
        dma_rx_desc = rx_frame.rx_fs_desc;
        buffer_offset = 0;
        for (q = p; q != NULL; q = q->next)
        {
            copy_count = q->len;
            payload_offset = 0;
            /* the current pbuf may need data from several descriptor buffers */
            while( (copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH )
            {
                /* copy data to pbuf */
                rt_memcpy((uint8_t*)q->payload + payload_offset, buffer + buffer_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));
                /* point to next descriptor */
                dma_rx_desc = (emac_dma_desc_type *)(dma_rx_desc->buf2nextdescaddr);
                buffer = (uint8_t *)(dma_rx_desc->buf1addr);
                copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                buffer_offset = 0;
            }
            rt_memcpy((uint8_t*)q->payload + payload_offset, (uint8_t*)buffer + buffer_offset, copy_count);
            buffer_offset = buffer_offset + copy_count;
        }
    }
    /* give all descriptors of this frame back to the dma */
    dma_rx_desc = rx_frame.rx_fs_desc;
    for(index = 0; index < rx_frame.g_seg_count; index ++)
    {
        dma_rx_desc->status |= EMAC_DMARXDESC_OWN;
        dma_rx_desc = (emac_dma_desc_type*) (dma_rx_desc->buf2nextdescaddr);
    }
    rx_frame.g_seg_count = 0;
    /* when rx buffer unavailable flag is set: clear it and resume reception */
    if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG))
    {
        /* clear rbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        /* resume dma reception */
        EMAC_DMA->rpd_bit.rpd = FALSE;
    }
    return p;
}
/**
 * @brief emac global interrupt handler
 *        signals the lwip rx thread on frame reception and clears the
 *        transmit / abnormal-summary dma flags
 */
void EMAC_IRQHandler(void)
{
    /* enter interrupt */
    rt_interrupt_enter();
    /* packet reception */
    if (emac_dma_flag_get(EMAC_DMA_RI_FLAG) == SET)
    {
        /* a frame has been received: wake the rx thread */
        eth_device_ready(&(at32_emac_device.parent));
        emac_dma_flag_clear(EMAC_DMA_RI_FLAG);
    }
    /* packet transmission */
    if (emac_dma_flag_get(EMAC_DMA_TI_FLAG) == SET)
    {
        emac_dma_flag_clear(EMAC_DMA_TI_FLAG);
    }
    /* clear normal interrupt summary */
    emac_dma_flag_clear(EMAC_DMA_NIS_FLAG);
    /* clear dma error flags (rx buffer unavailable, rx overflow) */
    if(emac_dma_flag_get(EMAC_DMA_AIS_FLAG) != RESET)
    {
        if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        }
        if(emac_dma_flag_get(EMAC_DMA_OVF_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_OVF_FLAG);
        }
        emac_dma_flag_clear(EMAC_DMA_AIS_FLAG);
    }
    /* leave interrupt */
    rt_interrupt_leave();
}
/* bit flags describing the cached phy link state in phy_linkchange() */
enum {
    PHY_LINK       = (1 << 0),  /* link is up */
    PHY_10M        = (1 << 1),  /* 10 mbit/s (otherwise 100 mbit/s) */
    PHY_FULLDUPLEX = (1 << 2),  /* full duplex (otherwise half duplex) */
};
  697. static void phy_linkchange()
  698. {
  699. static rt_uint8_t phy_speed = 0;
  700. rt_uint8_t phy_speed_new = 0;
  701. rt_uint16_t status;
  702. emac_phy_register_read(phy_addr, PHY_BASIC_STATUS_REG, (uint16_t *)&status);
  703. LOG_D("phy basic status reg is 0x%X", status);
  704. if (status & (PHY_AUTONEGO_COMPLETE_MASK | PHY_LINKED_STATUS_MASK))
  705. {
  706. rt_uint16_t SR = 0;
  707. phy_speed_new |= PHY_LINK;
  708. emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, (uint16_t *)&SR);
  709. LOG_D("phy control status reg is 0x%X", SR);
  710. if (SR & (PHY_SPEED_MODE))
  711. {
  712. #if defined (PHY_USING_DP83848)
  713. phy_speed_new |= PHY_10M;
  714. #endif
  715. }
  716. if (SR & (PHY_DUPLEX_MODE))
  717. {
  718. phy_speed_new |= PHY_FULLDUPLEX;
  719. }
  720. }
  721. if (phy_speed != phy_speed_new)
  722. {
  723. phy_speed = phy_speed_new;
  724. if (phy_speed & PHY_LINK)
  725. {
  726. LOG_D("link up");
  727. if (phy_speed & PHY_10M)
  728. {
  729. LOG_D("10Mbps");
  730. at32_emac_device.emac_speed = EMAC_SPEED_10MBPS;
  731. }
  732. else
  733. {
  734. at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
  735. LOG_D("100Mbps");
  736. }
  737. if (phy_speed & PHY_FULLDUPLEX)
  738. {
  739. LOG_D("full-duplex");
  740. at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;
  741. }
  742. else
  743. {
  744. LOG_D("half-duplex");
  745. at32_emac_device.emac_mode = EMAC_HALF_DUPLEX;
  746. }
  747. /* send link up. */
  748. eth_device_linkchange(&at32_emac_device.parent, RT_TRUE);
  749. }
  750. else
  751. {
  752. LOG_I("link down");
  753. eth_device_linkchange(&at32_emac_device.parent, RT_FALSE);
  754. }
  755. }
  756. }
#ifdef PHY_USING_INTERRUPT_MODE
/* phy interrupt service routine: read the flag register (which acknowledges
   the interrupt on the phy side), then re-evaluate the link state */
static void emac_phy_isr(void *args)
{
    rt_uint32_t status = 0;
    emac_phy_register_read(phy_addr, PHY_INTERRUPT_FLAG_REG, (uint16_t *)&status);
    LOG_D("phy interrupt status reg is 0x%X", status);
    phy_linkchange();
}
#endif /* PHY_USING_INTERRUPT_MODE */
  766. static void phy_monitor_thread_entry(void *parameter)
  767. {
  768. uint8_t detected_count = 0;
  769. while(phy_addr == 0xFF)
  770. {
  771. /* phy search */
  772. rt_uint32_t i, temp;
  773. for (i = 0; i <= 0x1F; i++)
  774. {
  775. emac_phy_register_read(i, PHY_BASIC_STATUS_REG, (uint16_t *)&temp);
  776. if (temp != 0xFFFF && temp != 0x00)
  777. {
  778. phy_addr = i;
  779. break;
  780. }
  781. }
  782. detected_count++;
  783. rt_thread_mdelay(1000);
  784. if (detected_count > 10)
  785. {
  786. LOG_E("No PHY device was detected, please check hardware!");
  787. }
  788. }
  789. LOG_D("Found a phy, address:0x%02X", phy_addr);
  790. /* reset phy */
  791. LOG_D("RESET PHY!");
  792. emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_RESET_MASK);
  793. rt_thread_mdelay(2000);
  794. emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_AUTO_NEGOTIATION_MASK);
  795. phy_linkchange();
  796. #ifdef PHY_USING_INTERRUPT_MODE
  797. /* configuration intterrupt pin */
  798. rt_pin_mode(PHY_INT_PIN, PIN_MODE_INPUT_PULLUP);
  799. rt_pin_attach_irq(PHY_INT_PIN, PIN_IRQ_MODE_FALLING, emac_phy_isr, (void *)"callbackargs");
  800. rt_pin_irq_enable(PHY_INT_PIN, PIN_IRQ_ENABLE);
  801. /* enable phy interrupt */
  802. emac_phy_register_write(phy_addr, PHY_INTERRUPT_MASK_REG, PHY_INT_MASK);
  803. #if defined(PHY_INTERRUPT_CTRL_REG)
  804. emac_phy_register_write(phy_addr, PHY_INTERRUPT_CTRL_REG, PHY_INTERRUPT_EN);
  805. #endif
  806. #else /* PHY_USING_INTERRUPT_MODE */
  807. at32_emac_device.poll_link_timer = rt_timer_create("phylnk", (void (*)(void*))phy_linkchange,
  808. NULL, RT_TICK_PER_SECOND, RT_TIMER_FLAG_PERIODIC);
  809. if (!at32_emac_device.poll_link_timer || rt_timer_start(at32_emac_device.poll_link_timer) != RT_EOK)
  810. {
  811. LOG_E("Start link change detection timer failed");
  812. }
  813. #endif /* PHY_USING_INTERRUPT_MODE */
  814. }
  815. /* Register the EMAC device */
  816. static int rt_hw_at32_emac_init(void)
  817. {
  818. rt_err_t state = RT_EOK;
  819. /* Prepare receive and send buffers */
  820. rx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_RX_BUF, EMAC_MAX_PACKET_LENGTH);
  821. if (rx_buff == RT_NULL)
  822. {
  823. LOG_E("No memory");
  824. state = -RT_ENOMEM;
  825. goto __exit;
  826. }
  827. tx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_TX_BUF, EMAC_MAX_PACKET_LENGTH);
  828. if (tx_buff == RT_NULL)
  829. {
  830. LOG_E("No memory");
  831. state = -RT_ENOMEM;
  832. goto __exit;
  833. }
  834. dma_rx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_RX_BUF, sizeof(emac_dma_desc_type));
  835. if (dma_rx_dscr_tab == RT_NULL)
  836. {
  837. LOG_E("No memory");
  838. state = -RT_ENOMEM;
  839. goto __exit;
  840. }
  841. dma_tx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_TX_BUF, sizeof(emac_dma_desc_type));
  842. if (dma_tx_dscr_tab == RT_NULL)
  843. {
  844. LOG_E("No memory");
  845. state = -RT_ENOMEM;
  846. goto __exit;
  847. }
  848. /* phy clock */
  849. phy_clock_config();
  850. /* enable periph clock */
  851. crm_periph_clock_enable(CRM_EMAC_PERIPH_CLOCK, TRUE);
  852. crm_periph_clock_enable(CRM_EMACTX_PERIPH_CLOCK, TRUE);
  853. crm_periph_clock_enable(CRM_EMACRX_PERIPH_CLOCK, TRUE);
  854. /* interface mode */
  855. #if defined (SOC_SERIES_AT32F407)
  856. gpio_pin_remap_config(MII_RMII_SEL_GMUX, TRUE);
  857. #endif
  858. #if defined (SOC_SERIES_AT32F437)
  859. scfg_emac_interface_set(SCFG_EMAC_SELECT_RMII);
  860. #endif
  861. /* emac gpio init */
  862. at32_msp_emac_init(NULL);
  863. at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
  864. at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;
  865. at32_emac_device.dev_addr[0] = 0x00;
  866. at32_emac_device.dev_addr[1] = 0x66;
  867. at32_emac_device.dev_addr[2] = 0x88;
  868. /* generate mac addr from unique id (only for test). */
  869. at32_emac_device.dev_addr[3] = *(rt_uint8_t *)(0x1FFFF7E8 + 4);
  870. at32_emac_device.dev_addr[4] = *(rt_uint8_t *)(0x1FFFF7E8 + 2);
  871. at32_emac_device.dev_addr[5] = *(rt_uint8_t *)(0x1FFFF7E8 + 0);
  872. at32_emac_device.parent.parent.init = rt_at32_emac_init;
  873. at32_emac_device.parent.parent.open = rt_at32_emac_open;
  874. at32_emac_device.parent.parent.close = rt_at32_emac_close;
  875. at32_emac_device.parent.parent.read = rt_at32_emac_read;
  876. at32_emac_device.parent.parent.write = rt_at32_emac_write;
  877. at32_emac_device.parent.parent.control = rt_at32_emac_control;
  878. at32_emac_device.parent.parent.user_data = RT_NULL;
  879. at32_emac_device.parent.eth_rx = rt_at32_emac_rx;
  880. at32_emac_device.parent.eth_tx = rt_at32_emac_tx;
  881. rx_frame.g_seg_count = 0;
  882. /* reset phy */
  883. phy_reset();
  884. /* start phy monitor */
  885. rt_thread_t tid;
  886. tid = rt_thread_create("phy",
  887. phy_monitor_thread_entry,
  888. RT_NULL,
  889. 1024,
  890. RT_THREAD_PRIORITY_MAX - 2,
  891. 2);
  892. if (tid != RT_NULL)
  893. {
  894. rt_thread_startup(tid);
  895. }
  896. else
  897. {
  898. state = -RT_ERROR;
  899. }
  900. /* register eth device */
  901. state = eth_device_init(&(at32_emac_device.parent), "e0");
  902. if (RT_EOK == state)
  903. {
  904. LOG_D("emac device init success");
  905. }
  906. else
  907. {
  908. LOG_E("emac device init faild: %d", state);
  909. state = -RT_ERROR;
  910. goto __exit;
  911. }
  912. __exit:
  913. if (state != RT_EOK)
  914. {
  915. if (rx_buff)
  916. {
  917. rt_free(rx_buff);
  918. }
  919. if (tx_buff)
  920. {
  921. rt_free(tx_buff);
  922. }
  923. if (dma_rx_dscr_tab)
  924. {
  925. rt_free(dma_rx_dscr_tab);
  926. }
  927. if (dma_tx_dscr_tab)
  928. {
  929. rt_free(dma_tx_dscr_tab);
  930. }
  931. }
  932. return state;
  933. }
  934. INIT_DEVICE_EXPORT(rt_hw_at32_emac_init);