drv_emac.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-05-16 shelton first version
  9. * 2022-07-11 shelton optimize code to improve network throughput
  10. * performance
  11. * 2022-10-15 shelton optimize code
  12. * 2023-10-18 shelton optimize code
  13. */
  14. #include "drv_emac.h"
  15. #include <netif/ethernetif.h>
  16. #include <lwipopts.h>
  17. /* debug option */
  18. //#define EMAC_RX_DUMP
  19. //#define EMAC_TX_DUMP
  20. //#define DRV_DEBUG
  21. #define LOG_TAG "drv.emac"
  22. #include <drv_log.h>
  23. #define CRYSTAL_ON_PHY 0
  24. /* emac memory buffer configuration */
  25. #define EMAC_NUM_RX_BUF 5 /* rx (5 * 1500) */
  26. #define EMAC_NUM_TX_BUF 5 /* tx (5 * 1500) */
  27. #define MAX_ADDR_LEN 6
  28. #define DMARXDESC_FRAMELENGTH_SHIFT 16
/* driver instance: rt-thread ethernet device plus cached link parameters */
struct rt_at32_emac
{
    /* inherit from ethernet device */
    struct eth_device parent;
#ifndef PHY_USING_INTERRUPT_MODE
    /* periodic timer polling the phy link state when no phy interrupt pin is used */
    rt_timer_t poll_link_timer;
#endif
    /* interface address info, hw address */
    rt_uint8_t dev_addr[MAX_ADDR_LEN];
    /* emac_speed: last link speed reported by the phy (10/100 mbps) */
    emac_speed_type emac_speed;
    /* emac_duplex_mode: last duplex mode reported by the phy */
    emac_duplex_type emac_mode;
};
/* bookkeeping for the rx frame currently being reassembled from the dma descriptor ring */
typedef struct {
    /* frame length in bytes (the 4-byte crc is subtracted in emac_rxpkt_chainmode) */
    rt_uint32_t length;
    /* address of the first segment's data buffer */
    rt_uint32_t buffer;
    /* general-purpose descriptor cursor */
    emac_dma_desc_type *descriptor;
    /* descriptor that holds the first segment of the frame */
    emac_dma_desc_type *rx_fs_desc;
    /* descriptor that holds the last segment of the frame */
    emac_dma_desc_type *rx_ls_desc;
    /* number of descriptors consumed by the frame so far */
    rt_uint8_t g_seg_count;
} frame_type;
/* rx/tx dma descriptor tables, allocated in rt_hw_at32_emac_init() */
static emac_dma_desc_type *dma_rx_dscr_tab, *dma_tx_dscr_tab;
/* current rx/tx descriptor cursors, maintained by the at32 emac library */
extern emac_dma_desc_type *dma_rx_desc_to_get, *dma_tx_desc_to_set;
/* state of the rx frame currently being reassembled */
frame_type rx_frame;
/* packet buffer pools backing the rx/tx descriptor rings */
static rt_uint8_t *rx_buff, *tx_buff;
/* the single emac device instance registered with the rt-thread ethernet layer */
static struct rt_at32_emac at32_emac_device;
/* detected phy address; 0xFF means no phy has been found yet */
static uint8_t phy_addr = 0xFF;
#if defined(EMAC_RX_DUMP) || defined(EMAC_TX_DUMP)
/* true when ch is a printable ascii character */
#define __is_print(ch) ((unsigned int)((ch) - ' ') < 127u - ' ')
/**
 * @brief dump a buffer as a hex + ascii listing, 16 bytes per row (debug only)
 * @param ptr    buffer to dump
 * @param buflen number of bytes to dump
 */
static void dump_hex(const rt_uint8_t *ptr, rt_size_t buflen)
{
    unsigned char *buf = (unsigned char *)ptr;
    int i, j;
    for (i = 0; i < buflen; i += 16)
    {
        rt_kprintf("%08X: ", i);
        /* hex column; pad short final rows with spaces */
        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                rt_kprintf("%02X ", buf[i + j]);
            else
                rt_kprintf(" ");
        rt_kprintf(" ");
        /* ascii column: non-printable bytes rendered as '.' */
        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                rt_kprintf("%c", __is_print(buf[i + j]) ? buf[i + j] : '.');
        rt_kprintf("\n");
    }
}
#endif
/**
 * @brief phy reset
 * @note  drives the board's phy hardware-reset gpio low then high, then waits
 *        2 s for the phy to come out of reset. pin assignment is per-SoC:
 *        PE15/PG15 on AT32F437 boards, PC8 on AT32F407 boards.
 */
static void phy_reset(void)
{
    gpio_init_type gpio_init_struct;
#if defined (SOC_SERIES_AT32F437)
    crm_periph_clock_enable(CRM_GPIOE_PERIPH_CLOCK, TRUE);
    crm_periph_clock_enable(CRM_GPIOG_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOE, &gpio_init_struct);
    gpio_init_struct.gpio_pins = GPIO_PINS_15;
    gpio_init(GPIOG, &gpio_init_struct);
    /* assert reset on both candidate pins, hold 2 ms, then release PE15 */
    gpio_bits_reset(GPIOE, GPIO_PINS_15);
    gpio_bits_reset(GPIOG, GPIO_PINS_15);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOE, GPIO_PINS_15);
#endif
#if defined (SOC_SERIES_AT32F407)
    crm_periph_clock_enable(CRM_GPIOC_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_OUTPUT;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOC, &gpio_init_struct);
    /* assert reset for 2 ms then release */
    gpio_bits_reset(GPIOC, GPIO_PINS_8);
    rt_thread_mdelay(2);
    gpio_bits_set(GPIOC, GPIO_PINS_8);
#endif
    /* give the phy time to finish its internal power-up/reset sequence */
    rt_thread_mdelay(2000);
}
/**
 * @brief phy clock config
 * @note  when the phy has no crystal of its own (CRYSTAL_ON_PHY == 0) the MCU
 *        outputs a reference clock on PA8 (CLKOUT). the divider is chosen so
 *        that the DM9162 receives 25 MHz and the DP83848 receives 50 MHz.
 */
static void phy_clock_config(void)
{
#if (CRYSTAL_ON_PHY == 0)
    /* if CRYSTAL_NO_PHY, output clock with pa8 of mcu */
    gpio_init_type gpio_init_struct;
    crm_periph_clock_enable(CRM_GPIOA_PERIPH_CLOCK, TRUE);
    gpio_default_para_init(&gpio_init_struct);
    gpio_init_struct.gpio_drive_strength = GPIO_DRIVE_STRENGTH_STRONGER;
    gpio_init_struct.gpio_mode = GPIO_MODE_MUX;
    gpio_init_struct.gpio_out_type = GPIO_OUTPUT_PUSH_PULL;
    gpio_init_struct.gpio_pull = GPIO_PULL_NONE;
    gpio_init_struct.gpio_pins = GPIO_PINS_8;
    gpio_init(GPIOA, &gpio_init_struct);
    /* 9162 clkout output 25 mhz */
    /* 83848 clkout output 50 mhz */
#if defined (SOC_SERIES_AT32F407)
    /* route the system clock to CLKOUT and divide it down for the phy */
    crm_clock_out_set(CRM_CLKOUT_SCLK);
#if defined (PHY_USING_DM9162)
    crm_clkout_div_set(CRM_CLKOUT_DIV_8);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_DIV_4);
#endif
#endif
#if defined (SOC_SERIES_AT32F437)
    /* f437 uses clkout1 fed from the pll, with a two-stage divider */
    crm_clock_out1_set(CRM_CLKOUT1_PLL);
#if defined (PHY_USING_DM9162)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_2);
#elif defined (PHY_USING_DP83848)
    crm_clkout_div_set(CRM_CLKOUT_INDEX_1, CRM_CLKOUT_DIV1_5, CRM_CLKOUT_DIV2_1);
#endif
#endif
#endif
}
  153. /**
  154. * @brief reset phy register
  155. */
  156. static error_status emac_phy_register_reset(void)
  157. {
  158. uint16_t data = 0;
  159. uint32_t timeout = 0;
  160. uint32_t i = 0;
  161. if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_RESET_BIT) == ERROR)
  162. {
  163. return ERROR;
  164. }
  165. for(i = 0; i < 0x000FFFFF; i++);
  166. do
  167. {
  168. timeout++;
  169. if(emac_phy_register_read(phy_addr, PHY_CONTROL_REG, &data) == ERROR)
  170. {
  171. return ERROR;
  172. }
  173. } while((data & PHY_RESET_BIT) && (timeout < PHY_TIMEOUT));
  174. for(i = 0; i < 0x00FFFFF; i++);
  175. if(timeout == PHY_TIMEOUT)
  176. {
  177. return ERROR;
  178. }
  179. return SUCCESS;
  180. }
/**
 * @brief set mac speed related parameters
 * @param nego  EMAC_AUTO_NEGOTIATION_ON to negotiate speed/duplex with the
 *              link partner; anything else forces the given mode and speed
 * @param mode  duplex mode to force (only used when negotiation is off)
 * @param speed link speed to force (only used when negotiation is off)
 * @retval SUCCESS, or ERROR on mdio access failure / link / negotiation timeout
 */
static error_status emac_speed_config(emac_auto_negotiation_type nego, emac_duplex_type mode, emac_speed_type speed)
{
    uint16_t data = 0;
    uint32_t timeout = 0;
    if(nego == EMAC_AUTO_NEGOTIATION_ON)
    {
        /* wait for the phy to report link-up before negotiating */
        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_LINKED_STATUS_BIT) && (timeout < PHY_TIMEOUT));
        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }
        timeout = 0;
        /* kick off auto-negotiation and wait until the phy reports completion */
        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, PHY_AUTO_NEGOTIATION_BIT) == ERROR)
        {
            return ERROR;
        }
        do
        {
            timeout++;
            if(emac_phy_register_read(phy_addr, PHY_STATUS_REG, &data) == ERROR)
            {
                return ERROR;
            }
        } while(!(data & PHY_NEGO_COMPLETE_BIT) && (timeout < PHY_TIMEOUT));
        if(timeout == PHY_TIMEOUT)
        {
            return ERROR;
        }
        /* read the negotiated result from the phy-specific status register */
        if(emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, &data) == ERROR)
        {
            return ERROR;
        }
#ifdef PHY_USING_DM9162
        /* dm9162 reports one status bit per speed/duplex combination */
        if(data & PHY_FULL_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_100MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        else if(data & PHY_FULL_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else if(data & PHY_HALF_DUPLEX_10MBPS_BIT)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
#endif
#ifdef PHY_USING_DP83848
        /* dp83848 has separate duplex and speed bits; speed bit set = 10 mbps */
        if(data & PHY_DUPLEX_MODE)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
        if(data & PHY_SPEED_MODE)
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
#endif
    }
    else
    {
        /* forced mode: duplex goes to bit 8, speed to bit 13 of the phy
         * control register, then mirror the same settings into the mac */
        if(emac_phy_register_write(phy_addr, PHY_CONTROL_REG, (uint16_t)((mode << 8) | (speed << 13))) == ERROR)
        {
            return ERROR;
        }
        if(speed == EMAC_SPEED_100MBPS)
        {
            emac_fast_speed_set(EMAC_SPEED_100MBPS);
        }
        else
        {
            emac_fast_speed_set(EMAC_SPEED_10MBPS);
        }
        if(mode == EMAC_FULL_DUPLEX)
        {
            emac_duplex_mode_set(EMAC_FULL_DUPLEX);
        }
        else
        {
            emac_duplex_mode_set(EMAC_HALF_DUPLEX);
        }
    }
    return SUCCESS;
}
  289. /**
  290. * @brief initialize emac phy
  291. */
  292. static error_status emac_phy_init(emac_control_config_type *control_para)
  293. {
  294. emac_clock_range_set();
  295. if(emac_phy_register_reset() == ERROR)
  296. {
  297. return ERROR;
  298. }
  299. if(emac_speed_config(control_para->auto_nego, control_para->duplex_mode, control_para->fast_ethernet_speed) == ERROR)
  300. {
  301. return ERROR;
  302. }
  303. emac_control_config(control_para);
  304. return SUCCESS;
  305. }
/**
 * @brief emac initialization function
 * @param dev rt-thread device handle (unused; the single static instance is used)
 * @retval RT_EOK on success, -RT_ERROR when the phy could not be initialized
 * @note  blocks until the phy monitor thread has discovered a phy address
 */
static rt_err_t rt_at32_emac_init(rt_device_t dev)
{
    emac_control_config_type mac_control_para;
    emac_dma_config_type dma_control_para;
    /* check till phy detected */
    while(phy_addr == 0xFF)
    {
        rt_thread_mdelay(1000);
    }
    /* emac reset */
    emac_reset();
    /* software reset emac dma */
    emac_dma_software_reset_set();
    while(emac_dma_software_reset_get() == SET);
    emac_control_para_init(&mac_control_para);
    mac_control_para.auto_nego = EMAC_AUTO_NEGOTIATION_ON;
    if(emac_phy_init(&mac_control_para) == ERROR)
    {
        LOG_E("emac hardware init failed");
        return -RT_ERROR;
    }
    else
    {
        LOG_D("emac hardware init success");
    }
    emac_transmit_flow_control_enable(TRUE);
    emac_zero_quanta_pause_disable(TRUE);
    /* set mac address */
    emac_local_address_set(at32_emac_device.dev_addr);
    /* set emac dma rx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_RECEIVE, dma_rx_dscr_tab, rx_buff, EMAC_NUM_RX_BUF);
    /* set emac dma tx link list */
    emac_dma_descriptor_list_address_set(EMAC_DMA_TRANSMIT, dma_tx_dscr_tab, tx_buff, EMAC_NUM_TX_BUF);
    /* dma engine tuning: store-and-forward both directions, address-aligned
     * beats, separate pbl per direction, rx-priority 2:1 */
    emac_dma_para_init(&dma_control_para);
    dma_control_para.rsf_enable = TRUE;
    dma_control_para.tsf_enable = TRUE;
    dma_control_para.osf_enable = TRUE;
    dma_control_para.aab_enable = TRUE;
    dma_control_para.usp_enable = TRUE;
    dma_control_para.fb_enable = TRUE;
    dma_control_para.flush_rx_disable = TRUE;
    dma_control_para.rx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.tx_dma_pal = EMAC_DMA_PBL_32;
    dma_control_para.priority_ratio = EMAC_DMA_2_RX_1_TX;
    emac_dma_config(&dma_control_para);
    /* emac interrupt init */
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_NORMAL_SUMMARY, TRUE);
    emac_dma_interrupt_enable(EMAC_DMA_INTERRUPT_RX, TRUE);
    nvic_irq_enable(EMAC_IRQn, 0x07, 0);
    /* enable emac */
    emac_start();
    return RT_EOK;
}
/* device open hook: nothing to do, the emac is started in init */
static rt_err_t rt_at32_emac_open(rt_device_t dev, rt_uint16_t oflag)
{
    LOG_D("emac open");
    return RT_EOK;
}
/* device close hook: nothing to do, the emac keeps running */
static rt_err_t rt_at32_emac_close(rt_device_t dev)
{
    LOG_D("emac close");
    return RT_EOK;
}
/* raw read is not supported: packets flow through eth_rx instead */
static rt_ssize_t rt_at32_emac_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    LOG_D("emac read");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
/* raw write is not supported: packets flow through eth_tx instead */
static rt_ssize_t rt_at32_emac_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    LOG_D("emac write");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
  384. static rt_err_t rt_at32_emac_control(rt_device_t dev, int cmd, void *args)
  385. {
  386. switch (cmd)
  387. {
  388. case NIOCTL_GADDR:
  389. /* get mac address */
  390. if (args)
  391. {
  392. SMEMCPY(args, at32_emac_device.dev_addr, 6);
  393. }
  394. else
  395. {
  396. return -RT_ERROR;
  397. }
  398. break;
  399. default :
  400. break;
  401. }
  402. return RT_EOK;
  403. }
/**
 * @brief emac txpkt chainmode
 *        hands the already-filled tx buffers over to the dma by programming
 *        FS/LS flags and sizes, then setting the OWN bit on each descriptor.
 * @param frame_length total number of bytes already copied into the tx ring
 * @retval RT_EOK, or RT_ERROR when the current descriptor is still owned by
 *         the dma or frame_length is 0
 * @note  NOTE(review): errors are returned as RT_ERROR, not -RT_ERROR; the
 *        only caller (rt_at32_emac_tx) ignores the return value
 */
rt_err_t emac_txpkt_chainmode(rt_uint32_t frame_length)
{
    rt_uint32_t buf_cnt = 0, index = 0;
    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_tx_desc_to_set->status & EMAC_DMATXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return RT_ERROR;
    }
    if(frame_length == 0)
    {
        return RT_ERROR;
    }
    /* number of descriptors needed: ceil(frame_length / EMAC_MAX_PACKET_LENGTH) */
    if(frame_length > EMAC_MAX_PACKET_LENGTH)
    {
        buf_cnt = frame_length / EMAC_MAX_PACKET_LENGTH;
        if(frame_length % EMAC_MAX_PACKET_LENGTH)
        {
            buf_cnt += 1;
        }
    }
    else
    {
        buf_cnt = 1;
    }
    if(buf_cnt == 1)
    {
        /* setting the last segment and first segment bits (in this case a frame is transmitted in one descriptor) */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS;
        /* setting the frame length: bits[12:0] */
        dma_tx_desc_to_set->controlsize = (frame_length & EMAC_DMATXDESC_TBS1);
        /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
        dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;
        /* selects the next dma tx descriptor list for next buffer to send */
        dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
    }
    else
    {
        /* multi-descriptor frame: FS on the first, LS plus the remainder
         * length on the last, full buffer size on the ones in between */
        for(index = 0; index < buf_cnt; index ++)
        {
            /* clear first and last segments */
            dma_tx_desc_to_set->status &= ~(EMAC_DMATXDESC_LS | EMAC_DMATXDESC_FS);
            /* set first segments */
            if(index == 0)
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_FS;
            }
            /* set size */
            dma_tx_desc_to_set->controlsize = (EMAC_MAX_PACKET_LENGTH & EMAC_DMATXDESC_TBS1);
            /* set last segments */
            if(index == (buf_cnt - 1))
            {
                dma_tx_desc_to_set->status |= EMAC_DMATXDESC_LS;
                dma_tx_desc_to_set->controlsize = ((frame_length - ((buf_cnt-1) * EMAC_MAX_PACKET_LENGTH)) & EMAC_DMATXDESC_TBS1);
            }
            /* set own bit of the tx descriptor status: gives the buffer back to ethernet dma */
            dma_tx_desc_to_set->status |= EMAC_DMATXDESC_OWN;
            /* selects the next dma tx descriptor list for next buffer to send */
            dma_tx_desc_to_set = (emac_dma_desc_type*) (dma_tx_desc_to_set->buf2nextdescaddr);
        }
    }
    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission*/
        EMAC_DMA->tpd_bit.tpd = 0;
    }
    return RT_EOK;
}
/**
 * @brief transmit data
 *        copies a (possibly chained) lwip pbuf into the tx descriptor ring,
 *        spilling into the next descriptor whenever a buffer fills up, then
 *        hands the frame to the dma via emac_txpkt_chainmode().
 * @param dev rt-thread device handle (unused)
 * @param p   pbuf chain to send
 * @retval RT_EOK — also returned when a descriptor is still dma-owned and the
 *         frame is dropped; presumably so lwip frees the pbuf instead of
 *         retrying (NOTE(review): confirm this is the intended policy)
 */
rt_err_t rt_at32_emac_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t ret = -RT_ERROR;
    struct pbuf *q;
    rt_uint32_t length = 0;
    rt_uint32_t buffer_offset = 0, payload_offset = 0, copy_count = 0;
    emac_dma_desc_type *dma_tx_desc;
    rt_uint8_t *buffer;
    dma_tx_desc = dma_tx_desc_to_set;
    buffer = (uint8_t *)(dma_tx_desc_to_set->buf1addr);
    /* copy data to buffer */
    for(q = p; q != NULL; q = q->next)
    {
        /* descriptor still owned by dma: ring is full, drop the frame */
        if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
        {
            ret = RT_EOK;
            goto _error;
        }
        copy_count = q->len;
        payload_offset = 0;
        /* this pbuf segment does not fit in the current descriptor buffer:
         * fill it up and continue in the next descriptor */
        while((copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH)
        {
            rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));
            dma_tx_desc = (emac_dma_desc_type*)dma_tx_desc->buf2nextdescaddr;
            if((dma_tx_desc->status & EMAC_DMATXDESC_OWN) != RESET)
            {
                ret = RT_EOK;
                goto _error;
            }
            buffer = (uint8_t *)dma_tx_desc->buf1addr;
            copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            length = length + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
            buffer_offset = 0;
        }
        /* remainder of this pbuf segment fits in the current buffer */
        rt_memcpy(buffer + buffer_offset, (uint8_t *)q->payload + payload_offset, copy_count);
        buffer_offset = buffer_offset + copy_count;
        length = length + copy_count;
    }
    /* hand the assembled frame to the dma */
    emac_txpkt_chainmode(length);
    ret = RT_EOK;
_error:
    /* when tx buffer unavailable flag is set: clear it and resume transmission */
    if(emac_dma_flag_get(EMAC_DMA_TBU_FLAG))
    {
        /* clear tbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_TBU_FLAG);
        /* resume dma transmission*/
        EMAC_DMA->tpd_bit.tpd = 0;
    }
    return ret;
}
/**
 * @brief emac rxpkt chainmode
 *        advances through the rx descriptor ring collecting the segments of
 *        one frame into the global rx_frame state.
 * @retval RT_EOK once a complete frame (last-segment descriptor) has been
 *         seen; RT_ERROR while the frame is still incomplete or the current
 *         descriptor is still owned by the dma
 */
rt_err_t emac_rxpkt_chainmode(void)
{
    /* check if the descriptor is owned by the ethernet dma (when set) or cpu (when reset) */
    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_OWN) != (u32)RESET)
    {
        /* return error: own bit set */
        return RT_ERROR;
    }
    if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_LS) != (u32)RESET)
    {
        /* last segment: the frame is complete */
        rx_frame.g_seg_count ++;
        /* a single-descriptor frame (FS and LS on the same descriptor) lands
         * here with g_seg_count == 1, so record it as the first segment too */
        if(rx_frame.g_seg_count == 1)
        {
            rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        }
        rx_frame.rx_ls_desc = dma_rx_desc_to_get;
        /* frame length is in status bits [29:16]; subtract the 4-byte crc */
        rx_frame.length = ((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FL) >> DMARXDESC_FRAMELENGTH_SHIFT) - 4;
        rx_frame.buffer = rx_frame.rx_fs_desc->buf1addr;
        /* Selects the next DMA Rx descriptor list for next buffer to read */
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
        return RT_EOK;
    }
    else if((dma_rx_desc_to_get->status & EMAC_DMARXDESC_FS) != (u32)RESET)
    {
        /* first segment of a multi-descriptor frame: start a new collection */
        rx_frame.g_seg_count = 1;
        rx_frame.rx_fs_desc = dma_rx_desc_to_get;
        rx_frame.rx_ls_desc = NULL;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }
    else
    {
        /* intermediate segment: just count it and move on */
        rx_frame.g_seg_count ++;
        dma_rx_desc_to_get = (emac_dma_desc_type*) (dma_rx_desc_to_get->buf2nextdescaddr);
    }
    return RT_ERROR;
}
/**
 * @brief receive data
 *        pulls one complete frame out of the rx descriptor ring, copies it
 *        into a freshly allocated pbuf chain, then returns every consumed
 *        descriptor to the dma.
 * @param dev rt-thread device handle (unused)
 * @retval pbuf with the frame, or NULL when no complete frame is pending or
 *         pbuf allocation failed (descriptors are still recycled in that case)
 */
struct pbuf *rt_at32_emac_rx(rt_device_t dev)
{
    struct pbuf *p = NULL;
    struct pbuf *q = NULL;
    rt_uint16_t len = 0;
    rt_uint8_t *buffer;
    emac_dma_desc_type *dma_rx_desc;
    rt_uint32_t buffer_offset, payload_offset = 0, copy_count = 0;
    rt_uint32_t index = 0;
    if(emac_rxpkt_chainmode() != RT_EOK)
    {
        return NULL;
    }
    /* obtain the size of the packet and put it into the "len"
    variable. */
    len = rx_frame.length;
    buffer = (uint8_t *)rx_frame.buffer;
    /* we allocate a pbuf chain of pbufs from the pool. */
    if(len > 0)
    {
        p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
    }
    if(p != NULL)
    {
        dma_rx_desc = rx_frame.rx_fs_desc;
        buffer_offset = 0;
        for (q = p; q != NULL; q = q->next)
        {
            copy_count = q->len;
            payload_offset = 0;
            /* this pbuf segment spans more than the rest of the current
             * descriptor buffer: drain it and continue in the next one */
            while( (copy_count + buffer_offset) > EMAC_MAX_PACKET_LENGTH )
            {
                /* copy data to pbuf */
                rt_memcpy((uint8_t*)q->payload + payload_offset, buffer + buffer_offset, (EMAC_MAX_PACKET_LENGTH - buffer_offset));
                /* point to next descriptor */
                dma_rx_desc = (emac_dma_desc_type *)(dma_rx_desc->buf2nextdescaddr);
                buffer = (uint8_t *)(dma_rx_desc->buf1addr);
                copy_count = copy_count - (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                payload_offset = payload_offset + (EMAC_MAX_PACKET_LENGTH - buffer_offset);
                buffer_offset = 0;
            }
            rt_memcpy((uint8_t*)q->payload + payload_offset, (uint8_t*)buffer + buffer_offset, copy_count);
            buffer_offset = buffer_offset + copy_count;
        }
    }
    /* give every descriptor of the frame back to the dma (OWN bit set) */
    dma_rx_desc = rx_frame.rx_fs_desc;
    for(index = 0; index < rx_frame.g_seg_count; index ++)
    {
        dma_rx_desc->status |= EMAC_DMARXDESC_OWN;
        dma_rx_desc = (emac_dma_desc_type*) (dma_rx_desc->buf2nextdescaddr);
    }
    rx_frame.g_seg_count = 0;
    /* when rx buffer unavailable flag is set: clear it and resume reception */
    if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG))
    {
        /* clear rbus ethernet dma flag */
        emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        /* resume dma reception */
        EMAC_DMA->rpd_bit.rpd = FALSE;
    }
    return p;
}
/* emac dma interrupt service routine: signals the rx thread on frame
 * reception and acknowledges tx / error flags */
void EMAC_IRQHandler(void)
{
    /* enter interrupt */
    rt_interrupt_enter();
    /* packet receiption */
    if (emac_dma_flag_get(EMAC_DMA_RI_FLAG) == SET)
    {
        /* a frame has been received */
        eth_device_ready(&(at32_emac_device.parent));
        emac_dma_flag_clear(EMAC_DMA_RI_FLAG);
    }
    /* packet transmission */
    if (emac_dma_flag_get(EMAC_DMA_TI_FLAG) == SET)
    {
        emac_dma_flag_clear(EMAC_DMA_TI_FLAG);
    }
    /* clear normal interrupt */
    emac_dma_flag_clear(EMAC_DMA_NIS_FLAG);
    /* clear dma error */
    if(emac_dma_flag_get(EMAC_DMA_AIS_FLAG) != RESET)
    {
        /* rx buffer unavailable / overflow: acknowledge so reception resumes */
        if(emac_dma_flag_get(EMAC_DMA_RBU_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_RBU_FLAG);
        }
        if(emac_dma_flag_get(EMAC_DMA_OVF_FLAG) != RESET)
        {
            emac_dma_flag_clear(EMAC_DMA_OVF_FLAG);
        }
        emac_dma_flag_clear(EMAC_DMA_AIS_FLAG);
    }
    /* leave interrupt */
    rt_interrupt_leave();
}
/* phy link state encoded as a one-byte bitmask (see phy_linkchange) */
enum {
    PHY_LINK = (1 << 0),        /* link is up */
    PHY_10M = (1 << 1),         /* 10 mbps (otherwise 100 mbps) */
    PHY_FULLDUPLEX = (1 << 2),  /* full duplex (otherwise half duplex) */
};
  676. static void phy_linkchange()
  677. {
  678. static rt_uint8_t phy_speed = 0;
  679. rt_uint8_t phy_speed_new = 0;
  680. rt_uint16_t status;
  681. emac_phy_register_read(phy_addr, PHY_BASIC_STATUS_REG, (uint16_t *)&status);
  682. LOG_D("phy basic status reg is 0x%X", status);
  683. if (status & (PHY_AUTONEGO_COMPLETE_MASK | PHY_LINKED_STATUS_MASK))
  684. {
  685. rt_uint16_t SR = 0;
  686. phy_speed_new |= PHY_LINK;
  687. emac_phy_register_read(phy_addr, PHY_SPECIFIED_CS_REG, (uint16_t *)&SR);
  688. LOG_D("phy control status reg is 0x%X", SR);
  689. if (SR & (PHY_SPEED_MODE))
  690. {
  691. phy_speed_new |= PHY_10M;
  692. }
  693. if (SR & (PHY_DUPLEX_MODE))
  694. {
  695. phy_speed_new |= PHY_FULLDUPLEX;
  696. }
  697. }
  698. if (phy_speed != phy_speed_new)
  699. {
  700. phy_speed = phy_speed_new;
  701. if (phy_speed & PHY_LINK)
  702. {
  703. LOG_D("link up");
  704. if (phy_speed & PHY_10M)
  705. {
  706. LOG_D("10Mbps");
  707. at32_emac_device.emac_speed = EMAC_SPEED_10MBPS;
  708. }
  709. else
  710. {
  711. at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
  712. LOG_D("100Mbps");
  713. }
  714. if (phy_speed & PHY_FULLDUPLEX)
  715. {
  716. LOG_D("full-duplex");
  717. at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;
  718. }
  719. else
  720. {
  721. LOG_D("half-duplex");
  722. at32_emac_device.emac_mode = EMAC_HALF_DUPLEX;
  723. }
  724. /* send link up. */
  725. eth_device_linkchange(&at32_emac_device.parent, RT_TRUE);
  726. }
  727. else
  728. {
  729. LOG_I("link down");
  730. eth_device_linkchange(&at32_emac_device.parent, RT_FALSE);
  731. }
  732. }
  733. }
  734. #ifdef PHY_USING_INTERRUPT_MODE
  735. static void emac_phy_isr(void *args)
  736. {
  737. rt_uint32_t status = 0;
  738. emac_phy_register_read(phy_addr, PHY_INTERRUPT_FLAG_REG, (uint16_t *)&status);
  739. LOG_D("phy interrupt status reg is 0x%X", status);
  740. phy_linkchange();
  741. }
  742. #endif /* PHY_USING_INTERRUPT_MODE */
  743. static void phy_monitor_thread_entry(void *parameter)
  744. {
  745. uint8_t detected_count = 0;
  746. while(phy_addr == 0xFF)
  747. {
  748. /* phy search */
  749. rt_uint32_t i, temp;
  750. for (i = 0; i <= 0x1F; i++)
  751. {
  752. emac_phy_register_read(i, PHY_BASIC_STATUS_REG, (uint16_t *)&temp);
  753. if (temp != 0xFFFF && temp != 0x00)
  754. {
  755. phy_addr = i;
  756. break;
  757. }
  758. }
  759. detected_count++;
  760. rt_thread_mdelay(1000);
  761. if (detected_count > 10)
  762. {
  763. LOG_E("No PHY device was detected, please check hardware!");
  764. }
  765. }
  766. LOG_D("Found a phy, address:0x%02X", phy_addr);
  767. /* reset phy */
  768. LOG_D("RESET PHY!");
  769. emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_RESET_MASK);
  770. rt_thread_mdelay(2000);
  771. emac_phy_register_write(phy_addr, PHY_BASIC_CONTROL_REG, PHY_AUTO_NEGOTIATION_MASK);
  772. phy_linkchange();
  773. #ifdef PHY_USING_INTERRUPT_MODE
  774. /* configuration intterrupt pin */
  775. rt_pin_mode(PHY_INT_PIN, PIN_MODE_INPUT_PULLUP);
  776. rt_pin_attach_irq(PHY_INT_PIN, PIN_IRQ_MODE_FALLING, emac_phy_isr, (void *)"callbackargs");
  777. rt_pin_irq_enable(PHY_INT_PIN, PIN_IRQ_ENABLE);
  778. /* enable phy interrupt */
  779. emac_phy_register_write(phy_addr, PHY_INTERRUPT_MASK_REG, PHY_INT_MASK);
  780. #if defined(PHY_INTERRUPT_CTRL_REG)
  781. emac_phy_register_write(phy_addr, PHY_INTERRUPT_CTRL_REG, PHY_INTERRUPT_EN);
  782. #endif
  783. #else /* PHY_USING_INTERRUPT_MODE */
  784. at32_emac_device.poll_link_timer = rt_timer_create("phylnk", (void (*)(void*))phy_linkchange,
  785. NULL, RT_TICK_PER_SECOND, RT_TIMER_FLAG_PERIODIC);
  786. if (!at32_emac_device.poll_link_timer || rt_timer_start(at32_emac_device.poll_link_timer) != RT_EOK)
  787. {
  788. LOG_E("Start link change detection timer failed");
  789. }
  790. #endif /* PHY_USING_INTERRUPT_MODE */
  791. }
  792. /* Register the EMAC device */
  793. static int rt_hw_at32_emac_init(void)
  794. {
  795. rt_err_t state = RT_EOK;
  796. /* Prepare receive and send buffers */
  797. rx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_RX_BUF, EMAC_MAX_PACKET_LENGTH);
  798. if (rx_buff == RT_NULL)
  799. {
  800. LOG_E("No memory");
  801. state = -RT_ENOMEM;
  802. goto __exit;
  803. }
  804. tx_buff = (rt_uint8_t *)rt_calloc(EMAC_NUM_TX_BUF, EMAC_MAX_PACKET_LENGTH);
  805. if (tx_buff == RT_NULL)
  806. {
  807. LOG_E("No memory");
  808. state = -RT_ENOMEM;
  809. goto __exit;
  810. }
  811. dma_rx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_RX_BUF, sizeof(emac_dma_desc_type));
  812. if (dma_rx_dscr_tab == RT_NULL)
  813. {
  814. LOG_E("No memory");
  815. state = -RT_ENOMEM;
  816. goto __exit;
  817. }
  818. dma_tx_dscr_tab = (emac_dma_desc_type *)rt_calloc(EMAC_NUM_TX_BUF, sizeof(emac_dma_desc_type));
  819. if (dma_tx_dscr_tab == RT_NULL)
  820. {
  821. LOG_E("No memory");
  822. state = -RT_ENOMEM;
  823. goto __exit;
  824. }
  825. /* phy clock */
  826. phy_clock_config();
  827. /* enable periph clock */
  828. crm_periph_clock_enable(CRM_EMAC_PERIPH_CLOCK, TRUE);
  829. crm_periph_clock_enable(CRM_EMACTX_PERIPH_CLOCK, TRUE);
  830. crm_periph_clock_enable(CRM_EMACRX_PERIPH_CLOCK, TRUE);
  831. /* interface mode */
  832. #if defined (SOC_SERIES_AT32F407)
  833. gpio_pin_remap_config(MII_RMII_SEL_GMUX, TRUE);
  834. #endif
  835. #if defined (SOC_SERIES_AT32F437)
  836. scfg_emac_interface_set(SCFG_EMAC_SELECT_RMII);
  837. #endif
  838. /* emac gpio init */
  839. at32_msp_emac_init(NULL);
  840. at32_emac_device.emac_speed = EMAC_SPEED_100MBPS;
  841. at32_emac_device.emac_mode = EMAC_FULL_DUPLEX;
  842. at32_emac_device.dev_addr[0] = 0x00;
  843. at32_emac_device.dev_addr[1] = 0x66;
  844. at32_emac_device.dev_addr[2] = 0x88;
  845. /* generate mac addr from unique id (only for test). */
  846. at32_emac_device.dev_addr[3] = *(rt_uint8_t *)(0x1FFFF7E8 + 4);
  847. at32_emac_device.dev_addr[4] = *(rt_uint8_t *)(0x1FFFF7E8 + 2);
  848. at32_emac_device.dev_addr[5] = *(rt_uint8_t *)(0x1FFFF7E8 + 0);
  849. at32_emac_device.parent.parent.init = rt_at32_emac_init;
  850. at32_emac_device.parent.parent.open = rt_at32_emac_open;
  851. at32_emac_device.parent.parent.close = rt_at32_emac_close;
  852. at32_emac_device.parent.parent.read = rt_at32_emac_read;
  853. at32_emac_device.parent.parent.write = rt_at32_emac_write;
  854. at32_emac_device.parent.parent.control = rt_at32_emac_control;
  855. at32_emac_device.parent.parent.user_data = RT_NULL;
  856. at32_emac_device.parent.eth_rx = rt_at32_emac_rx;
  857. at32_emac_device.parent.eth_tx = rt_at32_emac_tx;
  858. rx_frame.g_seg_count = 0;
  859. /* reset phy */
  860. phy_reset();
  861. /* start phy monitor */
  862. rt_thread_t tid;
  863. tid = rt_thread_create("phy",
  864. phy_monitor_thread_entry,
  865. RT_NULL,
  866. 1024,
  867. RT_THREAD_PRIORITY_MAX - 2,
  868. 2);
  869. if (tid != RT_NULL)
  870. {
  871. rt_thread_startup(tid);
  872. }
  873. else
  874. {
  875. state = -RT_ERROR;
  876. }
  877. /* register eth device */
  878. state = eth_device_init(&(at32_emac_device.parent), "e0");
  879. if (RT_EOK == state)
  880. {
  881. LOG_D("emac device init success");
  882. }
  883. else
  884. {
  885. LOG_E("emac device init faild: %d", state);
  886. state = -RT_ERROR;
  887. goto __exit;
  888. }
  889. __exit:
  890. if (state != RT_EOK)
  891. {
  892. if (rx_buff)
  893. {
  894. rt_free(rx_buff);
  895. }
  896. if (tx_buff)
  897. {
  898. rt_free(tx_buff);
  899. }
  900. if (dma_rx_dscr_tab)
  901. {
  902. rt_free(dma_rx_dscr_tab);
  903. }
  904. if (dma_tx_dscr_tab)
  905. {
  906. rt_free(dma_tx_dscr_tab);
  907. }
  908. }
  909. return state;
  910. }
  911. INIT_DEVICE_EXPORT(rt_hw_at32_emac_init);