drv_eth.c

/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-10-30     bigmagic     first version
 */

#include <stdint.h>
#include <string.h>

#include <rthw.h>
#include <rtthread.h>

#include "board.h"
#include <lwip/sys.h>
#include <netif/ethernetif.h>

#include "mbox.h"
#include "raspi4.h"
#include "drv_eth.h"

#define DBG_LEVEL   DBG_LOG
#include <rtdbg.h>
#define LOG_TAG     "drv.eth"

static int link_speed = 0;
static int link_flag = 0;

#define RECV_CACHE_BUF          (1024)
#define SEND_CACHE_BUF          (1024)
#define SEND_DATA_NO_CACHE      (0x08200000)
#define RECV_DATA_NO_CACHE      (0x08400000)
#define DMA_DISC_ADDR_SIZE      (4 * 1024 * 1024)

#define RX_DESC_BASE            (MAC_REG + GENET_RX_OFF)
#define TX_DESC_BASE            (MAC_REG + GENET_TX_OFF)

#define MAX_ADDR_LEN            (6)

#define upper_32_bits(n)        ((rt_uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n)        ((rt_uint32_t)(n))

#define BIT(nr)                 (1UL << (nr))

static rt_thread_t link_thread_tid = RT_NULL;
#define LINK_THREAD_STACK_SIZE  (1024)
#define LINK_THREAD_PRIORITY    (20)
#define LINK_THREAD_TIMESLICE   (10)

static rt_uint32_t tx_index = 0;
static rt_uint32_t rx_index = 0;
static rt_uint32_t index_flag = 0;

static rt_uint32_t send_cache_pbuf[RECV_CACHE_BUF];

struct rt_eth_dev
{
    struct eth_device parent;
    rt_uint8_t dev_addr[MAX_ADDR_LEN];
    char *name;
    void *iobase;
    int state;
    int index;
    struct rt_timer link_timer;
    struct rt_timer rx_poll_timer;
    void *priv;
};

static struct rt_eth_dev eth_dev;
static struct rt_semaphore sem_lock;
static struct rt_semaphore link_ack;

static inline rt_uint32_t read32(void *addr)
{
    return (*((volatile unsigned int *)(addr)));
}

static inline void write32(void *addr, rt_uint32_t value)
{
    (*((volatile unsigned int *)(addr))) = value;
}

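/*
 * GENET level-2 interrupt handler: read the pending status, drop the bits
 * that are currently masked, acknowledge the remainder, then notify the
 * stack on RX-DMA completion and release a TX slot on TX-DMA completion.
 */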
static void eth_rx_irq(int irq, void *param)
{
    rt_uint32_t val = 0;

    val = read32(MAC_REG + GENET_INTRL2_CPU_STAT);
    val &= ~read32(MAC_REG + GENET_INTRL2_CPU_STAT_MASK);
    write32(MAC_REG + GENET_INTRL2_CPU_CLEAR, val);

    if (val & GENET_IRQ_RXDMA_DONE)
    {
        eth_device_ready(&eth_dev.parent);
    }

    if (val & GENET_IRQ_TXDMA_DONE)
    {
        rt_sem_release(&sem_lock);
    }
}

/* We only support RGMII (as used on the RPi4). */
static int bcmgenet_interface_set(void)
{
    int phy_mode = PHY_INTERFACE_MODE_RGMII;

    switch (phy_mode)
    {
    case PHY_INTERFACE_MODE_RGMII:
    case PHY_INTERFACE_MODE_RGMII_RXID:
        write32(MAC_REG + SYS_PORT_CTRL, PORT_MODE_EXT_GPHY);
        break;
    default:
        rt_kprintf("unknown phy mode: %d\n", phy_mode);
        return -1;
    }

    return 0;
}

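/*
 * Reset the UniMAC core: pulse the RX buffer flush control, issue a software
 * reset with local loopback enabled, clear the MIB counters, program the
 * maximum frame length and request 2-byte RX buffer alignment.
 */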
static void bcmgenet_umac_reset(void)
{
    rt_uint32_t reg;

    reg = read32(MAC_REG + SYS_RBUF_FLUSH_CTRL);
    reg |= BIT(1);
    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), reg);
    reg &= ~BIT(1);
    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), reg);
    DELAY_MICROS(10);

    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), 0);
    DELAY_MICROS(10);

    write32(MAC_REG + UMAC_CMD, 0);
    write32(MAC_REG + UMAC_CMD, (CMD_SW_RESET | CMD_LCL_LOOP_EN));
    DELAY_MICROS(2);
    write32(MAC_REG + UMAC_CMD, 0);

    /* clear tx/rx counter */
    write32(MAC_REG + UMAC_MIB_CTRL, MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT);
    write32(MAC_REG + UMAC_MIB_CTRL, 0);
    write32(MAC_REG + UMAC_MAX_FRAME_LEN, ENET_MAX_MTU_SIZE);

    /* init rx registers, enable ip header optimization */
    reg = read32(MAC_REG + RBUF_CTRL);
    reg |= RBUF_ALIGN_2B;
    write32(MAC_REG + RBUF_CTRL, reg);

    write32(MAC_REG + RBUF_TBUF_SIZE_CTRL, 1);
}

static void bcmgenet_disable_dma(void)
{
    rt_uint32_t tdma_reg = 0, rdma_reg = 0;

    tdma_reg = read32(MAC_REG + TDMA_REG_BASE + DMA_CTRL);
    tdma_reg &= ~(1UL << DMA_EN);
    write32(MAC_REG + TDMA_REG_BASE + DMA_CTRL, tdma_reg);

    rdma_reg = read32(MAC_REG + RDMA_REG_BASE + DMA_CTRL);
    rdma_reg &= ~(1UL << DMA_EN);
    write32(MAC_REG + RDMA_REG_BASE + DMA_CTRL, rdma_reg);

    write32(MAC_REG + UMAC_TX_FLUSH, 1);
    DELAY_MICROS(100);
    write32(MAC_REG + UMAC_TX_FLUSH, 0);
}

static void bcmgenet_enable_dma(void)
{
    rt_uint32_t reg = 0;
    rt_uint32_t dma_ctrl = 0;

    dma_ctrl = (1 << (DEFAULT_Q + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;
    write32(MAC_REG + TDMA_REG_BASE + DMA_CTRL, dma_ctrl);

    reg = read32(MAC_REG + RDMA_REG_BASE + DMA_CTRL);
    write32(MAC_REG + RDMA_REG_BASE + DMA_CTRL, dma_ctrl | reg);
}

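/*
 * MDIO accessors for the external PHY: write the command word, set
 * MDIO_START_BUSY and busy-wait (bounded by a simple countdown) until the
 * controller clears the busy bit; the low 16 bits of MDIO_CMD hold the data.
 */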
static int bcmgenet_mdio_write(rt_uint32_t addr, rt_uint32_t reg, rt_uint32_t value)
{
    int count = 10000;
    rt_uint32_t val;

    val = MDIO_WR | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT) | (0xffff & value);
    write32(MAC_REG + MDIO_CMD, val);

    rt_uint32_t reg_val = read32(MAC_REG + MDIO_CMD);
    reg_val = reg_val | MDIO_START_BUSY;
    write32(MAC_REG + MDIO_CMD, reg_val);

    while ((read32(MAC_REG + MDIO_CMD) & MDIO_START_BUSY) && (--count))
        DELAY_MICROS(1);

    reg_val = read32(MAC_REG + MDIO_CMD);

    return reg_val & 0xffff;
}

static int bcmgenet_mdio_read(rt_uint32_t addr, rt_uint32_t reg)
{
    int count = 10000;
    rt_uint32_t val = 0;
    rt_uint32_t reg_val = 0;

    val = MDIO_RD | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
    write32(MAC_REG + MDIO_CMD, val);

    reg_val = read32(MAC_REG + MDIO_CMD);
    reg_val = reg_val | MDIO_START_BUSY;
    write32(MAC_REG + MDIO_CMD, reg_val);

    while ((read32(MAC_REG + MDIO_CMD) & MDIO_START_BUSY) && (--count))
        DELAY_MICROS(1);

    reg_val = read32(MAC_REG + MDIO_CMD);

    return reg_val & 0xffff;
}

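/*
 * Program the MAC address, as reported by the firmware mailbox, into the
 * UMAC_MAC0 (first four bytes) and UMAC_MAC1 (last two bytes) registers.
 */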
static int bcmgenet_gmac_write_hwaddr(void)
{
    //{0xdc, 0xa6, 0x32, 0x28, 0x22, 0x50};
    rt_uint8_t addr[6];
    rt_uint32_t reg;

    bcm271x_mbox_hardware_get_mac_address(&addr[0]);

    reg = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
    write32(MAC_REG + UMAC_MAC0, reg);

    reg = addr[4] << 8 | addr[5];
    write32(MAC_REG + UMAC_MAC1, reg);

    return 0;
}

static int get_ethernet_uid(void)
{
    rt_uint32_t uid_high = 0;
    rt_uint32_t uid_low = 0;
    rt_uint32_t uid = 0;

    uid_high = bcmgenet_mdio_read(1, BCM54213PE_PHY_IDENTIFIER_HIGH);
    uid_low = bcmgenet_mdio_read(1, BCM54213PE_PHY_IDENTIFIER_LOW);
    uid = (uid_high << 16 | uid_low);

    if (BCM54213PE_VERSION_B1 == uid)
    {
        LOG_I("version is B1\n");
    }

    return uid;
}

static void bcmgenet_mdio_init(void)
{
    rt_uint32_t ret = 0;

    /* get ethernet uid */
    ret = get_ethernet_uid();
    if (ret == 0)
    {
        return;
    }

    /* reset phy */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, MII_CONTROL_PHY_RESET);
    /* read control reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);

    /* reset phy again */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, MII_CONTROL_PHY_RESET);
    /* read control reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);
    /* read status reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS);
    /* read extended status reg */
    bcmgenet_mdio_read(1, BCM54213PE_IEEE_EXTENDED_STATUS);
    bcmgenet_mdio_read(1, BCM54213PE_AUTO_NEGOTIATION_ADV);
    bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS);
    bcmgenet_mdio_read(1, BCM54213PE_CONTROL);

    /* advertise half and full duplex capability */
    bcmgenet_mdio_write(1, BCM54213PE_CONTROL, (CONTROL_HALF_DUPLEX_CAPABILITY | CONTROL_FULL_DUPLEX_CAPABILITY));
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);

    /* set mii control */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, (MII_CONTROL_AUTO_NEGOTIATION_ENABLED | MII_CONTROL_AUTO_NEGOTIATION_RESTART | MII_CONTROL_PHY_FULL_DUPLEX | MII_CONTROL_SPEED_SELECTION));
}

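/*
 * Configure the default RX and TX DMA rings: burst size, ring start/end
 * addresses, producer/consumer indices, ring buffer size (and, for RX, the
 * XON/XOFF flow-control threshold), then enable the default queue.
 */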
static void rx_ring_init(void)
{
    write32(MAC_REG + RDMA_REG_BASE + DMA_SCB_BURST_SIZE, DMA_MAX_BURST_LENGTH);
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_START_ADDR, 0x0);
    write32(MAC_REG + RDMA_READ_PTR, 0x0);
    write32(MAC_REG + RDMA_WRITE_PTR, 0x0);
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_END_ADDR, RX_DESCS * DMA_DESC_SIZE / 4 - 1);
    write32(MAC_REG + RDMA_PROD_INDEX, 0x0);
    write32(MAC_REG + RDMA_CONS_INDEX, 0x0);
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_RING_BUF_SIZE, (RX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH);
    write32(MAC_REG + RDMA_XON_XOFF_THRESH, DMA_FC_THRESH_VALUE);
    write32(MAC_REG + RDMA_REG_BASE + DMA_RING_CFG, 1 << DEFAULT_Q);
}

static void tx_ring_init(void)
{
    write32(MAC_REG + TDMA_REG_BASE + DMA_SCB_BURST_SIZE, DMA_MAX_BURST_LENGTH);
    write32(MAC_REG + TDMA_RING_REG_BASE + DMA_START_ADDR, 0x0);
    write32(MAC_REG + TDMA_READ_PTR, 0x0);
    write32(MAC_REG + TDMA_READ_PTR, 0x0);
    write32(MAC_REG + TDMA_READ_PTR, 0x0);
    write32(MAC_REG + TDMA_WRITE_PTR, 0x0);
    write32(MAC_REG + TDMA_RING_REG_BASE + DMA_END_ADDR, TX_DESCS * DMA_DESC_SIZE / 4 - 1);
    write32(MAC_REG + TDMA_PROD_INDEX, 0x0);
    write32(MAC_REG + TDMA_CONS_INDEX, 0x0);
    write32(MAC_REG + TDMA_RING_REG_BASE + DMA_MBUF_DONE_THRESH, 0x1);
    write32(MAC_REG + TDMA_FLOW_PERIOD, 0x0);
    write32(MAC_REG + TDMA_RING_REG_BASE + DMA_RING_BUF_SIZE, (TX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH);
    write32(MAC_REG + TDMA_REG_BASE + DMA_RING_CFG, 1 << DEFAULT_Q);
}

static void rx_descs_init(void)
{
    char *rxbuffs = (char *)RECV_DATA_NO_CACHE;
    rt_uint32_t len_stat, i;
    void *desc_base = (void *)RX_DESC_BASE;

    len_stat = (RX_BUF_LENGTH << DMA_BUFLENGTH_SHIFT) | DMA_OWN;

    for (i = 0; i < RX_DESCS; i++)
    {
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_LO), lower_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]));
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_HI), upper_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]));
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_LENGTH_STATUS), len_stat);
    }
}

static int bcmgenet_adjust_link(void)
{
    rt_uint32_t speed;
    rt_uint32_t phy_dev_speed = link_speed;

    switch (phy_dev_speed)
    {
    case SPEED_1000:
        speed = UMAC_SPEED_1000;
        break;
    case SPEED_100:
        speed = UMAC_SPEED_100;
        break;
    case SPEED_10:
        speed = UMAC_SPEED_10;
        break;
    default:
        rt_kprintf("bcmgenet: Unsupported PHY speed: %d\n", phy_dev_speed);
        return -1;
    }

    rt_uint32_t reg1 = read32(MAC_REG + EXT_RGMII_OOB_CTRL);
    //reg1 &= ~(1UL << OOB_DISABLE);
    //rt_kprintf("OOB_DISABLE is %d\n", OOB_DISABLE);
    reg1 |= (RGMII_LINK | RGMII_MODE_EN | ID_MODE_DIS);
    write32(MAC_REG + EXT_RGMII_OOB_CTRL, reg1);
    DELAY_MICROS(1000);

    write32(MAC_REG + UMAC_CMD, speed << CMD_SPEED_SHIFT);

    return 0;
}

void link_irq(void *param)
{
    if ((bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS) & MII_STATUS_LINK_UP) != 0)
    {
        rt_sem_release(&link_ack);
    }
}

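/*
 * Bring the MAC up: reset the UniMAC, program the MAC address, rebuild the
 * RX/TX rings and descriptors, re-enable DMA, apply the negotiated link
 * speed, resynchronise the software ring indices with the hardware ones,
 * then enable RX/TX and unmask the DMA-done interrupts.
 */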
static int bcmgenet_gmac_eth_start(void)
{
    rt_uint32_t ret;
    rt_uint32_t count = 10000;

    bcmgenet_umac_reset();
    bcmgenet_gmac_write_hwaddr();

    /* Disable RX/TX DMA and flush TX queues */
    bcmgenet_disable_dma();

    rx_ring_init();
    rx_descs_init();
    tx_ring_init();

    /* Enable RX/TX DMA */
    bcmgenet_enable_dma();

    /* Update MAC registers based on PHY property */
    ret = bcmgenet_adjust_link();
    if (ret)
    {
        rt_kprintf("bcmgenet: adjust PHY link failed: %d\n", ret);
        return ret;
    }

    /* wait tx index clear */
    while ((read32(MAC_REG + TDMA_CONS_INDEX) != 0) && (--count))
        DELAY_MICROS(1);

    tx_index = read32(MAC_REG + TDMA_CONS_INDEX);
    write32(MAC_REG + TDMA_PROD_INDEX, tx_index);

    index_flag = read32(MAC_REG + RDMA_PROD_INDEX);
    rx_index = index_flag % RX_DESCS;
    write32(MAC_REG + RDMA_CONS_INDEX, index_flag);
    write32(MAC_REG + RDMA_PROD_INDEX, index_flag);

    /* Enable Rx/Tx */
    rt_uint32_t rx_tx_en;
    rx_tx_en = read32(MAC_REG + UMAC_CMD);
    rx_tx_en |= (CMD_TX_EN | CMD_RX_EN);
    write32(MAC_REG + UMAC_CMD, rx_tx_en);

    /* unmask RX/TX DMA done interrupts */
    write32(MAC_REG + GENET_INTRL2_CPU_CLEAR_MASK, GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);

    return 0;
}

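/*
 * Receive path: compare the RDMA producer index with the locally tracked
 * counters, read the frame length and buffer address from the current
 * descriptor, invalidate the data cache for that region and return a pointer
 * just past the 2-byte alignment padding (RX_BUF_OFFSET).
 */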
static rt_uint32_t prev_recv_cnt = 0;
static rt_uint32_t cur_recv_cnt = 0;

static rt_uint32_t bcmgenet_gmac_eth_recv(rt_uint8_t **packetp)
{
    void *desc_base;
    rt_uint32_t length = 0, addr = 0;
    rt_uint32_t prod_index = read32(MAC_REG + RDMA_PROD_INDEX);

    if (prod_index == index_flag)
    {
        cur_recv_cnt = index_flag;
        index_flag = 0x7fffffff;
        /* no buff */
        return 0;
    }
    else
    {
        if (prev_recv_cnt == (prod_index & 0xffff))
        {
            return 0;
        }

        desc_base = RX_DESC_BASE + rx_index * DMA_DESC_SIZE;
        length = read32(desc_base + DMA_DESC_LENGTH_STATUS);
        length = (length >> DMA_BUFLENGTH_SHIFT) & DMA_BUFLENGTH_MASK;
        addr = read32(desc_base + DMA_DESC_ADDRESS_LO);

        /* To cater for the IP header alignment the hardware does.
         * This would actually not be needed if we don't program
         * RBUF_ALIGN_2B
         */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, (void *)addr, length);
        *packetp = (rt_uint8_t *)(addr + RX_BUF_OFFSET);

        rx_index = rx_index + 1;
        if (rx_index >= RX_DESCS)
        {
            rx_index = 0;
        }

        write32(MAC_REG + RDMA_CONS_INDEX, cur_recv_cnt);

        cur_recv_cnt = cur_recv_cnt + 1;
        if (cur_recv_cnt > 0xffff)
        {
            cur_recv_cnt = 0;
        }
        prev_recv_cnt = cur_recv_cnt;

        return length;
    }
}

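/*
 * Transmit path: fill the next TX descriptor (buffer address, length,
 * SOP/EOP and CRC-append flags), flush the data cache for the frame, advance
 * the TDMA producer index to start the transfer, then block on sem_lock
 * until the TX-done interrupt releases a slot.
 */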
static int bcmgenet_gmac_eth_send(void *packet, int length)
{
    void *desc_base = (TX_DESC_BASE + tx_index * DMA_DESC_SIZE);
    rt_uint32_t len_stat = length << DMA_BUFLENGTH_SHIFT;
    rt_uint32_t prod_index;

    prod_index = read32(MAC_REG + TDMA_PROD_INDEX);

    len_stat |= 0x3F << DMA_TX_QTAG_SHIFT;
    len_stat |= DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)packet, length);

    write32((desc_base + DMA_DESC_ADDRESS_LO), (rt_uint32_t)packet);
    write32((desc_base + DMA_DESC_ADDRESS_HI), 0);
    write32((desc_base + DMA_DESC_LENGTH_STATUS), len_stat);

    tx_index = tx_index + 1;
    prod_index = prod_index + 1;

    if (prod_index == 0xe000)
    {
        write32(MAC_REG + TDMA_PROD_INDEX, 0);
        prod_index = 0;
    }

    if (tx_index >= TX_DESCS)
    {
        tx_index = 0;
    }

    /* Start Transmission */
    write32(MAC_REG + TDMA_PROD_INDEX, prod_index);

    rt_sem_take(&sem_lock, RT_WAITING_FOREVER);

    return 0;
}

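/*
 * Link bring-up thread: initialise the PHY over MDIO, poll the MII status
 * with a periodic timer until the link comes up, read the negotiated speed
 * from the PHY status registers, then start the MAC/DMA and enable the
 * Ethernet interrupt.
 */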
static void link_task_entry(void *param)
{
    struct eth_device *eth_device = (struct eth_device *)param;
    RT_ASSERT(eth_device != RT_NULL);

    struct rt_eth_dev *dev = &eth_dev;

    //start mdio
    bcmgenet_mdio_init();

    //start link timer
    rt_timer_init(&dev->link_timer, "link_timer",
                  link_irq,
                  NULL,
                  100,
                  RT_TIMER_FLAG_PERIODIC);
    rt_timer_start(&dev->link_timer);

    //wait for link up
    rt_sem_take(&link_ack, RT_WAITING_FOREVER);
    eth_device_linkchange(&eth_dev.parent, RT_TRUE); //link up
    rt_timer_stop(&dev->link_timer);

    //set mac
    bcmgenet_gmac_write_hwaddr();
    bcmgenet_gmac_write_hwaddr();

    //check link speed
    if ((bcmgenet_mdio_read(1, BCM54213PE_STATUS) & (1 << 10)) || (bcmgenet_mdio_read(1, BCM54213PE_STATUS) & (1 << 11)))
    {
        link_speed = 1000;
        rt_kprintf("Support link mode Speed 1000M\n");
    }
    else if ((bcmgenet_mdio_read(1, 0x05) & (1 << 7)) || (bcmgenet_mdio_read(1, 0x05) & (1 << 8)) || (bcmgenet_mdio_read(1, 0x05) & (1 << 9)))
    {
        link_speed = 100;
        rt_kprintf("Support link mode Speed 100M\n");
    }
    else
    {
        link_speed = 10;
        rt_kprintf("Support link mode Speed 10M\n");
    }

    bcmgenet_gmac_eth_start();

    rt_hw_interrupt_install(ETH_IRQ, eth_rx_irq, NULL, "eth_irq");
    rt_hw_interrupt_umask(ETH_IRQ);

    link_flag = 1;
}

static rt_err_t bcmgenet_eth_init(rt_device_t device)
{
    rt_uint32_t ret = 0;
    rt_uint32_t hw_reg = 0;

    /* Read GENET HW version */
    rt_uint8_t major = 0;
    hw_reg = read32(MAC_REG + SYS_REV_CTRL);
    major = (hw_reg >> 24) & 0x0f;

    if (major != 6)
    {
        if (major == 5)
            major = 4;
        else if (major == 0)
            major = 1;

        rt_kprintf("Unsupported GENETv%d.%d\n", major, (hw_reg >> 16) & 0x0f);
        return -RT_ERROR;
    }

    /* set interface */
    ret = bcmgenet_interface_set();
    if (ret)
    {
        return ret;
    }

    /* rbuf clear */
    write32(MAC_REG + SYS_RBUF_FLUSH_CTRL, 0);

    /* disable MAC while updating its registers */
    write32(MAC_REG + UMAC_CMD, 0);

    /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
    write32(MAC_REG + UMAC_CMD, CMD_SW_RESET | CMD_LCL_LOOP_EN);

    link_thread_tid = rt_thread_create("link", link_task_entry, (void *)device,
                                       LINK_THREAD_STACK_SIZE,
                                       LINK_THREAD_PRIORITY, LINK_THREAD_TIMESLICE);
    if (link_thread_tid != RT_NULL)
        rt_thread_startup(link_thread_tid);

    return RT_EOK;
}

static rt_err_t bcmgenet_eth_control(rt_device_t dev, int cmd, void *args)
{
    switch (cmd)
    {
    case NIOCTL_GADDR:
        if (args)
            rt_memcpy(args, eth_dev.dev_addr, 6);
        else
            return -RT_ERROR;
        break;
    default:
        break;
    }

    return RT_EOK;
}

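/*
 * lwIP glue: rt_eth_tx copies the outgoing pbuf chain into the fixed
 * SEND_DATA_NO_CACHE staging area before handing it to the TX ring, and
 * rt_eth_rx copies a received frame into a freshly allocated pbuf.
 */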
rt_err_t rt_eth_tx(rt_device_t device, struct pbuf *p)
{
    rt_uint32_t sendbuf = (rt_uint32_t)SEND_DATA_NO_CACHE + (rt_uint32_t)(tx_index * SEND_CACHE_BUF);

    /* send only when the link is up */
    if (link_flag == 1)
    {
        pbuf_copy_partial(p, (void *)&send_cache_pbuf[0], p->tot_len, 0);
        rt_memcpy((void *)sendbuf, send_cache_pbuf, p->tot_len);
        bcmgenet_gmac_eth_send((void *)sendbuf, p->tot_len);
    }

    return RT_EOK;
}

struct pbuf *rt_eth_rx(rt_device_t device)
{
    int recv_len = 0;
    rt_uint8_t *addr_point = RT_NULL;
    struct pbuf *pbuf = RT_NULL;

    if (link_flag == 1)
    {
        recv_len = bcmgenet_gmac_eth_recv((rt_uint8_t **)&addr_point);
        if (recv_len > 0)
        {
            pbuf = pbuf_alloc(PBUF_LINK, recv_len, PBUF_RAM);
            if (pbuf)
            {
                rt_memcpy(pbuf->payload, addr_point, recv_len);
            }
        }
    }

    return pbuf;
}

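/*
 * Driver registration: create the TX and link semaphores, clear the DMA
 * staging areas, fetch the MAC address from the firmware mailbox and
 * register the interface with the RT-Thread network device layer as "e0".
 */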
int rt_hw_eth_init(void)
{
    rt_uint8_t mac_addr[6];

    rt_sem_init(&sem_lock, "eth_send_lock", TX_DESCS, RT_IPC_FLAG_FIFO);
    rt_sem_init(&link_ack, "link_ack", 0, RT_IPC_FLAG_FIFO);

    memset(&eth_dev, 0, sizeof(eth_dev));
    memset((void *)SEND_DATA_NO_CACHE, 0, DMA_DISC_ADDR_SIZE);
    memset((void *)RECV_DATA_NO_CACHE, 0, DMA_DISC_ADDR_SIZE);

    bcm271x_mbox_hardware_get_mac_address(&mac_addr[0]);

    eth_dev.iobase = MAC_REG;
    eth_dev.name = "e0";
    eth_dev.dev_addr[0] = mac_addr[0];
    eth_dev.dev_addr[1] = mac_addr[1];
    eth_dev.dev_addr[2] = mac_addr[2];
    eth_dev.dev_addr[3] = mac_addr[3];
    eth_dev.dev_addr[4] = mac_addr[4];
    eth_dev.dev_addr[5] = mac_addr[5];

    eth_dev.parent.parent.type = RT_Device_Class_NetIf;
    eth_dev.parent.parent.init = bcmgenet_eth_init;
    eth_dev.parent.parent.open = RT_NULL;
    eth_dev.parent.parent.close = RT_NULL;
    eth_dev.parent.parent.read = RT_NULL;
    eth_dev.parent.parent.write = RT_NULL;
    eth_dev.parent.parent.control = bcmgenet_eth_control;
    eth_dev.parent.parent.user_data = RT_NULL;
    eth_dev.parent.eth_tx = rt_eth_tx;
    eth_dev.parent.eth_rx = rt_eth_rx;

    eth_device_init(&(eth_dev.parent), "e0");
    eth_device_linkchange(&eth_dev.parent, RT_FALSE); //link down

    return 0;
}
INIT_COMPONENT_EXPORT(rt_hw_eth_init);