drv_rtl8139.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-08-16     JasonHu      first version
 */

#include <board.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <rthw.h>
#include <netif/ethernetif.h>
#include <pci.h>
#include <mmu.h>

#define DBG_LVL DBG_INFO
#define DBG_TAG "RTL8139"
#include <rtdbg.h>

#include "drv_rtl8139.h"

#define DEV_NAME "e0"

#define GET_RTL8139(eth) (struct eth_device_rtl8139 *)(eth)

struct eth_device_rtl8139
{
    /* inherit from Ethernet device */
    struct eth_device parent;
    /* interface address info. */
    rt_uint8_t dev_addr[ETH_ALEN];      /* MAC address */
    rt_pci_device_t *pci_dev;           /* pci device info */
    rt_uint32_t iobase;                 /* io port base */
    rt_uint32_t irqno;                  /* irq number */
    card_chip_t chipset;
    rt_spinlock_t lock;                 /* lock for rx packet */

    rt_uint8_t *rx_buffer;
    rt_uint8_t *rx_ring;
    rt_uint32_t current_rx;             /* CAPR, Current Address of Packet Read (ring read offset) */
    rt_uint32_t rx_flags;
    rt_ubase_t rx_ring_dma;             /* dma phy addr */
    rt_uint32_t rx_config;              /* receive config */
    struct rtl8139_status rx_status;

    rt_uint8_t *tx_buffers;
    rt_uint8_t *tx_buffer[NUM_TX_DESC]; /* tx buffer pointer array */
    rt_uint32_t current_tx;
    rt_uint32_t dirty_tx;
    rt_size_t tx_free_counts;
    rt_uint32_t tx_flags;
    rt_ubase_t tx_buffer_dma;           /* dma phy addr */
    struct rtl8139_status tx_status;

    struct net_device_status stats;     /* device stats */
    struct rtl_extra_status xstats;     /* extra status */
    rt_uint32_t dev_flags;              /* flags of net device */
    rt_mq_t rx_mqueue;                  /* msg queue for rx */
    rt_uint8_t linked;                  /* eth device linked */
};

static struct eth_device_rtl8139 eth_dev;

static rt_uint8_t rx_cache_send_buf[RX_MSG_SIZE] = {0};   /* buf for rx packet, put size and data into mq */
static rt_uint8_t rx_cache_recv_buf[RX_MSG_SIZE] = {0};   /* buf for rx packet, get size and data from mq */
static rt_uint8_t tx_cache_pbuf[TX_CACHE_BUF_SIZE] = {0}; /* buf for tx packet, get data from pbuf payload */

/* rx config */
static const rt_uint32_t rtl8139_rx_config = RX_CFG_RCV_32K | RX_NO_WRAP |
                                             (RX_FIFO_THRESH << RX_CFG_FIFO_SHIFT) |
                                             (RX_DMA_BURST << RX_CFG_DMA_SHIFT);
/* tx config */
static const rt_uint32_t rtl8139_tx_config = TX_IFG96 | (TX_DMA_BURST << TX_DMA_SHIFT) |
                                             (TX_RETRY << TX_RETRY_SHIFT);
/* intr mask, 1: receive, 0: ignore */
static const rt_uint16_t rtl8139_intr_mask = PCI_ERR | PCS_TIMEOUT | RX_UNDERRUN | RX_OVERFLOW | RX_FIFO_OVER |
                                             TX_ERR | TX_OK | RX_ERR | RX_OK;

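/*
 * Tx path overview: the RTL8139 provides NUM_TX_DESC transmit slots. Each slot
 * has its own status register (TX_STATUS0 + entry * 4) and buffer-address
 * register (TX_ADDR0 + entry * 4). Software fills the slot's buffer, then
 * writes the length and FIFO threshold (tx_flags) into the status register,
 * which starts the DMA. current_tx is the next slot to fill, dirty_tx is the
 * next slot to reclaim in the Tx interrupt, and tx_free_counts tracks how
 * many slots are still available.
 */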
static int rtl8139_next_desc(int current_desc)
{
    return (current_desc == NUM_TX_DESC - 1) ? 0 : (current_desc + 1);
}

static int rtl8139_transmit(struct eth_device_rtl8139 *dev, rt_uint8_t *buf, rt_size_t len)
{
    rt_uint32_t entry;
    rt_uint32_t length = len;

    entry = dev->current_tx;
    rt_base_t level = rt_hw_interrupt_disable();
    if (dev->tx_free_counts > 0)
    {
        if (length < TX_BUF_SIZE)
        {
            if (length < ETH_ZLEN)
            {
                rt_memset(dev->tx_buffer[entry], 0, ETH_ZLEN); /* pad with zero */
            }
            rt_memcpy(dev->tx_buffer[entry], buf, length);
        }
        else
        {
            /* drop oversized packet */
            dev->stats.tx_dropped++;
            dbg_log(DBG_WARNING, "dropped a packet!\n");
            rt_hw_interrupt_enable(level);
            return 0;
        }
        /*
         * Writing to tx_status triggers a DMA transfer of the data
         * copied to dev->tx_buffer[entry] above. Use a memory barrier
         * to make sure that the device sees the updated data.
         */
        rt_hw_dsb();
        outl(dev->iobase + TX_STATUS0 + (entry * 4), dev->tx_flags | ETH_MAX(length, (rt_uint32_t)ETH_ZLEN));
        inl(dev->iobase + TX_STATUS0 + (entry * 4)); /* flush */
        dev->current_tx = rtl8139_next_desc(dev->current_tx);
        --dev->tx_free_counts;
    }
    else
    {
        LOG_E("Stop Tx packet!\n");
        rt_hw_interrupt_enable(level);
        return -1;
    }
    rt_hw_interrupt_enable(level);
    return 0;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void rtl8139_init_ring(struct eth_device_rtl8139 *dev)
{
    dev->current_rx = 0;
    dev->current_tx = 0;
    dev->dirty_tx = 0;

    /* set free counts */
    dev->tx_free_counts = NUM_TX_DESC;

    int i = 0;
    for (; i < NUM_TX_DESC; i++)
    {
        dev->tx_buffer[i] = (unsigned char *)&dev->tx_buffers[i * TX_BUF_SIZE];
    }
}

static void rtl8139_chip_reset(struct eth_device_rtl8139 *dev)
{
    /* software reset: clears the RX and TX buffers and sets everything back to defaults. */
    outb(dev->iobase + CHIP_CMD, CMD_RESET);
    /* wait until the reset is done */
    for (;;)
    {
        rt_hw_dmb();
        if ((inb(dev->iobase + CHIP_CMD) & CMD_RESET) == 0)
        {
            break;
        }
        rt_hw_cpu_pause();
    }
}

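/*
 * rtl8139_set_rx_mode() programs the receive configuration register
 * (RX_CONFIG) with the accept-broadcast/physical/multicast bits plus the
 * error/runt bits used by the DEV_FLAGS_RXALL handling in the Rx path, and
 * clears the 64-bit multicast hash registers at MAR0.
 */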
static void rtl8139_set_rx_mode(struct eth_device_rtl8139 *dev)
{
    rt_base_t level = rt_hw_interrupt_disable();

    int rx_mode = ACCEPT_BROADCAST | ACCEPT_MY_PHYS | ACCEPT_MULTICAST;
    rx_mode |= (ACCEPT_ERR | ACCEPT_RUNT);

    rt_uint32_t tmp;
    tmp = rtl8139_rx_config | rx_mode;
    if (dev->rx_config != tmp)
    {
        outl(dev->iobase + RX_CONFIG, tmp);
        /* flush */
        inl(dev->iobase + RX_CONFIG);
        dev->rx_config = tmp;
    }

    /* set multicast hash filter */
    rt_uint32_t mac_filter[2];
    mac_filter[0] = mac_filter[1] = 0;
    outl(dev->iobase + MAR0 + 0, mac_filter[0]);
    inl(dev->iobase + MAR0 + 0);
    outl(dev->iobase + MAR0 + 4, mac_filter[1]);
    inl(dev->iobase + MAR0 + 4);

    rt_hw_interrupt_enable(level);
}

static void rtl8139_hardware_start(struct eth_device_rtl8139 *dev)
{
    /* Bring old chips out of low-power mode. */
    if (rtl_chip_info[dev->chipset].flags & HAS_HLT_CLK)
    {
        outb(dev->iobase + HLT_CTL, 'R');
    }

    rtl8139_chip_reset(dev);

    /* unlock Config[01234] and BMCR register writes */
    outb(dev->iobase + CFG9346, CFG9346_UNLOCK);
    inb(dev->iobase + CFG9346); /* flush */

    /* Restore our idea of the MAC address. */
    outl(dev->iobase + MAC0, *(rt_uint32_t *)(dev->dev_addr + 0));
    inl(dev->iobase + MAC0);
    outw(dev->iobase + MAC0 + 4, *(rt_uint16_t *)(dev->dev_addr + 4));
    inw(dev->iobase + MAC0 + 4);

    dev->current_rx = 0;

    /* init Rx ring buffer DMA address */
    outl(dev->iobase + RX_BUF, dev->rx_ring_dma);
    inl(dev->iobase + RX_BUF);

    /* Must enable Tx/Rx before setting transfer thresholds! */
    outb(dev->iobase + CHIP_CMD, CMD_RX_ENABLE | CMD_TX_ENABLE);

    /* set receive config */
    dev->rx_config = rtl8139_rx_config | ACCEPT_BROADCAST | ACCEPT_MY_PHYS;
    outl(dev->iobase + RX_CONFIG, dev->rx_config);
    outl(dev->iobase + TX_CONFIG, rtl8139_tx_config);

    if (dev->chipset >= CH_8139B)
    {
        /* Disable magic packet scanning, which is enabled
         * when PM is enabled in Config1. It can be re-enabled
         * via ETHTOOL_SWOL if desired.
         * Clear the MAGIC bit.
         */
        outb(dev->iobase + CONFIG3, inb(dev->iobase + CONFIG3) & ~CFG3_MAGIC);
    }

    /* Lock Config[01234] and BMCR register writes */
    outb(dev->iobase + CFG9346, CFG9346_LOCK);

    /* init Tx buffer DMA addresses */
    int i = 0;
    for (; i < NUM_TX_DESC; i++)
    {
        outl(dev->iobase + TX_ADDR0 + (i * 4), dev->tx_buffer_dma + (dev->tx_buffer[i] - dev->tx_buffers));
        /* flush */
        inl(dev->iobase + TX_ADDR0 + (i * 4));
    }

    outl(dev->iobase + RX_MISSED, 0);

    rtl8139_set_rx_mode(dev);

    /* no early-rx interrupts */
    outw(dev->iobase + MULTI_INTR, inw(dev->iobase + MULTI_INTR) & MULTI_INTR_CLEAR);

    /* make sure Tx & Rx are enabled */
    rt_uint8_t tmp = inb(dev->iobase + CHIP_CMD);
    if (!(tmp & CMD_RX_ENABLE) || !(tmp & CMD_TX_ENABLE))
    {
        outb(dev->iobase + CHIP_CMD, CMD_RX_ENABLE | CMD_TX_ENABLE);
    }

    /* enable 8139 interrupt mask */
    outw(dev->iobase + INTR_MASK, rtl8139_intr_mask);
}

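/*
 * Tx completion: walk the descriptors from dirty_tx forward while there are
 * outstanding packets (tx_free_counts < NUM_TX_DESC), read each slot's status
 * word, account errors or byte/packet counters, and release the slot by
 * bumping tx_free_counts. On a FIFO underrun the Tx threshold field in
 * tx_flags is raised so later packets start transmitting with more data
 * buffered.
 */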
static int rtl8139_tx_interrupt(struct eth_device_rtl8139 *dev)
{
    while (dev->tx_free_counts < NUM_TX_DESC)
    {
        int entry = dev->dirty_tx;

        /* read tx status */
        int tx_status = inl(dev->iobase + TX_STATUS0 + (entry * 4));
        /* this descriptor is not finished yet, exit */
        if (!(tx_status & (TX_STAT_OK | TX_UNDERRUN | TX_ABORTED)))
        {
            dbg_log(DBG_ERROR, "tx status is not what we want!\n");
            break;
        }

        /* NOTE: TxCarrierLost is always asserted at 100mbps. */
        if (tx_status & (TX_OUT_OF_WINDOW | TX_ABORTED))
        {
            dbg_log(DBG_ERROR, "Transmit error, Tx status %x\n", tx_status);
            dev->stats.tx_errors++;
            if (tx_status & TX_ABORTED)
            {
                dev->stats.tx_aborted_errors++;
                /* clear abort bit */
                outl(dev->iobase + TX_CONFIG, TX_CLEAR_ABT);
                /* acknowledge the tx error interrupt */
                outw(dev->iobase + INTR_STATUS, TX_ERR);
                rt_hw_dsb();
            }
            if (tx_status & TX_CARRIER_LOST)
            {
                dev->stats.tx_carrier_errors++;
            }
            if (tx_status & TX_OUT_OF_WINDOW)
            {
                dev->stats.tx_window_errors++;
            }
        }
        else
        {
            if (tx_status & TX_UNDERRUN)
            {
                /* Add 64 to the Tx FIFO threshold. */
                if (dev->tx_flags < 0x00300000)
                {
                    dev->tx_flags += 0x00020000;
                }
                dev->stats.tx_fifo_errors++;
            }
            dev->stats.collisions += (tx_status >> 24) & 15;
            dev->tx_status.packets++;
            dev->tx_status.bytes += tx_status & 0x7ff;
        }

        dev->dirty_tx = rtl8139_next_desc(dev->dirty_tx);
        if (dev->tx_free_counts == 0)
        {
            rt_hw_dmb();
        }
        dev->tx_free_counts++;
    }
    return 0;
}

static void rtl8139_other_interrupt(struct eth_device_rtl8139 *dev, int status, int link_changed)
{
    /* Update the error count. */
    dev->stats.rx_missed_errors += inl(dev->iobase + RX_MISSED);
    outl(dev->iobase + RX_MISSED, 0);

    if ((status & RX_UNDERRUN) && link_changed && (dev->dev_flags & HAS_LNK_CHNG))
    {
        dev->linked = RT_FALSE; /* dev not linked */
        status &= ~RX_UNDERRUN;
    }
    if (status & (RX_UNDERRUN | RX_ERR))
    {
        dev->stats.rx_errors++;
    }
    if (status & PCS_TIMEOUT)
    {
        dev->stats.rx_length_errors++;
    }
    if (status & RX_UNDERRUN)
    {
        dev->stats.rx_fifo_errors++;
    }
    if (status & PCI_ERR) /* error on the pci bus */
    {
        rt_uint32_t pci_cmd_status;
        pci_cmd_status = rt_pci_device_read(dev->pci_dev, PCI_STATUS_COMMAND);
        rt_pci_device_write(dev->pci_dev, PCI_STATUS_COMMAND, pci_cmd_status);
        dbg_log(DBG_ERROR, "PCI Bus error %x\n", pci_cmd_status >> 16);
    }
}

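/*
 * Rx error recovery: on a bad status word the receiver is briefly disabled
 * and re-enabled via CHIP_CMD, the receive configuration is rewritten, and
 * the software read position (current_rx) is reset to 0.
 */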
static void rtl8139_rx_error(rt_uint32_t rx_status, struct eth_device_rtl8139 *dev)
{
    rt_uint8_t tmp;

    dev->stats.rx_errors++;
    /* rx error */
    if (!(rx_status & RX_STATUS_OK))
    {
        /* frame error */
        if (rx_status & (RX_BAD_SYMBOL | RX_BAD_ALIGN))
        {
            dev->stats.rx_frame_errors++;
        }
        /* too short or too long */
        if (rx_status & (RX_RUNT | RX_TOO_LONG))
        {
            dev->stats.rx_length_errors++;
        }
        /* CRC check */
        if (rx_status & RX_CRC_ERR)
        {
            dev->stats.rx_crc_errors++;
        }
    }
    else
    {
        /* received ok, but lost in the ring */
        dev->xstats.rx_lost_in_ring++;
    }

    /* reset the receiver */
    tmp = inb(dev->iobase + CHIP_CMD);
    outb(dev->iobase + CHIP_CMD, tmp & ~CMD_RX_ENABLE);
    outb(dev->iobase + CHIP_CMD, tmp);
    outl(dev->iobase + RX_CONFIG, dev->rx_config);
    dev->current_rx = 0;
}

static void rtl8139_isr_ack(struct eth_device_rtl8139 *dev)
{
    rt_uint16_t status;

    status = inw(dev->iobase + INTR_STATUS) & RX_ACK_BITS;
    /* Clear out errors and receive interrupts */
    if (status != 0)
    {
        if (status & (RX_FIFO_OVER | RX_OVERFLOW))
        {
            dev->stats.rx_errors++;
            if (status & RX_FIFO_OVER)
            {
                dev->stats.rx_fifo_errors++;
            }
        }
        /* acknowledge rx interrupts */
        outw(dev->iobase + INTR_STATUS, RX_ACK_BITS);
        inw(dev->iobase + INTR_STATUS); /* flush */
    }
}

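/*
 * Rx ring layout: the chip writes each received frame into the ring prefixed
 * by a 4-byte header, with the status in the low 16 bits and the frame length
 * (including the 4-byte CRC) in the high 16 bits. Frames are dword aligned,
 * so the read pointer advances by (rx_size + 4 + 3) & ~3, and the CAPR
 * register (RX_BUF_PTR) is written 16 bytes behind the software read
 * position, as the hardware expects.
 */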
static int rtl8139_rx_interrupt(struct eth_device_rtl8139 *dev)
{
    int received = 0;
    rt_uint8_t *rx_ring = dev->rx_ring;
    rt_uint32_t current_rx = dev->current_rx;
    rt_uint32_t rx_size = 0;

    while (!(inb(dev->iobase + CHIP_CMD) & RX_BUFFER_EMPTY))
    {
        rt_uint32_t ring_offset = current_rx % RX_BUF_LEN;
        rt_uint32_t rx_status;
        rt_size_t pkt_size;

        rt_hw_dmb();

        /* read size + status of the next frame from the DMA ring buffer */
        rx_status = *(rt_uint32_t *)(rx_ring + ring_offset);
        /* size is in the high 16 bits */
        rx_size = rx_status >> 16;
        if (!(dev->dev_flags & DEV_FLAGS_RXFCS))
        {
            pkt_size = rx_size - 4; /* strip the 4-byte CRC */
        }
        else
        {
            pkt_size = rx_size;
        }

        /* Packet copy from FIFO still in progress.
         * Theoretically, this should never happen
         * since early_rx is disabled.
         */
        if (rx_size == 0xfff0)
        {
            dbg_log(DBG_WARNING, "rx fifo copy in progress\n");
            dev->xstats.early_rx++;
            break;
        }

        /* If Rx err or invalid rx_size/rx_status received
         * (which happens if we get lost in the ring),
         * Rx process gets reset, so we abort any further
         * Rx processing.
         */
        if ((rx_size > (MAX_ETH_FRAME_SIZE + 4)) || (rx_size < 8) || (!(rx_status & RX_STATUS_OK)))
        {
            if ((dev->dev_flags & DEV_FLAGS_RXALL) && (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) &&
                (rx_size >= 8) && (!(rx_status & RX_STATUS_OK)))
            {
                dev->stats.rx_errors++;
                if (rx_status & RX_CRC_ERR)
                {
                    dev->stats.rx_crc_errors++;
                    JUMP_TO(keep_pkt);
                }
                if (rx_status & RX_RUNT)
                {
                    dev->stats.rx_length_errors++;
                    JUMP_TO(keep_pkt);
                }
            }
            /* rx error handling */
            rtl8139_rx_error(rx_status, dev);
            received = -1;
            JUMP_TO(out);
        }

keep_pkt:
        /* merge size and data into one receive message */
        rt_memcpy(rx_cache_send_buf, &pkt_size, 4);
        rt_memcpy(&rx_cache_send_buf[4], &rx_ring[ring_offset + 4], pkt_size);
        rt_mq_send_interrupt(dev->rx_mqueue, rx_cache_send_buf, pkt_size + 4);
        eth_device_ready(&dev->parent); /* notify eth thread to read the packet */

        dev->rx_status.packets++;
        dev->rx_status.bytes += pkt_size;
        received++;

        /* 4: for the header length (length includes the 4-byte CRC)
         * 3: for dword alignment
         */
        current_rx = (current_rx + rx_size + 4 + 3) & ~3;
        outw(dev->iobase + RX_BUF_PTR, (rt_uint16_t)(current_rx - 16));

        rtl8139_isr_ack(dev);
    }

    if (!received || rx_size == 0xfff0)
    {
        rtl8139_isr_ack(dev);
    }

    dev->current_rx = current_rx;
out:
    return received;
}

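/*
 * Interrupt service routine: read INTR_STATUS and write the value back to
 * acknowledge it, ignore interrupts that do not match rtl8139_intr_mask,
 * then dispatch the Rx, uncommon-event (link change / PCI error) and Tx
 * handlers in that order, all under dev->lock.
 */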
static void rt_hw_rtl8139_isr(int vector, void *param)
{
    struct eth_device_rtl8139 *dev = GET_RTL8139(param);
    rt_uint16_t status, ackstat;
    int link_changed = 0; /* avoid bogus "uninit" warning */

    rt_spin_lock(&dev->lock);

    status = inw(dev->iobase + INTR_STATUS);
    outw(dev->iobase + INTR_STATUS, status);
    if ((status & rtl8139_intr_mask) == 0)
    {
        dbg_log(DBG_LOG, "no interrupt occurred for this device!\n");
        rt_spin_unlock(&dev->lock);
        return;
    }

    /* check whether the netif is still linked. */
    if (!dev->linked)
    {
        /* clear intr mask so we don't keep receiving interrupts */
        outw(dev->iobase + INTR_MASK, 0);
        JUMP_TO(out);
    }

    /* Acknowledge all of the current interrupt sources ASAP, but
       first get an additional status bit from CSCR. */
    if (status & RX_UNDERRUN)
    {
        link_changed = inw(dev->iobase + CSCR) & CSCR_LINK_CHANGE;
    }

    ackstat = status & ~(RX_ACK_BITS | TX_ERR);
    if (ackstat)
    {
        outw(dev->iobase + INTR_STATUS, ackstat);
    }

    if (status & RX_ACK_BITS)
    {
        rtl8139_rx_interrupt(dev);
    }

    /* Check uncommon events with one test. */
    if (status & (PCI_ERR | PCS_TIMEOUT | RX_UNDERRUN | RX_ERR))
    {
        rtl8139_other_interrupt(dev, status, link_changed);
    }

    /* handle transmit completion */
    if (status & (TX_OK | TX_ERR))
    {
        rtl8139_tx_interrupt(dev);
        if (status & TX_ERR)
        {
            outw(dev->iobase + INTR_STATUS, TX_ERR);
        }
    }

out:
    rt_spin_unlock(&dev->lock);
}

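/*
 * Device init: allocate the contiguous Tx buffer area and the Rx ring,
 * convert them to physical addresses for DMA with rt_hw_vir2phy(), create the
 * message queue the ISR uses to hand packets to the eth thread, start the
 * hardware, and finally install and unmask the IRQ. The buffer sizes
 * (TX_BUF_TOTAL_LEN, RX_BUF_TOTAL_LEN, RX_MSG_SIZE, RX_MSG_CNT) are assumed
 * to be defined in drv_rtl8139.h to match the 32 KB ring configured by
 * RX_CFG_RCV_32K.
 */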
static rt_err_t rtl8139_init(rt_device_t device)
{
    struct eth_device_rtl8139 *dev = GET_RTL8139(device);

    /* alloc transmit buffer */
    dev->tx_buffers = (rt_uint8_t *) rt_malloc(TX_BUF_TOTAL_LEN);
    if (dev->tx_buffers == RT_NULL)
    {
        LOG_E("alloc memory for rtl8139 tx buffer failed!\n");
        return -RT_ERROR;
    }

    /* alloc receive buffer */
    dev->rx_ring = (rt_uint8_t *) rt_malloc(RX_BUF_TOTAL_LEN);
    if (dev->rx_ring == RT_NULL)
    {
        LOG_E("alloc memory for rtl8139 rx buffer failed!\n");
        rt_free(dev->tx_buffers);
        return -RT_ERROR;
    }

    /* create msg queue for eth rx */
    dev->rx_mqueue = rt_mq_create("rx_mqueue", RX_MSG_SIZE, RX_MSG_CNT, 0);
    if (dev->rx_mqueue == RT_NULL)
    {
        LOG_E("create msg queue for rx buffer failed!\n");
        rt_free(dev->tx_buffers);
        rt_free(dev->rx_ring);
        return -RT_ERROR;
    }

    dev->tx_buffer_dma = (rt_ubase_t)rt_hw_vir2phy(dev->tx_buffers);
    dev->rx_ring_dma = (rt_ubase_t)rt_hw_vir2phy(dev->rx_ring);
    dev->tx_flags = (TX_FIFO_THRESH << 11) & 0x003f0000;

    /* init tx and rx ring */
    rtl8139_init_ring(dev);
    rtl8139_hardware_start(dev);

    dev->dev_flags = DEV_FLAGS_RXALL;
    dev->linked = RT_TRUE;
    eth_device_linkchange(&dev->parent, RT_TRUE);

    if (rt_hw_interrupt_install(dev->irqno, rt_hw_rtl8139_isr, (void *) dev, "rtl8139") < 0)
    {
        LOG_E("install IRQ failed!\n");
        rt_free(dev->tx_buffers);
        rt_free(dev->rx_ring);
        rt_mq_delete(dev->rx_mqueue);
        return -RT_ERROR;
    }
    rt_hw_interrupt_umask(dev->irqno);

    dbg_log(DBG_INFO, "ethernet card init done.\n");
    return RT_EOK;
}

/* forward declaration: rtl8139_control is defined after the rx/tx handlers below */
static rt_err_t rtl8139_control(rt_device_t device, int cmd, void *args);

#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops rtl8139_ops =
{
    rtl8139_init,
    RT_NULL,
    RT_NULL,
    RT_NULL,
    RT_NULL,
    rtl8139_control
};
#endif

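/*
 * PCI probing: rtl8139_get_pci() looks the card up by vendor/device ID,
 * enables bus mastering so the chip can DMA to and from memory, and records
 * the I/O port base and IRQ line used everywhere else in this driver.
 */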
static int rtl8139_get_pci(struct eth_device_rtl8139 *dev)
{
    /* get pci device */
    rt_pci_device_t *pci_dev = rt_pci_device_get(RTL8139_VENDOR_ID, RTL8139_DEVICE_ID);
    if (pci_dev == RT_NULL)
    {
        LOG_E("device not found on the pci bus.\n");
        return -1;
    }
    dev->pci_dev = pci_dev;
    dbg_log(DBG_LOG, "find device, vendor id: 0x%x, device id: 0x%x\n",
            pci_dev->vendor_id, pci_dev->device_id);

    /* enable bus mastering */
    rt_pci_enable_bus_mastering(pci_dev);

    /* get io port address */
    dev->iobase = rt_pci_device_get_io_addr(pci_dev);
    if (dev->iobase == 0)
    {
        LOG_E("invalid pci device io address.\n");
        return -1;
    }
    dbg_log(DBG_LOG, "io base address: 0x%x\n", dev->iobase);

    /* get irq */
    dev->irqno = rt_pci_device_get_irq_line(pci_dev);
    if (dev->irqno == 0xff)
    {
        LOG_E("invalid irqno.\n");
        return -1;
    }
    dbg_log(DBG_LOG, "irqno %d\n", dev->irqno);
    return 0;
}

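/*
 * Board init: the hardware revision is read from the TX_CONFIG register and
 * matched against rtl_chip_info[] to pick the chipset index; unknown
 * revisions fall back to entry 0, the original RTL-8139. Older chips are
 * also taken out of sleep/power-down via Config1 before the chip reset.
 */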
static int rtl8139_init_board(struct eth_device_rtl8139 *dev)
{
    /* check for missing/broken hardware */
    if (inl(dev->iobase + TX_CONFIG) == 0xFFFFFFFF)
    {
        dbg_log(DBG_ERROR, "chip not responding, ignoring board.\n");
        return -1;
    }

    rt_uint32_t version = inl(dev->iobase + TX_CONFIG) & HW_REVID_MASK;
    int i = 0;
    for (; i < CHIP_INFO_NR; i++)
    {
        if (version == rtl_chip_info[i].version)
        {
            dev->chipset = i;
            JUMP_TO(chip_match);
        }
    }

    /* unknown chip: assume array element #0, the original RTL-8139 */
    i = 0;
    dbg_log(DBG_LOG, "unknown chip version, assuming RTL-8139\n");
    dbg_log(DBG_LOG, "TxConfig = 0x%x\n", inl(dev->iobase + TX_CONFIG));
    dev->chipset = 0;

chip_match:
    dbg_log(DBG_LOG, "chipset id (%x) == index %d, '%s'\n",
            version, i, rtl_chip_info[i].name);

    /* wake up the netcard */
    if (dev->chipset >= CH_8139B)
    {
        dbg_log(DBG_WARNING, "PCI PM wakeup is not supported yet!\n");
    }
    else
    {
        rt_uint8_t tmp = inb(dev->iobase + CONFIG1);
        tmp &= ~(CFG1_SLEEP | CFG1_PWRDN);
        outb(dev->iobase + CONFIG1, tmp);
    }

    /* reset chip */
    rtl8139_chip_reset(dev);
    return 0;
}

static int rtl8139_init_hw(struct eth_device_rtl8139 *dev)
{
    rt_pci_device_t *pci_dev = dev->pci_dev;

    /* check version */
    if (pci_dev->vendor_id == RTL8139_VENDOR_ID && pci_dev->device_id == RTL8139_DEVICE_ID &&
        pci_dev->revision_id >= 0x20)
    {
        dbg_log(DBG_LOG, "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use the 8139cp driver\n",
                pci_dev->vendor_id, pci_dev->device_id, pci_dev->revision_id);
    }

    if (rtl8139_init_board(dev) < 0)
    {
        return -1;
    }

    /* read the MAC address from the chip's MAC0 (IDR) registers */
    int i = 0;
    for (; i < ETH_ALEN; i++)
    {
        dev->dev_addr[i] = inb(dev->iobase + MAC0 + i);
    }
    dbg_log(DBG_INFO, "MAC addr: %x:%x:%x:%x:%x:%x\n", dev->dev_addr[0], dev->dev_addr[1],
            dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

    rt_spin_lock_init(&dev->lock);

    /* Put the chip into low-power mode. */
    if (rtl_chip_info[dev->chipset].flags & HAS_HLT_CLK)
    {
        outb(dev->iobase + HLT_CTL, 'H'); /* 'R' would leave the clock running. */
    }
    return 0;
}

static rt_err_t rtl8139_tx(rt_device_t device, struct pbuf *p)
{
    rt_err_t err = RT_EOK;

    /* copy data from the pbuf chain into the tx cache */
    pbuf_copy_partial(p, (void *)&tx_cache_pbuf[0], p->tot_len, 0);
    if (rtl8139_transmit(GET_RTL8139(device), tx_cache_pbuf, p->tot_len) < 0)
    {
        err = -RT_ERROR;
    }
    return err;
}

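/*
 * Rx hand-off format: each message placed on rx_mqueue by
 * rtl8139_rx_interrupt() is a 4-byte length field followed by the frame
 * payload, so the receive side below reads the length first and copies the
 * remaining bytes into a freshly allocated pbuf.
 */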
static struct pbuf *rtl8139_rx(rt_device_t device)
{
    struct eth_device_rtl8139 *dev = GET_RTL8139(device);
    int recv_len = 0;
    struct pbuf *pbuf = RT_NULL;
    rt_err_t err;

    /* get data from rx queue. */
    err = rt_mq_recv_interruptible(dev->rx_mqueue, rx_cache_recv_buf, RX_MSG_SIZE, 0);
    if (err != RT_EOK)
    {
        return pbuf;
    }

    /* get recv len from rx cache, bytes 0~3: recv len, bytes 4~n: frame data */
    recv_len = *(int *)rx_cache_recv_buf;
    if (recv_len > 0)
    {
        pbuf = pbuf_alloc(PBUF_LINK, recv_len, PBUF_RAM);
        if (pbuf != RT_NULL)
        {
            rt_memcpy(pbuf->payload, (char *)rx_cache_recv_buf + 4, recv_len);
        }
    }
    return pbuf;
}

static rt_err_t rtl8139_control(rt_device_t device, int cmd, void *args)
{
    struct eth_device_rtl8139 *dev = GET_RTL8139(device);
    switch (cmd)
    {
    case NIOCTL_GADDR:
        /* get MAC address */
        if (args)
        {
            rt_memcpy(args, dev->dev_addr, ETH_ALEN);
        }
        else
        {
            return -RT_ERROR;
        }
        break;
    default:
        break;
    }
    return RT_EOK;
}

static int rt_hw_rtl8139_init(void)
{
    rt_memset(&eth_dev, 0x0, sizeof(eth_dev));

    if (rtl8139_get_pci(&eth_dev) < 0)
    {
        return -1;
    }
    if (rtl8139_init_hw(&eth_dev) < 0)
    {
        return -1;
    }

    /* set device ops */
#ifdef RT_USING_DEVICE_OPS
    eth_dev.parent.parent.ops = &rtl8139_ops;
#else
    eth_dev.parent.parent.init = rtl8139_init;
    eth_dev.parent.parent.open = RT_NULL;
    eth_dev.parent.parent.close = RT_NULL;
    eth_dev.parent.parent.read = RT_NULL;
    eth_dev.parent.parent.write = RT_NULL;
    eth_dev.parent.parent.control = rtl8139_control;
#endif
    eth_dev.parent.parent.user_data = RT_NULL;
    eth_dev.parent.eth_rx = rtl8139_rx;
    eth_dev.parent.eth_tx = rtl8139_tx;

    /* register ETH device */
    if (eth_device_init(&(eth_dev.parent), DEV_NAME) != RT_EOK)
    {
        return -1;
    }
    return 0;
}
INIT_DEVICE_EXPORT(rt_hw_rtl8139_init);