synopGMAC.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-08-24     chinesebear  first version
 * 2020-08-10     lizhirui     porting to ls2k
 */

#include <rtthread.h>
#include <rtdef.h>
//#include <lwip/pbuf.h>
#include "synopGMAC.h"
#include "mii.c"
#include "synopGMAC_debug.h"

#define RMII
#define PCI_BASE            (0xFE00001800)
#define Buffer_Size         2048
#define MAX_ADDR_LEN        6
#define NAMESIZE            16
#define LS1B_GMAC0_IRQ      34
#define BUS_SIZE_ALIGN(x)   ((x + 15) & ~15)
#define DEFAULT_MAC_ADDRESS {0x00, 0x55, 0x7B, 0xB5, 0x7D, 0xF7}

u64 gmac_base = 0;
static u32 GMAC_Power_down;

extern void *plat_alloc_consistent_dmaable_memory(synopGMACdevice *pcidev, u32 size, u32 *addr);
extern s32 synopGMAC_check_phy_init(synopGMACPciNetworkAdapter *adapter);
extern int init_phy(synopGMACdevice *gmacdev);
dma_addr_t plat_dma_map_single(void *hwdev, void *ptr, u32 size);
void eth_rx_irq(int irqno, void *param);

static char Rx_Buffer[Buffer_Size];
static char Tx_Buffer[Buffer_Size];

struct pci_header
{
    uint16_t VendorID;
    uint16_t DeviceID;
    uint16_t Command;
    uint16_t Status;
    uint32_t RevisionID : 8;
    uint32_t ClassCode : 24;
    uint8_t  CachelineSize;
    uint8_t  LatencyTimer;
    uint8_t  HeaderType;
    uint8_t  BIST;
    uint32_t BaseAddressRegister[6];
    uint32_t CardbusCISPointer;
    uint16_t SubsystemVendorID;
    uint16_t SubsystemID;
    uint32_t ExpansionROMBaseAddress;
    uint32_t CapabilitiesPointer : 8;
    uint32_t resv1 : 24;
    uint32_t resv2;
    uint8_t  InterruptLine;
    uint8_t  InterruptPin;
    uint8_t  Min_Gnt;
    uint8_t  Max_Lat;
};
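
/*
 * Note: this mirrors the standard PCI type-0 configuration header layout;
 * rt_hw_eth_init() below reads BaseAddressRegister[0] from it to locate the
 * GMAC register window.
 */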

struct rt_eth_dev
{
    struct eth_device parent;
    rt_uint8_t dev_addr[MAX_ADDR_LEN];
    char *name;
    int iobase;
    int state;
    int index;
    struct rt_timer link_timer;
    struct rt_timer rx_poll_timer;
    void *priv;
};

static struct rt_eth_dev eth_dev;
static struct rt_semaphore sem_ack, sem_lock;

/**
 * This sets up the transmit descriptor queue in ring or chain mode.
 * The function is tightly coupled to the platform and operating system;
 * the device only becomes interested after the descriptors are set up, so it
 * is not part of the device driver API. Treat it as example code for laying
 * out the descriptor structures in ring mode or chain mode.
 * On Linux it depends on the pci device structure, because Linux allocates
 * consistent DMA-able memory through the PCI layer.
 * - Allocates the memory for the descriptors.
 * - Initializes the Busy and Next descriptor indices to 0 (indicating the first descriptor).
 * - Initializes the Busy and Next descriptor pointers to the first descriptor address.
 * - Initializes the last descriptor with the end-of-ring marker in ring mode.
 * - Initializes the descriptors in chain mode.
 * @param[in] pointer to synopGMACdevice.
 * @param[in] number of descriptors expected in the tx descriptor queue.
 * @param[in] whether descriptors are to be created in RING mode or CHAIN mode.
 * \return 0 upon success. Error code upon failure.
 * \note In ring mode this function fails outright if the allocation for the required
 * number of descriptors fails. In chain mode it may return -ESYNOPGMACNOMEM partway
 * through chain creation; after it returns, check gmacdev->TxDescCount to see how many
 * descriptors are actually in the chain and continue only if that meets the requirements.
 */
s32 synopGMAC_setup_tx_desc_queue(synopGMACdevice *gmacdev, u32 no_of_desc, u32 desc_mode)
{
    s32 i;
    DmaDesc *bf1;
    DmaDesc *first_desc = NULL;
    dma_addr_t dma_addr;

    gmacdev->TxDescCount = 0;
    first_desc = (DmaDesc *)plat_alloc_consistent_dmaable_memory(gmacdev, sizeof(DmaDesc) * (no_of_desc), &dma_addr);
    if (first_desc == NULL)
    {
        rt_kprintf("Error in Tx Descriptors memory allocation\n");
        return -ESYNOPGMACNOMEM;
    }
    DEBUG_MES("tx_first_desc_addr = %p\n", first_desc);
    DEBUG_MES("dmaadr = %p\n", dma_addr);
    gmacdev->TxDescCount = no_of_desc;
    gmacdev->TxDesc = first_desc;
    gmacdev->TxDescDma = dma_addr;
    for (i = 0; i < gmacdev->TxDescCount; i++)
    {
        synopGMAC_tx_desc_init_ring(gmacdev->TxDesc + i, i == gmacdev->TxDescCount - 1);
#if SYNOP_TOP_DEBUG
        rt_kprintf("\n%02d %08x \n", i, (unsigned int)(gmacdev->TxDesc + i));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->status));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->length));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->buffer1));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->buffer2));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->data1));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->data2));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->dummy1));
        rt_kprintf("%08x ", (unsigned int)((gmacdev->TxDesc + i)->dummy2));
#endif
    }
    gmacdev->TxNext = 0;
    gmacdev->TxBusy = 0;
    gmacdev->TxNextDesc = gmacdev->TxDesc;
    gmacdev->TxBusyDesc = gmacdev->TxDesc;
    gmacdev->BusyTxDesc = 0;
    return -ESYNOPGMACNOERR;
}
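
/*
 * Usage sketch (matching what eth_init() below does): after the ring has been
 * set up, the DMA engine still has to be told where it lives, e.g.
 *
 *   synopGMAC_setup_tx_desc_queue(gmacdev, TRANSMIT_DESC_SIZE, RINGMODE);
 *   synopGMAC_init_tx_desc_base(gmacdev);   // point the DMA Tx base address at the new ring
 */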

/**
 * This sets up the receive descriptor queue in ring or chain mode.
 * The function is tightly coupled to the platform and operating system;
 * the device only becomes interested after the descriptors are set up, so it
 * is not part of the device driver API. Treat it as example code for laying
 * out the descriptor structures in ring mode or chain mode.
 * On Linux it depends on the pci device structure, because Linux allocates
 * consistent DMA-able memory through the PCI layer.
 * - Allocates the memory for the descriptors.
 * - Initializes the Busy and Next descriptor indices to 0 (indicating the first descriptor).
 * - Initializes the Busy and Next descriptor pointers to the first descriptor address.
 * - Initializes the last descriptor with the end-of-ring marker in ring mode.
 * - Initializes the descriptors in chain mode.
 * @param[in] pointer to synopGMACdevice.
 * @param[in] number of descriptors expected in the rx descriptor queue.
 * @param[in] whether descriptors are to be created in RING mode or CHAIN mode.
 * \return 0 upon success. Error code upon failure.
 * \note In ring mode this function fails outright if the allocation for the required
 * number of descriptors fails. In chain mode it may return -ESYNOPGMACNOMEM partway
 * through chain creation; after it returns, check gmacdev->RxDescCount to see how many
 * descriptors are actually in the chain and continue only if that meets the requirements.
 */
s32 synopGMAC_setup_rx_desc_queue(synopGMACdevice *gmacdev, u32 no_of_desc, u32 desc_mode)
{
    s32 i;
    DmaDesc *bf1;
    DmaDesc *first_desc = NULL;
    dma_addr_t dma_addr;

    gmacdev->RxDescCount = 0;
    first_desc = (DmaDesc *)plat_alloc_consistent_dmaable_memory(gmacdev, sizeof(DmaDesc) * no_of_desc, &dma_addr);
    if (first_desc == NULL)
    {
        rt_kprintf("Error in Rx Descriptor Memory allocation in Ring mode\n");
        return -ESYNOPGMACNOMEM;
    }
    DEBUG_MES("rx_first_desc_addr = %p\n", first_desc);
    DEBUG_MES("dmaadr = %p\n", dma_addr);
    gmacdev->RxDescCount = no_of_desc;
    gmacdev->RxDesc = (DmaDesc *)first_desc;
    gmacdev->RxDescDma = dma_addr;
    for (i = 0; i < gmacdev->RxDescCount; i++)
    {
        synopGMAC_rx_desc_init_ring(gmacdev->RxDesc + i, i == gmacdev->RxDescCount - 1);
    }
    gmacdev->RxNext = 0;
    gmacdev->RxBusy = 0;
    gmacdev->RxNextDesc = gmacdev->RxDesc;
    gmacdev->RxBusyDesc = gmacdev->RxDesc;
    gmacdev->BusyRxDesc = 0;
    return -ESYNOPGMACNOERR;
}
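
/*
 * Note: after this call every rx descriptor is still empty. eth_init() below
 * attaches an RX_BUF_SIZE buffer to each one with synopGMAC_set_rx_qptr()
 * before DMA reception is enabled.
 */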

void synopGMAC_linux_cable_unplug_function(void *adaptr)
{
    s32 data;
    synopGMACPciNetworkAdapter *adapter = (synopGMACPciNetworkAdapter *)adaptr;
    synopGMACdevice *gmacdev = adapter->synopGMACdev;
    struct ethtool_cmd cmd;

    if (!mii_link_ok(&adapter->mii))
    {
        if (gmacdev->LinkState)
            rt_kprintf("\r\nNo Link\r\n");
        gmacdev->DuplexMode = 0;
        gmacdev->Speed = 0;
        gmacdev->LoopBackMode = 0;
        gmacdev->LinkState = 0;
    }
    else
    {
        data = synopGMAC_check_phy_init(adapter);
        if (gmacdev->LinkState != data)
        {
            gmacdev->LinkState = data;
            synopGMAC_mac_init(gmacdev);
            rt_kprintf("Link is up in %s mode\n", (gmacdev->DuplexMode == FULLDUPLEX) ? "FULL DUPLEX" : "HALF DUPLEX");
            if (gmacdev->Speed == SPEED1000)
                rt_kprintf("Link is with 1000M Speed \r\n");
            if (gmacdev->Speed == SPEED100)
                rt_kprintf("Link is with 100M Speed \n");
            if (gmacdev->Speed == SPEED10)
                rt_kprintf("Link is with 10M Speed \n");
        }
    }
}

s32 synopGMAC_check_phy_init(synopGMACPciNetworkAdapter *adapter)
{
    struct ethtool_cmd cmd;
    synopGMACdevice *gmacdev = adapter->synopGMACdev;

    if (!mii_link_ok(&adapter->mii))
    {
        gmacdev->DuplexMode = FULLDUPLEX;
        gmacdev->Speed = SPEED100;
        return 0;
    }
    else
    {
        mii_ethtool_gset(&adapter->mii, &cmd);
        gmacdev->DuplexMode = (cmd.duplex == DUPLEX_FULL) ? FULLDUPLEX : HALFDUPLEX;
        if (cmd.speed == SPEED_1000)
            gmacdev->Speed = SPEED1000;
        else if (cmd.speed == SPEED_100)
            gmacdev->Speed = SPEED100;
        else
            gmacdev->Speed = SPEED10;
    }
    return gmacdev->Speed | (gmacdev->DuplexMode << 4);
}
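
/*
 * The return value is 0 while the link is down; otherwise it packs the
 * negotiated state as (Speed | (DuplexMode << 4)), which is what
 * synopGMAC_linux_cable_unplug_function() stores in gmacdev->LinkState to
 * detect link changes.
 */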

static int Mac_change_check(u8 *macaddr0, u8 *macaddr1)
{
    int i;
    for (i = 0; i < 6; i++)
    {
        if (macaddr0[i] != macaddr1[i])
            return 1;
    }
    return 0;
}

static rt_err_t eth_init(rt_device_t device)
{
    struct eth_device *eth_device = (struct eth_device *)device;
    RT_ASSERT(eth_device != RT_NULL);
    s32 ijk;
    s32 status = 0;
    u64 dma_addr;
    u32 Mac_changed = 0;
    struct pbuf *pbuf;
    u8 macaddr[6] = DEFAULT_MAC_ADDRESS;
    struct rt_eth_dev *dev = &eth_dev;
    struct synopGMACNetworkAdapter *adapter = dev->priv;
    synopGMACdevice *gmacdev = (synopGMACdevice *)adapter->synopGMACdev;

    synopGMAC_reset(gmacdev);
    synopGMAC_attach(gmacdev, (gmac_base + MACBASE), (gmac_base + DMABASE), DEFAULT_PHY_BASE, macaddr);
    synopGMAC_read_version(gmacdev);
    synopGMAC_set_mdc_clk_div(gmacdev, GmiiCsrClk3);
    gmacdev->ClockDivMdc = synopGMAC_get_mdc_clk_div(gmacdev);
    init_phy(adapter->synopGMACdev);

    DEBUG_MES("tx desc_queue\n");
    synopGMAC_setup_tx_desc_queue(gmacdev, TRANSMIT_DESC_SIZE, RINGMODE);
    synopGMAC_init_tx_desc_base(gmacdev);
    DEBUG_MES("rx desc_queue\n");
    synopGMAC_setup_rx_desc_queue(gmacdev, RECEIVE_DESC_SIZE, RINGMODE);
    synopGMAC_init_rx_desc_base(gmacdev);
    DEBUG_MES("DmaRxBaseAddr = %08x\n", synopGMACReadReg(gmacdev->DmaBase, DmaRxBaseAddr));
    // u32 dmaRx_Base_addr = synopGMACReadReg(gmacdev->DmaBase, DmaRxBaseAddr);
    // rt_kprintf("first_desc_addr = 0x%x\n", dmaRx_Base_addr);
#ifdef ENH_DESC_8W
    synopGMAC_dma_bus_mode_init(gmacdev, DmaBurstLength32 | DmaDescriptorSkip2 | DmaDescriptor8Words);
#else
    synopGMAC_dma_bus_mode_init(gmacdev, DmaBurstLength4 | DmaDescriptorSkip1);
    //synopGMAC_dma_bus_mode_init(gmacdev, DmaBurstLength4 | DmaDescriptorSkip2);
#endif
    synopGMAC_dma_control_init(gmacdev, DmaStoreAndForward | DmaTxSecondFrame | DmaRxThreshCtrl128);
    status = synopGMAC_check_phy_init(adapter);
    synopGMAC_mac_init(gmacdev);
    synopGMAC_pause_control(gmacdev);
#ifdef IPC_OFFLOAD
    synopGMAC_enable_rx_chksum_offload(gmacdev);
    synopGMAC_rx_tcpip_chksum_drop_enable(gmacdev);
#endif

    /* Attach one receive buffer to every rx descriptor before enabling DMA. */
    u64 skb;
    do
    {
        skb = (u64)plat_alloc_memory(RX_BUF_SIZE); //should the skb be aligned here?
        if (skb == RT_NULL)
        {
            rt_kprintf("ERROR in skb buffer allocation\n");
            break;
        }
        dma_addr = plat_dma_map_single(gmacdev, (void *)skb, RX_BUF_SIZE); //get the DMA address of the skb
        status = synopGMAC_set_rx_qptr(gmacdev, dma_addr, RX_BUF_SIZE, (u64)skb, 0, 0, 0);
        if (status < 0)
        {
            rt_kprintf("status < 0!!\n");
            plat_free_memory((void *)skb);
        }
    }
    while (status >= 0 && (status < (RECEIVE_DESC_SIZE - 1)));

    synopGMAC_clear_interrupt(gmacdev);
    synopGMAC_disable_mmc_tx_interrupt(gmacdev, 0xFFFFFFFF);
    synopGMAC_disable_mmc_rx_interrupt(gmacdev, 0xFFFFFFFF);
    synopGMAC_disable_mmc_ipc_rx_interrupt(gmacdev, 0xFFFFFFFF);
    // synopGMAC_disable_interrupt_all(gmacdev);
    synopGMAC_enable_interrupt(gmacdev, DmaIntEnable);
    synopGMAC_enable_dma_rx(gmacdev);
    synopGMAC_enable_dma_tx(gmacdev);
    plat_delay(DEFAULT_LOOP_VARIABLE);
    synopGMAC_check_phy_init(adapter);
    synopGMAC_mac_init(gmacdev);

    /* Poll the PHY link state once per second. */
    rt_timer_init(&dev->link_timer, "link_timer",
                  synopGMAC_linux_cable_unplug_function,
                  (void *)adapter,
                  RT_TICK_PER_SECOND,
                  RT_TIMER_FLAG_PERIODIC);
    rt_timer_start(&dev->link_timer);
#ifdef RT_USING_GMAC_INT_MODE
    /* install isr */
    DEBUG_MES("%s\n", __FUNCTION__);
    rt_hw_interrupt_install(LS1C_MAC_IRQ, eth_rx_irq, RT_NULL, "e0_isr");
    rt_hw_interrupt_umask(LS1C_MAC_IRQ);
#else
    /* No interrupt mode: poll for received frames every tick. */
    rt_timer_init(&dev->rx_poll_timer, "rx_poll_timer",
                  eth_rx_irq,
                  (void *)adapter,
                  1,
                  RT_TIMER_FLAG_PERIODIC);
    rt_timer_start(&dev->rx_poll_timer);
#endif /*RT_USING_GMAC_INT_MODE*/
    return RT_EOK;
}

static rt_err_t eth_open(rt_device_t dev, rt_uint16_t oflag)
{
    rt_kprintf("eth_open!!\n");
    return RT_EOK;
}

static rt_err_t eth_close(rt_device_t dev)
{
    return RT_EOK;
}

static rt_size_t eth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_size_t eth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_err_t eth_control(rt_device_t dev, int cmd, void *args)
{
    switch (cmd)
    {
    case NIOCTL_GADDR:
        if (args) rt_memcpy(args, eth_dev.dev_addr, 6);
        else return -RT_ERROR;
        break;
    default:
        break;
    }
    return RT_EOK;
}
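
/*
 * Illustrative use from application code (a sketch, not part of this driver):
 *
 *   rt_device_t netdev = rt_device_find("e0");
 *   rt_uint8_t mac[6];
 *   if (netdev != RT_NULL)
 *       rt_device_control(netdev, NIOCTL_GADDR, mac);  // copies the 6-byte MAC address
 */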

rt_err_t rt_eth_tx(rt_device_t device, struct pbuf *p)
{
    /* lock eth device */
    rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
    DEBUG_MES("in %s\n", __FUNCTION__);
    s32 status;
    u64 pbuf;
    u64 dma_addr;
    u32 offload_needed = 0;
    u32 index;
    DmaDesc *dpr;
    struct rt_eth_dev *dev = (struct rt_eth_dev *) device;
    struct synopGMACNetworkAdapter *adapter;
    synopGMACdevice *gmacdev;

    adapter = (struct synopGMACNetworkAdapter *) dev->priv;
    if (adapter == NULL)
    {
        rt_sem_release(&sem_lock); /* do not leave the device locked on the error path */
        return -1;
    }
    gmacdev = (synopGMACdevice *) adapter->synopGMACdev;
    if (gmacdev == NULL)
    {
        rt_sem_release(&sem_lock);
        return -1;
    }

    if (!synopGMAC_is_desc_owned_by_dma(gmacdev->TxNextDesc))
    {
        pbuf = (u64)plat_alloc_memory(p->tot_len);
        //pbuf = (u32)pbuf_alloc(PBUF_LINK, p->len, PBUF_RAM);
        if (pbuf == 0)
        {
            rt_kprintf("===error in alloc bf1\n");
            rt_sem_release(&sem_lock);
            return -1;
        }
        DEBUG_MES("p->len = %d\n", p->len);
        pbuf_copy_partial(p, (void *)pbuf, p->tot_len, 0);
        dma_addr = plat_dma_map_single(gmacdev, (void *)pbuf, p->tot_len);
        status = synopGMAC_set_tx_qptr(gmacdev, dma_addr, p->tot_len, pbuf, 0, 0, 0, offload_needed, &index, dpr);
        if (status < 0)
        {
            rt_kprintf("%s No More Free Tx Descriptors\n", __FUNCTION__);
            plat_free_memory((void *)pbuf);
            rt_sem_release(&sem_lock);
            return -16;
        }
    }
    synopGMAC_resume_dma_tx(gmacdev);

    /* Reclaim descriptors that the DMA has finished transmitting. */
    s32 desc_index;
    u64 data1, data2;
    u32 dma_addr1, dma_addr2;
    u32 length1, length2;
#ifdef ENH_DESC_8W
    u32 ext_status;
    u16 time_stamp_higher;
    u32 time_stamp_high;
    u32 time_stamp_low;
#endif
    do
    {
#ifdef ENH_DESC_8W
        desc_index = synopGMAC_get_tx_qptr(gmacdev, &status, &dma_addr1, &length1, &data1, &dma_addr2, &length2, &data2, &ext_status, &time_stamp_high, &time_stamp_low);
        synopGMAC_TS_read_timestamp_higher_val(gmacdev, &time_stamp_higher);
#else
        desc_index = synopGMAC_get_tx_qptr(gmacdev, &status, &dma_addr1, &length1, &data1, &dma_addr2, &length2, &data2);
#endif
        if (desc_index >= 0 && data1 != 0)
        {
#ifdef IPC_OFFLOAD
            if (synopGMAC_is_tx_ipv4header_checksum_error(gmacdev, status))
            {
                rt_kprintf("Hardware Failed to Insert IPV4 Header Checksum\n");
            }
            if (synopGMAC_is_tx_payload_checksum_error(gmacdev, status))
            {
                rt_kprintf("Hardware Failed to Insert Payload Checksum\n");
            }
#endif
            plat_free_memory((void *)(data1)); //sw: data1 = buffer1
            if (synopGMAC_is_desc_valid(status))
            {
                adapter->synopGMACNetStats.tx_bytes += length1;
                adapter->synopGMACNetStats.tx_packets++;
            }
            else
            {
                adapter->synopGMACNetStats.tx_errors++;
                adapter->synopGMACNetStats.tx_aborted_errors += synopGMAC_is_tx_aborted(status);
                adapter->synopGMACNetStats.tx_carrier_errors += synopGMAC_is_tx_carrier_error(status);
            }
        }
        adapter->synopGMACNetStats.collisions += synopGMAC_get_tx_collision_count(status);
    }
    while (desc_index >= 0);

    /* unlock eth device */
    rt_sem_release(&sem_lock);
    // rt_kprintf("output %d bytes\n", p->len);
    u32 test_data;
    test_data = synopGMACReadReg(gmacdev->DmaBase, DmaStatus);
    //rt_kprintf("dma_status = 0x%08x\n", test_data);
    return RT_EOK;
}

struct pbuf *rt_eth_rx(rt_device_t device)
{
    DEBUG_MES("%s : \n", __FUNCTION__);
    struct rt_eth_dev *dev = &eth_dev;
    struct synopGMACNetworkAdapter *adapter;
    synopGMACdevice *gmacdev;
    // struct PmonInet *pinetdev;
    s32 desc_index;
    int i;
    char *ptr;
    u32 bf1;
    u64 data1;
    u64 data2;
    u32 len;
    u32 status;
    u32 dma_addr1;
    u32 dma_addr2;
    struct pbuf *pbuf = RT_NULL;

    rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
    adapter = (struct synopGMACNetworkAdapter *) dev->priv;
    if (adapter == NULL)
    {
        rt_kprintf("%s : Unknown Device !!\n", __FUNCTION__);
        rt_sem_release(&sem_lock); /* do not leave the device locked on the error path */
        return NULL;
    }
    gmacdev = (synopGMACdevice *) adapter->synopGMACdev;
    if (gmacdev == NULL)
    {
        rt_kprintf("%s : GMAC device structure is missing\n", __FUNCTION__);
        rt_sem_release(&sem_lock);
        return NULL;
    }

    /* Handle the receive descriptors */
    desc_index = synopGMAC_get_rx_qptr(gmacdev, &status, &dma_addr1, NULL, &data1, &dma_addr2, NULL, &data2);
    if (((u32)desc_index >= RECEIVE_DESC_SIZE) && (desc_index != -1))
    {
        rt_kprintf("host receive descriptor address pointer = 0x%08x\n", synopGMACReadReg(gmacdev->DmaBase, DmaRxCurrDesc));
        rt_kprintf("host receive buffer = 0x%08x\n", synopGMACReadReg(gmacdev->DmaBase, DmaRxCurrAddr));
        rt_kprintf("desc_index error!!!!, tick = %d\n", rt_tick_get());
        while (1);
    }
    if (desc_index >= 0 && data1 != 0)
    {
        DEBUG_MES("Received Data at Rx Descriptor %d for skb 0x%08x whose status is %08x\n", desc_index, dma_addr1, status);
        if (synopGMAC_is_rx_desc_valid(status) || SYNOP_PHY_LOOPBACK)
        {
            dma_addr1 = plat_dma_map_single(gmacdev, (void *)data1, RX_BUF_SIZE);
            len = synopGMAC_get_rx_desc_frame_length(status) - 4; //Not interested in Ethernet CRC bytes
            pbuf = pbuf_alloc(PBUF_LINK, len, PBUF_RAM);
            if (pbuf == 0) rt_kprintf("===error in pbuf_alloc\n");
            rt_memcpy(pbuf->payload, (char *)data1, len);
            DEBUG_MES("==get pkg len: %d\n", len);
        }
        else
        {
            rt_kprintf("s: %08x\n", status);
            adapter->synopGMACNetStats.rx_errors++;
            adapter->synopGMACNetStats.collisions += synopGMAC_is_rx_frame_collision(status);
            adapter->synopGMACNetStats.rx_crc_errors += synopGMAC_is_rx_crc(status);
            adapter->synopGMACNetStats.rx_frame_errors += synopGMAC_is_frame_dribbling_errors(status);
            adapter->synopGMACNetStats.rx_length_errors += synopGMAC_is_rx_frame_length_errors(status);
        }
        /* Give the buffer back to the DMA for the next frame. */
        desc_index = synopGMAC_set_rx_qptr(gmacdev, dma_addr1, RX_BUF_SIZE, (u64)data1, 0, 0, 0);
        if (desc_index < 0)
        {
#if SYNOP_RX_DEBUG
            rt_kprintf("Cannot set Rx Descriptor for data1 %08x\n", (u32)data1);
#endif
            plat_free_memory((void *)data1);
        }
    }
    rt_sem_release(&sem_lock);
    DEBUG_MES("%s : before return \n", __FUNCTION__);
    return pbuf;
}

static int rtl88e1111_config_init(synopGMACdevice *gmacdev)
{
    int retval, err;
    u16 data;

    DEBUG_MES("in %s\n", __FUNCTION__);
    synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x14, &data);
    data = data | 0x82;
    err = synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x14, data);
    synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x00, &data);
    data = data | 0x8000;
    err = synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x00, data);
#if SYNOP_PHY_LOOPBACK
    synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x14, &data);
    data = data | 0x70;
    data = data & 0xffdf;
    err = synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x14, data);
    data = 0x8000;
    err = synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x00, data);
    data = 0x5140;
    err = synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x00, data);
#endif
    if (err < 0)
        return err;
    return 0;
}

int init_phy(synopGMACdevice *gmacdev)
{
    u16 data;

    synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 2, &data);
    /* set 88e1111 clock phase delay */
    if (data == 0x141)
        rtl88e1111_config_init(gmacdev);
#if defined(RMII)
    else if (data == 0x8201)
    {
        //RTL8201
        data = 0x400; // set RMII mode
        synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x19, data);
        synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x19, &data);
        TR("phy reg25 is %0x \n", data);
        data = 0x3100; //set 100M speed
        synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x0, data);
    }
    else if (data == 0x0180 || data == 0x0181)
    {
        //DM9161
        synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x10, &data);
        data |= (1 << 8); //set RMII mode
        synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x10, data); //set RMII mode
        synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x10, &data);
        TR("phy reg16 is 0x%0x \n", data);
        // synopGMAC_read_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x0, &data);
        // data &= ~(1 << 10);
        data = 0x3100; //set auto-negotiation
        //data = 0x0100; //set 10M speed
        synopGMAC_write_phy_reg(gmacdev->MacBase, gmacdev->PhyBase, 0x0, data);
    }
#endif
    return 0;
}
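
/*
 * init_phy() identifies the attached PHY from MII register 2 (the PHY
 * identifier register) and applies chip-specific RMII/speed setup; unknown
 * IDs fall through and the PHY is left in its default configuration.
 */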

u32 synopGMAC_wakeup_filter_config3[] =
{
    0x00000000,
    0x000000FF,
    0x00000000,
    0x00000000,
    0x00000100,
    0x00003200,
    0x7eED0000,
    0x00000000
};

static void synopGMAC_linux_powerdown_mac(synopGMACdevice *gmacdev)
{
    rt_kprintf("Put the GMAC to power down mode..\n");
    GMAC_Power_down = 1;
    synopGMAC_disable_dma_tx(gmacdev);
    plat_delay(10000);
    synopGMAC_tx_disable(gmacdev);
    synopGMAC_rx_disable(gmacdev);
    plat_delay(10000);
    synopGMAC_disable_dma_rx(gmacdev);
    synopGMAC_magic_packet_enable(gmacdev);
    synopGMAC_write_wakeup_frame_register(gmacdev, synopGMAC_wakeup_filter_config3);
    synopGMAC_wakeup_frame_enable(gmacdev);
    synopGMAC_rx_enable(gmacdev);
    synopGMAC_pmt_int_enable(gmacdev);
    synopGMAC_power_down_enable(gmacdev);
    return;
}

static void synopGMAC_linux_powerup_mac(synopGMACdevice *gmacdev)
{
    GMAC_Power_down = 0;
    if (synopGMAC_is_magic_packet_received(gmacdev))
        rt_kprintf("GMAC woke up due to Magic Pkt Received\n");
    if (synopGMAC_is_wakeup_frame_received(gmacdev))
        rt_kprintf("GMAC woke up due to Wakeup Frame Received\n");
    synopGMAC_pmt_int_disable(gmacdev);
    synopGMAC_rx_enable(gmacdev);
    synopGMAC_enable_dma_rx(gmacdev);
    synopGMAC_tx_enable(gmacdev);
    synopGMAC_enable_dma_tx(gmacdev);
    return;
}

static int mdio_read(synopGMACPciNetworkAdapter *adapter, int addr, int reg)
{
    synopGMACdevice *gmacdev;
    u16 data;

    gmacdev = adapter->synopGMACdev;
    synopGMAC_read_phy_reg(gmacdev->MacBase, addr, reg, &data);
    return data;
}

static void mdio_write(synopGMACPciNetworkAdapter *adapter, int addr, int reg, int data)
{
    synopGMACdevice *gmacdev;

    gmacdev = adapter->synopGMACdev;
    synopGMAC_write_phy_reg(gmacdev->MacBase, addr, reg, data);
}
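
/*
 * mdio_read()/mdio_write() are the MDIO accessors plugged into the mii_if_info
 * structure in rt_hw_eth_init(), so the generic mii helpers (mii_link_ok,
 * mii_ethtool_gset, ...) can reach the PHY through this GMAC.
 */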

/*
 * Rx/DMA event handler: invoked either from the GMAC interrupt
 * (RT_USING_GMAC_INT_MODE) or from the periodic rx_poll_timer started in eth_init().
 */
void eth_rx_irq(int irqno, void *param)
{
    struct rt_eth_dev *dev = &eth_dev;
    struct synopGMACNetworkAdapter *adapter = dev->priv;
    //DEBUG_MES("in irq!!\n");
#ifdef RT_USING_GMAC_INT_MODE
    int i;
    for (i = 0; i < 7200; i++)
        ;
#endif /*RT_USING_GMAC_INT_MODE*/
    synopGMACdevice *gmacdev = (synopGMACdevice *)adapter->synopGMACdev;
    u32 interrupt, dma_status_reg;
    s32 status;
    u32 dma_addr;
    //rt_kprintf("irq i = %d\n", i++);
    dma_status_reg = synopGMACReadReg(gmacdev->DmaBase, DmaStatus);
    if (dma_status_reg == 0)
    {
        rt_kprintf("dma_status == 0 \n");
        return;
    }
    //rt_kprintf("dma_status_reg is 0x%x\n", dma_status_reg);
    u32 gmacstatus;
    synopGMAC_disable_interrupt_all(gmacdev);
    gmacstatus = synopGMACReadReg(gmacdev->MacBase, GmacStatus);
    if (dma_status_reg & GmacPmtIntr)
    {
        rt_kprintf("%s:: Interrupt due to PMT module\n", __FUNCTION__);
        //synopGMAC_linux_powerup_mac(gmacdev);
    }
    if (dma_status_reg & GmacMmcIntr)
    {
        rt_kprintf("%s:: Interrupt due to MMC module\n", __FUNCTION__);
        DEBUG_MES("%s:: synopGMAC_rx_int_status = %08x\n", __FUNCTION__, synopGMAC_read_mmc_rx_int_status(gmacdev));
        DEBUG_MES("%s:: synopGMAC_tx_int_status = %08x\n", __FUNCTION__, synopGMAC_read_mmc_tx_int_status(gmacdev));
    }
    if (dma_status_reg & GmacLineIntfIntr)
    {
        //rt_kprintf("%s:: Interrupt due to GMAC LINE module\n", __FUNCTION__);
    }
    interrupt = synopGMAC_get_interrupt_type(gmacdev);
    //rt_kprintf("%s:Interrupts to be handled: 0x%08x\n", __FUNCTION__, interrupt);
    if (interrupt & synopGMACDmaError)
    {
        u8 mac_addr0[6];
        rt_kprintf("%s::Fatal Bus Error Interrupt Seen\n", __FUNCTION__);
        memcpy(mac_addr0, dev->dev_addr, 6);
        /* Reset the DMA and the MAC, then restore the descriptors and the MAC address. */
        synopGMAC_disable_dma_tx(gmacdev);
        synopGMAC_disable_dma_rx(gmacdev);
        synopGMAC_take_desc_ownership_tx(gmacdev);
        synopGMAC_take_desc_ownership_rx(gmacdev);
        synopGMAC_init_tx_rx_desc_queue(gmacdev);
        synopGMAC_reset(gmacdev);
        synopGMAC_set_mac_addr(gmacdev, GmacAddr0High, GmacAddr0Low, mac_addr0);
        synopGMAC_dma_bus_mode_init(gmacdev, DmaFixedBurstEnable | DmaBurstLength8 | DmaDescriptorSkip1);
        synopGMAC_dma_control_init(gmacdev, DmaStoreAndForward);
        synopGMAC_init_rx_desc_base(gmacdev);
        synopGMAC_init_tx_desc_base(gmacdev);
        synopGMAC_mac_init(gmacdev);
        synopGMAC_enable_dma_rx(gmacdev);
        synopGMAC_enable_dma_tx(gmacdev);
    }
    if (interrupt & synopGMACDmaRxNormal)
    {
        //DEBUG_MES("%s:: Rx Normal \n", __FUNCTION__);
        //synop_handle_received_data(netdev);
        eth_device_ready(&eth_dev.parent); /* notify the stack that a frame can be fetched via rt_eth_rx() */
    }
    if (interrupt & synopGMACDmaRxAbnormal)
    {
        //rt_kprintf("%s::Abnormal Rx Interrupt Seen\n", __FUNCTION__);
        if (GMAC_Power_down == 0)
        {
            adapter->synopGMACNetStats.rx_over_errors++;
            synopGMACWriteReg(gmacdev->DmaBase, DmaStatus, 0x80);
            synopGMAC_resume_dma_rx(gmacdev);
        }
    }
    if (interrupt & synopGMACDmaRxStopped)
    {
        rt_kprintf("%s::Receiver stopped seeing Rx interrupts\n", __FUNCTION__); //Receiver gone in to stopped state
    }
    if (interrupt & synopGMACDmaTxNormal)
    {
        DEBUG_MES("%s::Finished Normal Transmission \n", __FUNCTION__);
        // synop_handle_transmit_over(netdev);
    }
    if (interrupt & synopGMACDmaTxAbnormal)
    {
        rt_kprintf("%s::Abnormal Tx Interrupt Seen\n", __FUNCTION__);
    }
    if (interrupt & synopGMACDmaTxStopped)
    {
        TR("%s::Transmitter stopped sending the packets\n", __FUNCTION__);
        if (GMAC_Power_down == 0) // If Mac is not in powerdown
        {
            synopGMAC_disable_dma_tx(gmacdev);
            synopGMAC_take_desc_ownership_tx(gmacdev);
            synopGMAC_enable_dma_tx(gmacdev);
            // netif_wake_queue(netdev);
            TR("%s::Transmission Resumed\n", __FUNCTION__);
        }
    }
    /* Enable the interrupt before returning from the ISR */
    synopGMAC_enable_interrupt(gmacdev, DmaIntEnable);
    return;
}

int rt_hw_eth_init(void)
{
    /* Locate the GMAC register window from BAR0 of the PCI configuration header. */
    struct pci_header *p = (struct pci_header *)(0x9000000000000000 | PCI_BASE);
    gmac_base = (0x9000000000000000 | ((p->BaseAddressRegister[0]) & 0xffffff00));

    struct synopGMACNetworkAdapter *synopGMACadapter;
    static u8 mac_addr0[6] = DEFAULT_MAC_ADDRESS;
    int index;

    rt_sem_init(&sem_ack, "tx_ack", 1, RT_IPC_FLAG_FIFO);
    rt_sem_init(&sem_lock, "eth_lock", 1, RT_IPC_FLAG_FIFO);
    memset(&eth_dev, 0, sizeof(eth_dev));

    synopGMACadapter = (struct synopGMACNetworkAdapter *)plat_alloc_memory(sizeof(struct synopGMACNetworkAdapter));
    if (!synopGMACadapter)
    {
        rt_kprintf("Error in Memory Allocation, Function : %s \n", __FUNCTION__);
    }
    memset((char *)synopGMACadapter, 0, sizeof(struct synopGMACNetworkAdapter));
    synopGMACadapter->synopGMACdev = NULL;
    synopGMACadapter->synopGMACdev = (synopGMACdevice *) plat_alloc_memory(sizeof(synopGMACdevice));
    if (!synopGMACadapter->synopGMACdev)
    {
        rt_kprintf("Error in Memory Allocation, Function : %s \n", __FUNCTION__);
    }
    memset((char *)synopGMACadapter->synopGMACdev, 0, sizeof(synopGMACdevice));

    /*
     * Attach the device to the MAC struct. This will configure all the required base
     * addresses such as MAC base, configuration base and PHY base address (out of 32
     * possible PHYs).
     */
    synopGMAC_attach(synopGMACadapter->synopGMACdev, (gmac_base + MACBASE), gmac_base + DMABASE, DEFAULT_PHY_BASE, mac_addr0);
    init_phy(synopGMACadapter->synopGMACdev);
    synopGMAC_reset(synopGMACadapter->synopGMACdev);

    /* MII setup */
    synopGMACadapter->mii.phy_id_mask = 0x1F;
    synopGMACadapter->mii.reg_num_mask = 0x1F;
    synopGMACadapter->mii.dev = synopGMACadapter;
    synopGMACadapter->mii.mdio_read = mdio_read;
    synopGMACadapter->mii.mdio_write = mdio_write;
    synopGMACadapter->mii.phy_id = synopGMACadapter->synopGMACdev->PhyBase;
    synopGMACadapter->mii.supports_gmii = mii_check_gmii_support(&synopGMACadapter->mii);

    eth_dev.iobase = gmac_base;
    eth_dev.name = "e0";
    eth_dev.priv = synopGMACadapter;
    eth_dev.dev_addr[0] = mac_addr0[0];
    eth_dev.dev_addr[1] = mac_addr0[1];
    eth_dev.dev_addr[2] = mac_addr0[2];
    eth_dev.dev_addr[3] = mac_addr0[3];
    eth_dev.dev_addr[4] = mac_addr0[4];
    eth_dev.dev_addr[5] = mac_addr0[5];

    eth_dev.parent.parent.type = RT_Device_Class_NetIf;
    eth_dev.parent.parent.init = eth_init;
    eth_dev.parent.parent.open = eth_open;
    eth_dev.parent.parent.close = eth_close;
    eth_dev.parent.parent.read = eth_read;
    eth_dev.parent.parent.write = eth_write;
    eth_dev.parent.parent.control = eth_control;
    eth_dev.parent.parent.user_data = RT_NULL;
    eth_dev.parent.eth_tx = rt_eth_tx;
    eth_dev.parent.eth_rx = rt_eth_rx;

    eth_device_init(&(eth_dev.parent), "e0");
    eth_device_linkchange(&eth_dev.parent, RT_TRUE); //link up "e0" so lwIP will start polling it
    return 0;
}
INIT_COMPONENT_EXPORT(rt_hw_eth_init);