hal_geth.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892
  1. /*
  2. * Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
  3. *
  4. * Allwinner is a trademark of Allwinner Technology Co.,Ltd., registered in
  5. * the the People's Republic of China and other countries.
  6. * All Allwinner Technology Co.,Ltd. trademarks are used with permission.
  7. *
  8. * DISCLAIMER
  9. * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
  10. * IF YOU NEED TO INTEGRATE THIRD PARTY'S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
  11. * IN ALLWINNER'S SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
  12. * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
  13. * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
  14. * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
  15. * YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY'S TECHNOLOGY.
  16. *
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY ALLWINNER"AS IS" AND TO THE MAXIMUM EXTENT
  19. * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
  20. * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
  21. * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
  22. * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  23. * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  25. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  26. * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
  27. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  28. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  30. * OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #include <errno.h>
  33. #include <typedef.h>
  34. #include <hal_mem.h>
  35. #include <sunxi_hal_geth.h>
  36. #include <lwip/pbuf.h>
  37. #include <netif/ethernet.h>
  38. #include <sunxi_hal_mii.h>
  39. #include <sunxi_hal_miiphy.h>
  40. #include <rtthread.h>
  41. #include <netif/ethernetif.h>
  42. #ifdef RT_USING_SMART
  43. #include <page.h>
  44. #include <ioremap.h>
  45. #endif
  46. #include <arch.h>
/* Driver-global state for the single GMAC instance managed by this file. */
static struct geth_device rt_geth_dev;
/* Index of the next RX DMA descriptor to be checked/consumed by software. */
static unsigned int rx_clean = 0;
  49. static int gmac_rx_desc_fill(rt_device_t dev)
  50. {
  51. unsigned int i = 0;
  52. unsigned int count = 0;
  53. hal_geth_dma_desc_t *rx_p = NULL;
  54. for(i=0; i<DMA_DESC_RX_NUM; i++)
  55. {
  56. rx_p = rt_geth_dev.get_buffer_config.dma_desc_rx + count;
  57. rx_p->desc0.rx.own = 1;
  58. rx_p->desc1.all |= ((1 << 11) - 1);
  59. count = circ_inc(count, DMA_DESC_RX_NUM);
  60. }
  61. count = 0;
  62. return 0;
  63. }
  64. void desc_buf_set(struct dma_desc *desc, unsigned long paddr, int size)
  65. {
  66. desc->desc1.all &= (~((1 << 11) - 1));
  67. desc->desc1.all |= (size & ((1 << 11) - 1));
  68. }
/* Hand the descriptor to the DMA engine (bit 31 of desc0 is the own bit). */
void desc_set_own(struct dma_desc *desc)
{
    desc->desc0.all |= 0x80000000;
}
  73. void desc_tx_close(struct dma_desc *first, struct dma_desc *end, int csum_insert)
  74. {
  75. struct dma_desc *desc = first;
  76. first->desc1.tx.first_sg = 1;
  77. end->desc1.tx.last_seg = 1;
  78. end->desc1.tx.interrupt = 1;
  79. if (csum_insert)
  80. {
  81. do
  82. {
  83. desc->desc1.tx.cic = 3;
  84. desc++;
  85. } while (desc <= end);
  86. }
  87. }
/* Non-zero while the DMA engine still owns the descriptor (bit 31 of desc0). */
int desc_get_own(struct dma_desc *desc)
{
    return desc->desc0.all & 0x80000000;
}
/* Clear the software "queued for TX" tag kept in the reserved word. */
void desc_tag_clean(struct dma_desc *desc)
{
    desc->resever0 = 0;
}
/* Tag the descriptor as queued by software; cleared when TX is reclaimed. */
void desc_add_tag(struct dma_desc *desc)
{
    desc->resever0 = 1;
}
  100. static int geth_phy_read(char *devname, unsigned int phy_addr, unsigned char reg,unsigned short *data)
  101. {
  102. struct eth_device *dev;
  103. uint32_t value;
  104. value = geth_mdio_read(rt_geth_dev.iobase, phy_addr, reg);
  105. return value;
  106. }
  107. static int geth_phy_write(char *devname, unsigned int phy_addr, unsigned char reg, uint16_t data)
  108. {
  109. struct eth_device *dev;
  110. geth_mdio_write(rt_geth_dev.iobase, phy_addr, reg, data);
  111. return 0;
  112. }
  113. void geth_link_change(struct geth_device *dev,rt_bool_t up)
  114. {
  115. if(up)
  116. {
  117. printf("link up\n");
  118. eth_device_linkchange(&dev->parent, RT_TRUE);
  119. dev->phy_link_status = RT_TRUE;
  120. }
  121. else
  122. {
  123. printf("link down\n");
  124. eth_device_linkchange(&dev->parent, RT_FALSE);
  125. dev->phy_link_status = RT_FALSE;
  126. }
  127. }
  128. int read_data_from_eth(rt_device_t dev,struct pbuf *p,uint16_t *read_length)
  129. {
  130. unsigned int i = 0;
  131. int ret = -1;
  132. hal_geth_dma_desc_t *rx_p = NULL;
  133. unsigned int delay_count = 0;
  134. struct pbuf *q = NULL;
  135. int length = 0;
  136. int offset = 0;
  137. rx_p = rt_geth_dev.get_buffer_config.dma_desc_rx + rx_clean;
  138. awos_arch_mems_flush_dcache_region((unsigned long)rx_p,sizeof(hal_geth_dma_desc_t));
  139. awos_arch_mems_flush_dcache_region((unsigned long)rx_p->desc2,2048);
  140. dsb(v);
  141. if(!rx_p->desc0.rx.own)
  142. {
  143. length = rx_p->desc0.rx.frm_len;
  144. for(q = p;q != RT_NULL;q=q->next)
  145. {
  146. if((length - PBUF_MAX_BUFF_SIZE) <= 0)
  147. {
  148. rt_memcpy(q->payload,(void *)((unsigned long)rx_p->desc2 + offset),length);
  149. offset += length;
  150. break;
  151. }
  152. else
  153. {
  154. rt_memcpy(q->payload,(void *)((unsigned long)rx_p->desc2 + offset),PBUF_MAX_BUFF_SIZE);
  155. offset += PBUF_MAX_BUFF_SIZE;
  156. length -= PBUF_MAX_BUFF_SIZE;
  157. }
  158. }
  159. if(offset != (uint16_t)(rx_p->desc0.rx.frm_len))
  160. {
  161. *read_length = 0;
  162. ret = -1;
  163. printf("have not enough pbuf for receive data offset %d length %d\n",offset,length);
  164. }
  165. else
  166. {
  167. *read_length = offset;
  168. ret = 0;
  169. }
  170. rx_p->desc0.all = 0x80000000;
  171. awos_arch_mems_clean_dcache_region((unsigned long)rx_p,sizeof(hal_geth_dma_desc_t));
  172. rx_clean = circ_inc(rx_clean, DMA_DESC_RX_NUM);
  173. }
  174. else
  175. {
  176. read_length = 0;
  177. return -1;
  178. }
  179. return ret;
  180. }
/*
 * Reclaim TX descriptors already processed by the DMA engine.
 *
 * Starting from the static clean index, clears the software tag
 * (resever0) on every descriptor the hardware has released (own bit
 * clear) that software had tagged, writing each cleared descriptor back
 * to memory. Stops at the first descriptor still owned by hardware or
 * not tagged. Always returns 0; 'dev' is unused.
 */
int tx_desc_recycle(rt_device_t dev)
{
    struct dma_desc *desc;
    static int tx_clean = 0;  /* persists across calls: next index to reclaim */
    desc = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_clean;
    /* Invalidate before reading so the own bit reflects hardware state. */
    awos_arch_mems_flush_dcache_region((unsigned long)desc, sizeof(hal_geth_dma_desc_t));
    dsb(v);
    while ((!desc_get_own(desc)) && (desc->resever0))
    {
        desc_tag_clean(desc);
        dsb(v);
        /* Push the cleared tag back to memory. */
        awos_arch_mems_clean_dcache_region((unsigned long)desc, sizeof(hal_geth_dma_desc_t));
        tx_clean = circ_inc(tx_clean, DMA_DESC_TX_NUM);
        desc = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_clean;
        dsb(v);
        /* Re-invalidate the next descriptor before inspecting it. */
        awos_arch_mems_flush_dcache_region((unsigned long)desc, sizeof(hal_geth_dma_desc_t));
        dsb(v);
    }
    return 0;
}
  201. static irqreturn_t geth_irq_handler(int irq, void *dev_id)
  202. {
  203. int int_sta_value;
  204. int_sta_value = hal_readl(rt_geth_dev.iobase + GETH_INT_STA);
  205. int length;
  206. int ret = 0;
  207. if(RX_INT & int_sta_value)
  208. {
  209. hal_writel(RX_INT,rt_geth_dev.iobase + GETH_INT_STA);
  210. geth_rx_int_disable(rt_geth_dev.iobase);
  211. eth_device_ready(&(rt_geth_dev.parent));
  212. }
  213. if(TX_INT & int_sta_value)
  214. {
  215. hal_writel(RX_INT,rt_geth_dev.iobase + GETH_INT_STA);
  216. }
  217. /*clear all interrupt status*/
  218. hal_writel(int_sta_value,rt_geth_dev.iobase + GETH_INT_STA);
  219. return 0;
  220. }
  221. int is_enough_desc_available(struct dma_desc *entry)
  222. {
  223. struct pbuf *q = RT_NULL;
  224. struct dma_desc *desc;
  225. desc = entry;
  226. awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
  227. dsb(v);
  228. if(desc->desc0.tx.own != 0)
  229. {
  230. printf("desc %08x desc0 %08x desc1 %08x\n",desc,desc->desc0,desc->desc1);
  231. return -RT_ERROR;
  232. }
  233. return RT_EOK;
  234. }
  235. int wait_tx_completed(struct dma_desc *entry)
  236. {
  237. struct pbuf *q = RT_NULL;
  238. struct dma_desc *desc;
  239. unsigned int timeout_cnt = 0;
  240. desc = entry;
  241. awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
  242. dsb(v);
  243. while(desc_get_own(desc))
  244. {
  245. awos_arch_mems_flush_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
  246. dsb(v);
  247. timeout_cnt++;
  248. if(timeout_cnt > 1000)
  249. {
  250. printf("emac send data timeout \n");
  251. return -RT_ERROR;
  252. }
  253. }
  254. desc_tag_clean(desc);
  255. dsb(v);
  256. awos_arch_mems_clean_dcache_region((unsigned long)desc,sizeof(hal_geth_dma_desc_t));
  257. return RT_EOK;
  258. }
  259. static rt_err_t rt_geth_xmit(rt_device_t dev, struct pbuf *p)
  260. {
  261. unsigned int entry;
  262. struct pbuf *q = RT_NULL;
  263. struct dma_desc *first, *paddr ,*end;
  264. static int tx_dirty = 0;
  265. static int tx_clean = 0;
  266. int desc_value = 0;
  267. void *dist = NULL;
  268. int ret = 0;
  269. unsigned int i = 0;
  270. unsigned int copy_offset = 0;
  271. if (!rt_geth_dev.phy_link_status) return -RT_ERROR;
  272. first = rt_geth_dev.get_buffer_config.dma_desc_tx + tx_dirty;
  273. ret = is_enough_desc_available(first);
  274. if(ret < 0)
  275. {
  276. return -RT_ERROR;
  277. }
  278. copy_offset = 0;
  279. for(q = p;q != RT_NULL;q=q->next)
  280. {
  281. dist = (void *)((unsigned long)first->desc2);
  282. rt_memcpy(dist+copy_offset,q->payload,q->len);
  283. copy_offset += q->len;
  284. if(copy_offset >= ((1 << 11) - 1))
  285. {
  286. printf("send data exceed max len copy_offset %d\n",copy_offset);
  287. return -RT_ERROR;
  288. }
  289. }
  290. desc_buf_set(first, 0, copy_offset);
  291. desc_add_tag(first);
  292. tx_dirty = circ_inc(tx_dirty, DMA_DESC_TX_NUM);
  293. end = first;
  294. desc_set_own(first);
  295. desc_tx_close(first, end, 0);
  296. dsb(v);
  297. awos_arch_mems_clean_dcache_region((unsigned long)first,sizeof(hal_geth_dma_desc_t));
  298. awos_arch_mems_clean_dcache_region((unsigned long)first->desc2,copy_offset);
  299. dsb(v);
  300. /* Enable transmit and Poll transmit */
  301. geth_tx_poll(rt_geth_dev.iobase);
  302. ret = wait_tx_completed(first);
  303. return ret;
  304. }
  305. static struct pbuf *rt_geth_recv(rt_device_t dev)
  306. {
  307. static struct pbuf *p_s = RT_NULL;
  308. struct pbuf *p = RT_NULL;
  309. int status;
  310. uint16_t length = 0;
  311. static int first_flag = 0;
  312. uint64_t before_get_data_tick;
  313. uint64_t diff_tick0;
  314. uint64_t diff_tick1;
  315. uint64_t diff_tick2;
  316. if(p_s == RT_NULL)
  317. {
  318. p_s = pbuf_alloc(PBUF_RAW, ENET_FRAME_MAX_FRAMELEN, PBUF_POOL);
  319. if(p_s == RT_NULL)
  320. {
  321. return RT_NULL;
  322. }
  323. }
  324. p = p_s;
  325. status = read_data_from_eth(dev,p,&length);
  326. if(status == -1)
  327. {
  328. geth_rx_int_enable(rt_geth_dev.iobase);
  329. return NULL;
  330. }
  331. pbuf_realloc(p, length);
  332. p_s = RT_NULL;
  333. return p;
  334. }
  335. static int rx_status(hal_geth_dma_desc_t *p)
  336. {
  337. int ret = good_frame;
  338. if (p->desc0.rx.last_desc == 0)
  339. ret = discard_frame;
  340. if (p->desc0.rx.frm_type && (p->desc0.rx.chsum_err
  341. || p->desc0.rx.ipch_err))
  342. ret = discard_frame;
  343. if (p->desc0.rx.err_sum)
  344. ret = discard_frame;
  345. if (p->desc0.rx.len_err)
  346. ret = discard_frame;
  347. if (p->desc0.rx.mii_err)
  348. ret = discard_frame;
  349. return ret;
  350. }
/* Program 'addr' into the controller's MAC address slot 0. */
static void geth_set_hwaddr(unsigned char *addr)
{
    geth_set_mac_addr(rt_geth_dev.iobase, addr, 0);
}
/* Placeholder: reading the MAC address back is not implemented. */
static void geth_get_hwaddr(struct eth_device *dev)
{
}
  358. /********************************************************
  359. RGMII RMI D1 F133
  360. ---------------------------------------------------------
  361. RXD3 / PE14 PG9 / PG9
  362. RXD2 / PE13 PG8 PE13 PG8
  363. RXD1 RXD1 PE2 PG2 PE2 PG2
  364. RXD0 RXD0 PE1 PG1 PE1 PG1
  365. RXCK / PE15 PG10 / PG10
  366. RXCTRL CRS-DV PE0 PG0 PE0 PG0
  367. TXD3 / PE12 PG7 PE12 PG7
  368. TXD2 / PE11 PG6 PE11 PG6
  369. TXD1 TXD1 PE5 PG5 PE5 PG5
  370. TXD0 TXD0 PE4 PG4 PE4 PG4
  371. TXCK TXCK PE3 PG3 PE3 PG3
  372. TXCTL TXEN PE6 PG12 PE6 PG12
  373. CLKIN RXER PE7 PG13 PE7 PG13
  374. MDC MDC PE8 PG14 PE8 PG14
  375. MDIO MDIO PE9 PG15 PE9 PG15
  376. EPHY-25M EPHY-25M PE10 PG11 PE10 PG11
  377. */
  378. static void geth_pinctrl_init(void)
  379. {
  380. #ifdef GMAC_USING_GPIOE
  381. hal_gpio_pinmux_set_function(GPIO_PE0, 8);
  382. hal_gpio_pinmux_set_function(GPIO_PE1, 8);
  383. hal_gpio_pinmux_set_function(GPIO_PE2, 8);
  384. hal_gpio_pinmux_set_function(GPIO_PE3, 8);
  385. hal_gpio_pinmux_set_function(GPIO_PE4, 8);
  386. hal_gpio_pinmux_set_function(GPIO_PE5, 8);
  387. hal_gpio_pinmux_set_function(GPIO_PE6, 8);
  388. hal_gpio_pinmux_set_function(GPIO_PE7, 8);
  389. hal_gpio_pinmux_set_function(GPIO_PE8, 8);
  390. hal_gpio_pinmux_set_function(GPIO_PE9, 8);
  391. hal_gpio_pinmux_set_function(GPIO_PE10, 8);
  392. #ifdef GMAC_USING_RGMII
  393. hal_gpio_pinmux_set_function(GPIO_PE11, 8);
  394. hal_gpio_pinmux_set_function(GPIO_PE12, 8);
  395. hal_gpio_pinmux_set_function(GPIO_PE13, 8);
  396. #ifdef BOARD_allwinnerd1
  397. hal_gpio_pinmux_set_function(GPIO_PE14, 8);
  398. hal_gpio_pinmux_set_function(GPIO_PE15, 8);
  399. #endif /* BOARD_allwinnerd1 */
  400. #ifdef BOARD_allwinnerd1s
  401. hal_gpio_pinmux_set_function(GPIO_PG9, 4);
  402. hal_gpio_pinmux_set_function(GPIO_PG10, 4);
  403. #endif /* BOARD_allwinnerd1s */
  404. #endif /* GMAC_USING_RGMII */
  405. #endif /* GMAC_USING_GPIOE */
  406. #ifdef GMAC_USING_GPIOG
  407. hal_gpio_pinmux_set_function(GPIO_PG0, 4);
  408. hal_gpio_pinmux_set_function(GPIO_PG1, 4);
  409. hal_gpio_pinmux_set_function(GPIO_PG2, 4);
  410. hal_gpio_pinmux_set_function(GPIO_PG3, 4);
  411. hal_gpio_pinmux_set_function(GPIO_PG4, 4);
  412. hal_gpio_pinmux_set_function(GPIO_PG5, 4);
  413. #ifdef GMAC_USING_RGMII
  414. hal_gpio_pinmux_set_function(GPIO_PG6, 4);
  415. hal_gpio_pinmux_set_function(GPIO_PG7, 4);
  416. hal_gpio_pinmux_set_function(GPIO_PG8, 4);
  417. hal_gpio_pinmux_set_function(GPIO_PG9, 4);
  418. hal_gpio_pinmux_set_function(GPIO_PG10, 4);
  419. #endif /* GMAC_USING_RGMII */
  420. hal_gpio_pinmux_set_function(GPIO_PG11, 4);
  421. hal_gpio_pinmux_set_function(GPIO_PG12, 4);
  422. hal_gpio_pinmux_set_function(GPIO_PG13, 4);
  423. hal_gpio_pinmux_set_function(GPIO_PE14, 4);
  424. hal_gpio_pinmux_set_function(GPIO_PE15, 4);
  425. #endif /* GMAC_USING_GPIOG */
  426. }
  427. static int geth_phy_init(struct eth_device *dev)
  428. {
  429. uint32_t value;
  430. uint16_t phy_val;
  431. int i;
  432. uint32_t phy_addr = 0x1f;
  433. int duplex;
  434. int speed;
  435. uint16_t temp = 0;
  436. for (i = 0; i < 0x1f; i++) {
  437. value = (geth_phy_read((char *)dev, i, MII_PHYSID1,NULL)
  438. & 0xffff) << 16;
  439. value |= (geth_phy_read((char *)dev, i, MII_PHYSID2,NULL) & 0xffff);
  440. if ((value & 0x1fffffff) == 0x1fffffff) {
  441. rt_thread_delay(RT_TICK_PER_SECOND/100);
  442. continue;
  443. }
  444. phy_addr = i;
  445. break;
  446. }
  447. phy_addr = i;
  448. if (phy_addr == 0x1f) {
  449. printf("No PHY device!\n");
  450. return -1;
  451. }
  452. phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
  453. geth_phy_write((char *)dev, phy_addr, MII_BMCR, phy_val | BMCR_RESET);
  454. while (geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL) & BMCR_RESET);
  455. phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
  456. geth_phy_write((char *)dev, phy_addr, MII_BMCR, phy_val | BMCR_FULLDPLX);
  457. /* Reset phy chip */
  458. phy_val = geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL);
  459. geth_phy_write((char *)dev, phy_addr, MII_BMCR, (phy_val & ~BMCR_PDOWN));
  460. while (geth_phy_read((char *)dev, phy_addr, MII_BMCR,NULL) & BMCR_PDOWN);
  461. /* Wait BMSR_ANEGCOMPLETE be set */
  462. while (!(geth_phy_read((char *)dev, phy_addr, MII_BMSR,NULL) & BMSR_ANEGCOMPLETE)) {
  463. if (i > 40) {
  464. printf("Warning: Auto negotiation timeout!\n");
  465. return -1;
  466. }
  467. rt_thread_delay(RT_TICK_PER_SECOND/2);
  468. i++;
  469. }
  470. phy_val = geth_phy_read((char *)dev, phy_addr, MII_RESV2,NULL);
  471. temp = (phy_val>>4) & 0x3;
  472. switch(temp)
  473. {
  474. case 2:
  475. speed = 1000;
  476. break;
  477. case 1:
  478. speed = 100;
  479. break;
  480. case 0:
  481. speed = 10;
  482. break;
  483. default :
  484. break;
  485. }
  486. temp = phy_val & 0x08;
  487. duplex = (temp) ? 1 : 0;
  488. geth_set_link_mode(rt_geth_dev.iobase,duplex,speed);
  489. return 0;
  490. }
/*
 * Allocate the GMAC DMA resources: RX/TX packet buffers and RX/TX
 * descriptor rings, recording both the virtual address (for the CPU)
 * and the physical address (for the DMA engine) of each in rt_geth_dev.
 *
 * Under RT_USING_SMART the memory comes from page allocations; otherwise
 * from rt_malloc. Returns 0 on success, -1 on any allocation failure.
 *
 * NOTE(review): on a partial failure the earlier allocations are not
 * freed — tolerable only because init failure is treated as fatal.
 */
static int geth_dma_desc_init(void)
{
    void *temp = RT_NULL;
    /* RX packet buffers: one DMA_MEM_ALIGN_SIZE slot per RX descriptor. */
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.rx_buff_addr = rt_pages_alloc(RX_BUFFER_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.rx_buff_addr = rt_malloc(DMA_MEM_ALIGN_SIZE * DMA_DESC_RX_NUM);
#endif
    if (!rt_geth_dev.get_buffer_config.rx_buff_addr)
    {
        printf("ERROR: rx buff page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.rx_buff_addr), (SYS_PAGE_SIZE<<RX_BUFFER_INDEX_NUM));
    rt_geth_dev.get_buffer_config.phy_rx_buff_addr = (void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.rx_buff_addr);
    /* TX packet buffers. */
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.tx_buff_addr = rt_pages_alloc(TX_BUFFER_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.tx_buff_addr = rt_malloc(DMA_MEM_ALIGN_SIZE * DMA_DESC_TX_NUM);
#endif
    if (!rt_geth_dev.get_buffer_config.tx_buff_addr)
    {
        printf("ERROR: tx buff page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.tx_buff_addr), (SYS_PAGE_SIZE<<TX_BUFFER_INDEX_NUM));
    rt_geth_dev.get_buffer_config.phy_tx_buff_addr = (void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.tx_buff_addr);
    /* RX descriptor ring. */
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.dma_desc_rx = (hal_geth_dma_desc_t *)rt_pages_alloc(RX_BD_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.dma_desc_rx = (hal_geth_dma_desc_t *)rt_malloc(sizeof(hal_geth_dma_desc_t) * DMA_DESC_RX_NUM);
#endif
    if (!rt_geth_dev.get_buffer_config.dma_desc_rx)
    {
        printf("ERROR: rx bd page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx), (SYS_PAGE_SIZE<<RX_BD_INDEX_NUM));
    rt_geth_dev.get_buffer_config.phy_dma_desc_rx = (hal_geth_dma_desc_t *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx);
    /* TX descriptor ring. */
#ifdef RT_USING_SMART
    rt_geth_dev.get_buffer_config.dma_desc_tx = (hal_geth_dma_desc_t *)rt_pages_alloc(TX_BD_INDEX_NUM);
#else
    rt_geth_dev.get_buffer_config.dma_desc_tx = (hal_geth_dma_desc_t *)rt_malloc(sizeof(hal_geth_dma_desc_t) * DMA_DESC_TX_NUM);
#endif
    if (!rt_geth_dev.get_buffer_config.dma_desc_tx)
    {
        printf("ERROR: tx bd page alloc failed\n");
        return -1;
    }
    //temp = (void *)rt_ioremap_nocache((void *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx), (SYS_PAGE_SIZE<<TX_BD_INDEX_NUM));
    rt_geth_dev.get_buffer_config.phy_dma_desc_tx = (hal_geth_dma_desc_t *)awos_arch_virt_to_phys((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx);
    return 0;
}
/*
 * Build a chained DMA descriptor ring.
 *
 * desc             virtual base of the descriptor array (written by CPU)
 * addr             physical base of the same array (read by DMA)
 * first_buff_addr  physical base of the packet buffer area
 * size             number of descriptors in the ring
 * align_size       byte stride between consecutive packet buffers
 *
 * In chained mode desc3 of each descriptor holds the physical address of
 * the next descriptor; the last descriptor points back to the head.
 * desc2 of each descriptor holds the physical address of its buffer.
 */
void desc_init_chain(hal_geth_dma_desc_t *desc, unsigned long addr, unsigned long first_buff_addr, unsigned int size, unsigned int align_size)
{
    /* In chained mode the desc3 points to the next element in the ring.
     * The latest element has to point to the head.
     */
    int i;
    hal_geth_dma_desc_t *p = desc;
    unsigned long dma_desc_phy = addr;
    unsigned long dma_buff_phy = first_buff_addr;
    for (i = 0; i < size; i++) {
        if (i == (size - 1))
        {
            /* Tail descriptor: close the ring back to the head. */
            p->desc2 = (u32)dma_buff_phy;
            p->desc3 = (u32)addr;
        }
        else
        {
            dma_desc_phy += sizeof(hal_geth_dma_desc_t);
            p->desc2 = (u32)dma_buff_phy;
            p->desc3 = (u32)dma_desc_phy;
            p++;
        }
        dma_buff_phy += align_size;
    }
}
/*
 * One-time controller bring-up (RT-Thread device init hook): clocks,
 * pinmux, MAC soft reset and configuration, DMA ring setup, interrupt
 * registration, and finally TX/RX start.
 *
 * Returns 0 on success, -1 on DMA-descriptor or IRQ setup failure.
 */
static rt_err_t rt_geth_init(rt_device_t dev)
{
    uint32_t value;
    /* Enable clock */
    uint32_t used_type = rt_geth_dev.used_type;
    uint32_t tx_delay = rt_geth_dev.tx_delay;
    uint32_t rx_delay = rt_geth_dev.rx_delay;
    uint32_t phy_interface = rt_geth_dev.phy_interface;
    geth_clk_enable(used_type, phy_interface, tx_delay, rx_delay);
    /* Pinctrl init */
    geth_pinctrl_init();
    /* MAC controller soft reset */
    value = geth_mac_reset(rt_geth_dev.iobase);
    if (!value)
    {
        printf("Gmac controller softs reset success\n");
    }
    else
    {
        printf("Gmac controller soft reset failed value %08x\n", value);
    }
    /* MAC controller initialize */
    geth_mac_init(rt_geth_dev.iobase);
    geth_set_hwaddr(rt_geth_dev.dev_addr);
    /* Frame filter */
    geth_set_filter(rt_geth_dev.iobase);
    /* Burst should be 8 */
    value = hal_readl(rt_geth_dev.iobase + GETH_BASIC_CTL1);
    value |= (8 << 24);
    hal_writel(value, rt_geth_dev.iobase + GETH_BASIC_CTL1);
    /* Disable all interrupt of dma */
    geth_all_int_disable(rt_geth_dev.iobase);
    value = geth_dma_desc_init();
    if (value < 0) {
        printf("Gmac dma desc init fail!\n");
        return -1;
    }
    memset((void *)rt_geth_dev.get_buffer_config.dma_desc_tx, 0, sizeof(hal_geth_dma_desc_t)*DMA_DESC_TX_NUM);
    memset((void *)rt_geth_dev.get_buffer_config.dma_desc_rx, 0, sizeof(hal_geth_dma_desc_t)*DMA_DESC_RX_NUM);
    /* Chain the TX/RX rings to their physical packet buffers. */
    desc_init_chain(rt_geth_dev.get_buffer_config.dma_desc_tx, (unsigned long)rt_geth_dev.get_buffer_config.phy_dma_desc_tx,
        (unsigned long)rt_geth_dev.get_buffer_config.phy_tx_buff_addr, DMA_DESC_TX_NUM, DMA_MEM_ALIGN_SIZE);
    desc_init_chain(rt_geth_dev.get_buffer_config.dma_desc_rx, (unsigned long)rt_geth_dev.get_buffer_config.phy_dma_desc_rx,
        (unsigned long)rt_geth_dev.get_buffer_config.phy_rx_buff_addr, DMA_DESC_RX_NUM, DMA_MEM_ALIGN_SIZE);
    /* Hand all RX descriptors to the DMA engine. */
    gmac_rx_desc_fill(NULL);
    /* Point the controller at the rings.
     * NOTE(review): the virtual ring addresses are written here while the
     * desc3 chain uses physical addresses — confirm the mapping is 1:1. */
    hal_writel((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx, rt_geth_dev.iobase + GETH_TX_DESC_LIST);
    hal_writel((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx, rt_geth_dev.iobase + GETH_RX_DESC_LIST);
    /* Flush the initialized rings to memory before DMA starts. */
    awos_arch_mems_clean_dcache_region((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_tx, sizeof(hal_geth_dma_desc_t)*DMA_DESC_TX_NUM);
    awos_arch_mems_clean_dcache_region((unsigned long)rt_geth_dev.get_buffer_config.dma_desc_rx, sizeof(hal_geth_dma_desc_t)*DMA_DESC_RX_NUM);
    dsb(v);
    /* start tx & rx */
    geth_start_tx(rt_geth_dev.iobase);
    geth_start_rx(rt_geth_dev.iobase);
    /* Enable transmit & receive */
    geth_mac_enable(rt_geth_dev.iobase);
    if (request_irq(GETH_IRQ_NUM, geth_irq_handler, 0, "geth", (void *)&rt_geth_dev) < 0)
    {
        printf("request irq error\n");
        return -1;
    }
    enable_irq(GETH_IRQ_NUM);
    geth_rx_int_enable(rt_geth_dev.iobase);
    return 0;
}
/* Tear-down counterpart of rt_geth_init: stop the MAC and gate its clocks. */
static void rt_geth_uninitialize(rt_device_t dev)
{
    geth_mac_disable(rt_geth_dev.iobase);
    geth_clk_disable();
}
/* Device open hook: nothing to do — the controller is set up in rt_geth_init. */
static rt_err_t rt_geth_open(rt_device_t dev, rt_uint16_t oflag)
{
    printf("gmac open\n");
    return RT_EOK;
}
/* Device close hook: intentionally a no-op (the MAC keeps running). */
static rt_err_t rt_geth_close(rt_device_t dev)
{
    printf("gmac close\n");
    return RT_EOK;
}
/* Byte-stream read is not supported on this NIC; sets -RT_ENOSYS, returns 0. */
static rt_ssize_t rt_geth_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    printf("gmac read\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
/* Byte-stream write is not supported; frames go through eth_tx (rt_geth_xmit). */
static rt_ssize_t rt_geth_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    printf("gmac write\n");
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
  659. static rt_err_t rt_geth_control(rt_device_t dev, int cmd, void *args)
  660. {
  661. switch(cmd)
  662. {
  663. case NIOCTL_GADDR:
  664. /* get mac address */
  665. if(args) {
  666. rt_memcpy(args, rt_geth_dev.dev_addr, 6);
  667. } else {
  668. return -RT_ERROR;
  669. }
  670. break;
  671. default :
  672. break;
  673. }
  674. return RT_EOK;
  675. }
  676. int is_data_availabl()
  677. {
  678. hal_geth_dma_desc_t *rx_p = NULL;
  679. rx_p = rt_geth_dev.get_buffer_config.dma_desc_rx + rx_clean;
  680. awos_arch_mems_flush_dcache_region((unsigned long)rx_p,sizeof(hal_geth_dma_desc_t));
  681. awos_arch_mems_flush_dcache_region((unsigned long)rx_p->desc2,2048);
  682. dsb(v);
  683. if(!rx_p->desc0.rx.own)
  684. {
  685. if((rx_p->desc0.rx.last_desc != 1)||(rx_p->desc0.rx.first_desc != 1))
  686. {
  687. printf("first %d last %d\n",rx_p->desc0.rx.first_desc,rx_p->desc0.rx.last_desc);
  688. }
  689. return 0;
  690. }
  691. else
  692. {
  693. return -1;
  694. }
  695. return 0;
  696. }
  697. static void phy_link_detect(void *param)
  698. {
  699. uint16_t bmsr = 0;
  700. uint16_t link_status = 0;
  701. uint16_t link_status_old = 0;
  702. uint16_t phy_val;
  703. int ret = -1;
  704. while(1)
  705. {
  706. bmsr = geth_phy_read(NULL, 0, MII_BMSR,NULL);
  707. link_status = bmsr & BMSR_LSTATUS;
  708. if(link_status_old != link_status)
  709. {
  710. if(link_status)
  711. {
  712. ret = geth_phy_init((struct eth_device *)param);
  713. if(ret == 0)
  714. {
  715. geth_link_change(&rt_geth_dev,1);
  716. }
  717. }
  718. else
  719. {
  720. if(link_status_old != link_status)
  721. {
  722. geth_link_change(&rt_geth_dev,0);
  723. }
  724. }
  725. }
  726. link_status_old = link_status;
  727. rt_thread_delay(RT_TICK_PER_SECOND);
  728. }
  729. }
/*
 * Register the GMAC as RT-Thread ethernet device "e0" and spawn the
 * link-detection thread.
 *
 * NOTE(review): a random MAC address is generated on every boot — there
 * is no persistent MAC; confirm this is acceptable for the product.
 */
void rt_geth_driver_init(void)
{
    rt_err_t state = RT_EOK;
    rt_geth_dev.iobase = IOBASE;
    rt_geth_dev.phy_interface = PHY_INTERFACE_MODE_RGMII;
    rt_geth_dev.used_type = EXT_PHY;
    rt_geth_dev.tx_delay = 3;   /* RGMII TX clock delay step */
    rt_geth_dev.rx_delay = 0;   /* RGMII RX clock delay step */
    random_ether_addr(rt_geth_dev.dev_addr);
    /* Standard rt_device hooks. */
    rt_geth_dev.parent.parent.init = rt_geth_init;
    rt_geth_dev.parent.parent.open = rt_geth_open;
    rt_geth_dev.parent.parent.close = rt_geth_close;
    rt_geth_dev.parent.parent.read = rt_geth_read;
    rt_geth_dev.parent.parent.write = rt_geth_write;
    rt_geth_dev.parent.parent.control = rt_geth_control;
    rt_geth_dev.parent.parent.user_data = RT_NULL;
    /* lwIP glue: frame-level receive/transmit. */
    rt_geth_dev.parent.eth_rx = rt_geth_recv;
    rt_geth_dev.parent.eth_tx = rt_geth_xmit;
    /* register eth device */
    state = eth_device_init(&(rt_geth_dev.parent), "e0");
    if (RT_EOK == state) {
        printf("gmac device init success\n");
    } else {
        printf("gmac device init failed: %d\n", state);
    }
    /* Background thread that polls the PHY link bit once per second. */
    rt_thread_t link_detect;
    link_detect = rt_thread_create("link_detect",
        phy_link_detect,
        (void *)&rt_geth_dev,
        4096,
        13,
        2);
    if (link_detect != RT_NULL)
    {
        rt_thread_startup(link_detect);
    }
    return;
}
/* Boot-time hook: registers the GMAC driver at the device-init stage.
 * NOTE(review): "drvier" is a typo, but the symbol is referenced by the
 * INIT_DEVICE_EXPORT registration below, so it is kept unchanged. */
static int rt_hw_eth_drvier_init(void)
{
    rt_geth_driver_init();
    return 0;
}
INIT_DEVICE_EXPORT(rt_hw_eth_drvier_init);