davinci_emac.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author    Notes
 * 2011-01-13     weety     first version
 */

#include <rtthread.h>
#include <netif/ethernetif.h>
#include <lwipopts.h>
#include <dm36x.h>
#include "davinci_emac.h"

#define MMU_NOCACHE_ADDR(a)    ((rt_uint32_t)a | (1UL << 29))
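/* Assumption: on this DM36x port, setting bit 29 selects an uncached alias
 * of the same physical memory in the MMU mapping, so MMU_NOCACHE_ADDR() can
 * be used to touch DMA-shared data without going through the D-cache. */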
#define CACHE_LINE_SIZE 32

extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);

/* EMAC internal utility function */
static inline rt_uint32_t emac_virt_to_phys(void *addr)
{
    return (rt_uint32_t)addr;
}

static inline rt_uint32_t virt_to_phys(void *addr)
{
    return (rt_uint32_t)addr;
}

/* Cache macros - Packet buffers would be from pbuf pool which is cached */
#define EMAC_VIRT_NOCACHE(addr) (addr)
#define EMAC_CACHE_INVALIDATE(addr, size) \
    mmu_invalidate_dcache(addr, size)
#define EMAC_CACHE_WRITEBACK(addr, size) \
    mmu_clean_dcache(addr, size)
#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
    mmu_clean_invalidated_dcache(addr, size)

/* DM644x does not have BD's in cached memory - so no cache functions */
#define BD_CACHE_INVALIDATE(addr, size)
#define BD_CACHE_WRITEBACK(addr, size)
#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)

static struct emac_priv davinci_emac_device;

/* clock frequency for EMAC */
static unsigned long emac_bus_frequency;
static unsigned long mdio_max_freq;

#define EMAC_AUTONEG_TIMEOUT 5000000
#define EMAC_LINK_TIMEOUT    500000
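/* Both timeouts above are expressed in microseconds; the polling loops below
 * consume them in udelay(100) steps, i.e. roughly 5 s for autonegotiation and
 * 0.5 s for the link wait. */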
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
    "No error", "SOP error", "Ownership bit not set in SOP buffer",
    "Zero Next Buffer Descriptor Pointer Without EOP",
    "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
    "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Reserved", "Reserved", "Reserved"
};

/* EMAC RX Host Error description strings */
static char *emac_rxhost_errcodes[16] = {
    "No error", "Reserved", "Ownership bit not set in input buffer",
    "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
    "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Reserved", "Reserved", "Reserved"
};

#define emac_read(reg)            davinci_readl(priv->emac_base + (reg))
#define emac_write(reg, val)      davinci_writel(val, priv->emac_base + (reg))
#define emac_ctrl_read(reg)       davinci_readl((priv->ctrl_base + (reg)))
#define emac_ctrl_write(reg, val) davinci_writel(val, (priv->ctrl_base + (reg)))
#define emac_mdio_read(reg)       davinci_readl(priv->mdio_base + (reg))
#define emac_mdio_write(reg, val) davinci_writel(val, (priv->mdio_base + (reg)))
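/* Note: the register access macros above expect a local variable named
 * "priv" (struct emac_priv *) to be in scope at every call site. */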
static void emac_int_enable(struct emac_priv *priv);
static void emac_int_disable(struct emac_priv *priv);
static int emac_init_txch(struct emac_priv *priv, rt_uint32_t ch);

/* PHY/MII bus related */
/* Wait until mdio is ready for next command */
#define MDIO_WAIT_FOR_USER_ACCESS \
    while ((emac_mdio_read((MDIO_USERACCESS(0))) & \
            MDIO_USERACCESS_GO) != 0)

static int emac_mii_read(struct emac_priv *priv, int phy_id, int phy_reg)
{
    unsigned int phy_data = 0;
    unsigned int phy_control;

    /* Wait until mdio is ready for next command */
    MDIO_WAIT_FOR_USER_ACCESS;

    phy_control = (MDIO_USERACCESS_GO |
                   MDIO_USERACCESS_READ |
                   ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
                   ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
                   (phy_data & MDIO_USERACCESS_DATA));
    emac_mdio_write(MDIO_USERACCESS(0), phy_control);

    /* Wait until mdio is ready for next command */
    MDIO_WAIT_FOR_USER_ACCESS;

    return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
}

static int emac_mii_write(struct emac_priv *priv, int phy_id,
                          int phy_reg, rt_uint16_t phy_data)
{
    unsigned int control;

    /* Wait until mdio is ready for next command */
    MDIO_WAIT_FOR_USER_ACCESS;

    control = (MDIO_USERACCESS_GO |
               MDIO_USERACCESS_WRITE |
               ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
               ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
               (phy_data & MDIO_USERACCESS_DATA));
    emac_mdio_write(MDIO_USERACCESS(0), control);

    return 0;
}
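/* The MDIO state machine clock is the peripheral bus clock divided by
 * (CLKDIV + 1), so the divider below is (bus_freq / mdio_max_freq) - 1 to
 * stay at or below the PHY's maximum MDC frequency. */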
static int emac_mii_reset(struct emac_priv *priv)
{
    unsigned int clk_div;
    int mdio_bus_freq = emac_bus_frequency;

    if (mdio_max_freq && mdio_bus_freq)
        clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
    else
        clk_div = 0xFF;
    clk_div &= MDIO_CONTROL_CLKDIV;

    /* Set enable and clock divider in MDIOControl */
    emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));

    return 0;
}

/* Duplex, half or full. */
#define DUPLEX_HALF 0x00
#define DUPLEX_FULL 0x01
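/* Crude calibration-free busy-wait: the inner loop count is a rough,
 * board-specific approximation of one microsecond and is not derived from
 * the actual CPU clock. */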
static void udelay(rt_uint32_t us)
{
    rt_uint32_t len;
    for (; us > 0; us--)
        for (len = 0; len < 10; len++);
}

static void davinci_emac_phy_reset(rt_device_t dev)
{
    int i;
    rt_uint16_t status, adv;
    struct emac_priv *priv = dev->user_data;

    adv = ADVERTISE_CSMA | ADVERTISE_ALL;
    emac_mii_write(priv, priv->phy_addr, MII_ADVERTISE, adv);
    rt_kprintf("%s: Starting autonegotiation...\n", dev->parent.name);
    emac_mii_write(priv, priv->phy_addr, MII_BMCR, (BMCR_ANENABLE
                   | BMCR_ANRESTART));

    for (i = 0; i < EMAC_AUTONEG_TIMEOUT / 100; i++) {
        status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
        if (status & BMSR_ANEGCOMPLETE)
            break;
        udelay(100);
    }

    if (status & BMSR_ANEGCOMPLETE)
        rt_kprintf("%s: Autonegotiation complete\n", dev->parent.name);
    else
        rt_kprintf("%s: Autonegotiation timed out (status=0x%04x)\n",
                   dev->parent.name, status);
}
static int davinci_emac_phy_init(rt_device_t dev)
{
    struct emac_priv *priv = dev->user_data;
    rt_uint16_t phy_id, status, adv, lpa;
    int media, speed, duplex;
    int i;

    /* Check if the PHY is up to snuff... */
    phy_id = emac_mii_read(priv, priv->phy_addr, MII_PHYSID1);
    if (phy_id == 0xffff) {
        rt_kprintf("%s: No PHY present\n", dev->parent.name);
        return 0;
    }

    status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
    if (!(status & BMSR_LSTATUS)) {
        /* Try to re-negotiate if we don't have link already. */
        davinci_emac_phy_reset(dev);

        for (i = 0; i < EMAC_LINK_TIMEOUT / 100; i++) {
            status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
            if (status & BMSR_LSTATUS)
                break;
            udelay(100);
        }
    }

    if (!(status & BMSR_LSTATUS)) {
        rt_kprintf("%s: link down (status: 0x%04x)\n",
                   dev->parent.name, status);
        priv->link = 0;
        eth_device_linkchange(&priv->parent, RT_FALSE);
        return 0;
    } else {
        adv = emac_mii_read(priv, priv->phy_addr, MII_ADVERTISE);
        lpa = emac_mii_read(priv, priv->phy_addr, MII_LPA);
        media = mii_nway_result(lpa & adv);
        speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
                 ? 1 : 0);
        duplex = (media & ADVERTISE_FULL) ? 1 : 0;
        rt_kprintf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
                   dev->parent.name,
                   speed ? "100" : "10",
                   duplex ? "full" : "half",
                   lpa);
        priv->speed = speed;
        priv->duplex = duplex;
        priv->link = 1;
        eth_device_linkchange(&priv->parent, RT_TRUE);
        return 1;
    }
}
/**
 * emac_update_phystatus: Update Phy status
 * @priv: The DaVinci EMAC driver private structure
 *
 * Updates phy status and takes action for network queue if required
 * based upon link status
 *
 */
static void emac_update_phystatus(struct emac_priv *priv)
{
    rt_uint32_t mac_control;
    rt_uint32_t new_duplex;
    rt_uint32_t cur_duplex;

    mac_control = emac_read(EMAC_MACCONTROL);
    cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
                 DUPLEX_FULL : DUPLEX_HALF;

    if (priv->phy_mask)
        new_duplex = priv->duplex;
    else
        new_duplex = DUPLEX_FULL;

    /* We get called only if link has changed (speed/duplex/status) */
    if ((priv->link) && (new_duplex != cur_duplex)) {
        priv->duplex = new_duplex;
        if (DUPLEX_FULL == priv->duplex)
            mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
        else
            mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
    }

    if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
        mac_control = emac_read(EMAC_MACCONTROL);
        mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
                        EMAC_DM646X_MACCONTORL_GIGFORCE);
    } else {
        /* Clear the GIG bit and GIGFORCE bit */
        mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
                         EMAC_DM646X_MACCONTORL_GIG);

        if (priv->rmii_en && (priv->speed == SPEED_100))
            mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
        else
            mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
    }

    /* Update mac_control if changed */
    emac_write(EMAC_MACCONTROL, mac_control);

#if 0
    if (priv->link) {
        /* link ON */
        /* reactivate the transmit queue if it is stopped */
    } else {
        /* link OFF */
    }
#endif
}
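/**
 * davinci_emac_update_link: Periodic link monitor
 * @param: The DaVinci EMAC driver private structure
 *
 * Runs as the "link_timer" callback (see rt_davinci_emac_init below): polls
 * the PHY BMSR register, reports link changes to the RT-Thread Ethernet
 * framework via eth_device_linkchange() and refreshes the MAC speed/duplex
 * settings through emac_update_phystatus().
 */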
void davinci_emac_update_link(void *param)
{
    struct emac_priv *priv = param;
    rt_device_t dev = &(priv->parent.parent);
    rt_uint32_t status, status_change = 0;
    rt_uint32_t link;
    rt_uint32_t media;
    rt_uint16_t adv, lpa;

    status = emac_mii_read(priv, priv->phy_addr, MII_BMSR);
    if ((status & BMSR_LSTATUS) == 0)
        link = 0;
    else
        link = 1;

    if (link != priv->link) {
        priv->link = link;
        status_change = 1;
    }

    if (status_change) {
        if (priv->link) {
            adv = emac_mii_read(priv, priv->phy_addr, MII_ADVERTISE);
            lpa = emac_mii_read(priv, priv->phy_addr, MII_LPA);
            media = mii_nway_result(lpa & adv);
            priv->speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
                           ? 100 : 10);
            priv->duplex = (media & ADVERTISE_FULL) ? 1 : 0;
            rt_kprintf("%s: link up (%dMbps/%s-duplex)\n",
                       dev->parent.name, priv->speed,
                       DUPLEX_FULL == priv->duplex ? "Full" : "Half");
            eth_device_linkchange(&priv->parent, RT_TRUE);
        } else {
            rt_kprintf("%s: link down\n", dev->parent.name);
            eth_device_linkchange(&priv->parent, RT_FALSE);
        }
        emac_update_phystatus(priv);
    }
}

/**
 * emac_net_tx_complete: TX packet completion function
 * @priv: The DaVinci EMAC driver private structure
 * @net_data_tokens: packet token - pbuf pointer
 * @num_tokens: number of pbuf's to free
 * @ch: TX channel number
 *
 * Frees the pbuf once packet is transmitted
 *
 */
static int emac_net_tx_complete(struct emac_priv *priv,
                                void **net_data_tokens,
                                int num_tokens, rt_uint32_t ch)
{
    rt_uint32_t cnt;

    for (cnt = 0; cnt < num_tokens; cnt++) {
        struct pbuf *p = (struct pbuf *)net_data_tokens[cnt];
        if (p == RT_NULL)
            continue;
        priv->net_dev_stats.tx_packets++;
        priv->net_dev_stats.tx_bytes += p->len;
        /* free pbuf */
    }
    return 0;
}
/**
 * emac_txch_teardown: TX channel teardown
 * @priv: The DaVinci EMAC driver private structure
 * @ch: TX channel number
 *
 * Called to teardown TX channel
 *
 */
static void emac_txch_teardown(struct emac_priv *priv, rt_uint32_t ch)
{
    rt_uint32_t teardown_cnt = 0xFFFFFFF0; /* Some high value */
    struct emac_txch *txch = priv->txch[ch];
    struct emac_tx_bd __iomem *curr_bd;

    while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
           EMAC_TEARDOWN_VALUE) {
        /* wait till tx teardown complete */
        --teardown_cnt;
        if (0 == teardown_cnt) {
            rt_kprintf("EMAC: TX teardown aborted\n");
            break;
        }
    }
    emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);

    /* process sent packets and return pbuf's to upper layer */
    if (1 == txch->queue_active) {
        curr_bd = txch->active_queue_head;
        while (curr_bd != RT_NULL) {
            emac_net_tx_complete(priv, (void *)
                                 &curr_bd->buf_token, 1, ch);
            if (curr_bd != txch->active_queue_tail)
                curr_bd = curr_bd->next;
            else
                break;
        }
        txch->bd_pool_head = txch->active_queue_head;
        txch->active_queue_head =
            txch->active_queue_tail = RT_NULL;
    }
}

/**
 * emac_stop_txch: Stop TX channel operation
 * @priv: The DaVinci EMAC driver private structure
 * @ch: TX channel number
 *
 * Called to stop TX channel operation
 *
 */
static void emac_stop_txch(struct emac_priv *priv, rt_uint32_t ch)
{
    struct emac_txch *txch = priv->txch[ch];

    if (txch) {
        txch->teardown_pending = 1;
        emac_write(EMAC_TXTEARDOWN, 0);
        emac_txch_teardown(priv, ch);
        txch->teardown_pending = 0;
        emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
    }
}
/**
 * emac_tx_bdproc: TX buffer descriptor (packet) processing
 * @priv: The DaVinci EMAC driver private structure
 * @ch: TX channel number to process buffer descriptors for
 * @budget: number of packets allowed to process
 * @pending: indication to caller that packets are pending to process
 *
 * Processes TX buffer descriptors after packets are transmitted - checks
 * ownership bit on the TX descriptor and requeues it to free pool & frees
 * the PBUF buffer. Only "budget" number of packets are processed and
 * indication of pending packets provided to the caller
 *
 * Returns number of packets processed
 */
static int emac_tx_bdproc(struct emac_priv *priv, rt_uint32_t ch, rt_uint32_t budget)
{
    unsigned long flags;
    rt_uint32_t frame_status;
    rt_uint32_t pkts_processed = 0;
    rt_uint32_t tx_complete_cnt = 0;
    struct emac_tx_bd __iomem *curr_bd;
    struct emac_txch *txch = priv->txch[ch];
    rt_uint32_t *tx_complete_ptr = txch->tx_complete;

    if (1 == txch->teardown_pending) {
        rt_kprintf("DaVinci EMAC:emac_tx_bdproc: "
                   "teardown pending\n");
        return 0; /* don't handle any pkt completions */
    }

    ++txch->proc_count;
    rt_sem_take(&priv->tx_lock, RT_WAITING_FOREVER);

    curr_bd = txch->active_queue_head;
    if (RT_NULL == curr_bd) {
        emac_write(EMAC_TXCP(ch),
                   emac_virt_to_phys(txch->last_hw_bdprocessed));
        txch->no_active_pkts++;
        rt_sem_release(&priv->tx_lock);
        return 0;
    }

    BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
    frame_status = curr_bd->mode;
    while ((curr_bd) &&
           ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
           (pkts_processed < budget)) {
        emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
        txch->active_queue_head = curr_bd->next;
        if (frame_status & EMAC_CPPI_EOQ_BIT) {
            if (curr_bd->next) { /* misqueued packet */
                emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
                ++txch->mis_queued_packets;
            } else {
                txch->queue_active = 0; /* end of queue */
            }
        }

        *tx_complete_ptr = (rt_uint32_t) curr_bd->buf_token;
        ++tx_complete_ptr;
        ++tx_complete_cnt;
        curr_bd->next = txch->bd_pool_head;
        txch->bd_pool_head = curr_bd;
        --txch->active_queue_count;
        pkts_processed++;
        txch->last_hw_bdprocessed = curr_bd;
        curr_bd = txch->active_queue_head;
        if (curr_bd) {
            BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
            frame_status = curr_bd->mode;
        }
    } /* end of pkt processing loop */

    emac_net_tx_complete(priv,
                         (void *)&txch->tx_complete[0],
                         tx_complete_cnt, ch);
    rt_sem_release(&priv->tx_lock);
    return pkts_processed;
}
#define EMAC_ERR_TX_OUT_OF_BD -1

/**
 * emac_send: EMAC Transmit function (internal)
 * @priv: The DaVinci EMAC driver private structure
 * @pkt: packet pointer (contains pbuf ptr)
 * @ch: TX channel number
 *
 * Called by the transmit function to queue the packet in EMAC hardware queue
 *
 * Returns success(0) or error code (typically out of desc's)
 */
static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, rt_uint32_t ch)
{
    unsigned long flags;
    struct emac_tx_bd __iomem *curr_bd;
    struct emac_txch *txch;
    struct emac_netbufobj *buf_list;
    rt_uint32_t num_pkts = 0;
    int retry = 0;

    txch = priv->txch[ch];
    buf_list = pkt->buf_list; /* get handle to the buffer array */

    /* check packet size and pad if short */
    if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
        buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
        pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
    }
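    /* If no free buffer descriptor is available, reclaim descriptors the
     * hardware has already completed and retry; give up with
     * EMAC_ERR_TX_OUT_OF_BD after a few attempts. */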
try:
    rt_sem_take(&priv->tx_lock, RT_WAITING_FOREVER);
    curr_bd = txch->bd_pool_head;
    if (curr_bd == RT_NULL) {
        txch->out_of_tx_bd++;
        rt_sem_release(&priv->tx_lock);
        num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
                                  EMAC_DEF_TX_MAX_SERVICE);
        if (!num_pkts) {
            retry++;
            if (retry > 5)
                return EMAC_ERR_TX_OUT_OF_BD;
            rt_thread_delay(1);
        }
        goto try;
    }

    txch->bd_pool_head = curr_bd->next;
    curr_bd->buf_token = buf_list->buf_token;
    curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
    curr_bd->off_b_len = buf_list->length;
    curr_bd->h_next = 0;
    curr_bd->next = RT_NULL;
    curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
                     EMAC_CPPI_EOP_BIT | pkt->pkt_length);

    /* flush the packet from cache if write back cache is present */
    BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);

    /* send the packet */
    if (txch->active_queue_head == RT_NULL) {
        txch->active_queue_head = curr_bd;
        txch->active_queue_tail = curr_bd;
        if (1 != txch->queue_active) {
            emac_write(EMAC_TXHDP(ch),
                       emac_virt_to_phys(curr_bd));
            txch->queue_active = 1;
        }
        ++txch->queue_reinit;
    } else {
        register struct emac_tx_bd __iomem *tail_bd;
        register rt_uint32_t frame_status;

        tail_bd = txch->active_queue_tail;
        tail_bd->next = curr_bd;
        txch->active_queue_tail = curr_bd;
        tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
        tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
        frame_status = tail_bd->mode;
        if (frame_status & EMAC_CPPI_EOQ_BIT) {
            emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
            frame_status &= ~(EMAC_CPPI_EOQ_BIT);
            tail_bd->mode = frame_status;
            ++txch->end_of_queue_add;
        }
    }
    txch->active_queue_count++;
    rt_sem_release(&priv->tx_lock);
    return 0;
}
/**
 * emac_dev_xmit: EMAC Transmit function
 * @pbuf: PBUF pointer
 * @priv: The DaVinci EMAC driver private structure
 *
 * Called by the system to transmit a packet - we queue the packet in
 * EMAC hardware transmit queue
 *
 * Returns success(RT_EOK) or error code (typically out of desc's)
 */
static int emac_dev_xmit(struct pbuf *p, struct emac_priv *priv)
{
    int ret_code;
    struct emac_netbufobj tx_buf;    /* buffer obj-only single frame support */
    struct emac_netpktobj tx_packet; /* packet object */

    /* If no link, return */
    if (!priv->link) {
        rt_kprintf("DaVinci EMAC: No link to transmit\n");
        return -RT_EBUSY;
    }

    /* Build the buffer and packet objects - Since only single fragment is
     * supported, need not set length and token in both packet & object.
     * Doing so for completeness sake & to show that this needs to be done
     * in multifragment case
     */
    tx_packet.buf_list = &tx_buf;
    tx_packet.num_bufs = 1; /* only single fragment supported */
    tx_packet.pkt_length = p->len;
    tx_packet.pkt_token = (void *)p;
    tx_buf.length = p->len;
    tx_buf.buf_token = (void *)p;
    tx_buf.data_ptr = p->payload;
    EMAC_CACHE_WRITEBACK((unsigned long)p->payload, p->len);

    ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
    if (ret_code != 0) {
        if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
            rt_kprintf("DaVinci EMAC: xmit() fatal"
                       " err. Out of TX BD's\n");
        }
        priv->net_dev_stats.tx_dropped++;
        return -RT_EBUSY;
    }

    return RT_EOK;
}

/**
 * emac_cleanup_txch: Book-keep function to clean TX channel resources
 * @priv: The DaVinci EMAC private adapter structure
 * @ch: TX channel number
 *
 * Called to clean up TX channel resources
 *
 */
static void emac_cleanup_txch(struct emac_priv *priv, rt_uint32_t ch)
{
    struct emac_txch *txch = priv->txch[ch];

    if (txch) {
        if (txch->bd_mem)
            txch->bd_mem = NULL;
        rt_free(txch->tx_complete);
        rt_free(txch);
        priv->txch[ch] = NULL;
    }
}

/**
 * emac_dev_tx_timeout: EMAC Transmit timeout function
 * @ndev: The DaVinci EMAC network adapter
 *
 * Called when system detects that a packet transmit timeout period has
 * expired, potentially due to a fault in the adapter in not being able to
 * send it out on the wire. We teardown the TX channel assuming a hardware
 * error and re-initialize the TX channel for hardware operation
 *
 */
static void emac_dev_tx_timeout(struct emac_priv *priv)
{
    rt_kprintf("emac tx timeout.\n");
    priv->net_dev_stats.tx_errors++;
    emac_int_disable(priv);
    emac_stop_txch(priv, EMAC_DEF_TX_CH);
    emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
    emac_init_txch(priv, EMAC_DEF_TX_CH);
    emac_write(EMAC_TXHDP(0), 0);
    emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
    emac_int_enable(priv);
}
/* ethernet device interface */
/* transmit packet. */
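/*
 * rt_davinci_emac_tx() below is intended to be registered as the eth_tx hook
 * of the RT-Thread Ethernet device; the pbuf passed in is normally owned and
 * released by the lwIP/ethernetif caller, not by this driver.
 */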
rt_err_t rt_davinci_emac_tx(rt_device_t dev, struct pbuf *p)
{
    rt_err_t err;
    struct emac_priv *priv = dev->user_data;

    err = emac_dev_xmit(p, priv);
    if (err != RT_EOK)
    {
        emac_dev_tx_timeout(priv);
    }

    return RT_EOK;
}
/**
 * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number to process buffer descriptors for
 * @curr_bd: current buffer descriptor
 * @buffer: buffer pointer for descriptor
 * @buf_token: buffer token (stores pbuf information)
 *
 * Prepares the recycled buffer descriptor and adds it to hardware
 * receive queue - if queue empty this descriptor becomes the head
 * else adds the descriptor to end of queue
 *
 */
static void emac_addbd_to_rx_queue(struct emac_priv *priv, rt_uint32_t ch,
                                   struct emac_rx_bd __iomem *curr_bd,
                                   char *buffer, void *buf_token)
{
    struct emac_rxch *rxch = priv->rxch[ch];

    /* populate the hardware descriptor */
    curr_bd->h_next = 0;
    curr_bd->buff_ptr = virt_to_phys(buffer);
    curr_bd->off_b_len = rxch->buf_size;
    curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
    curr_bd->next = RT_NULL;
    curr_bd->data_ptr = buffer;
    curr_bd->buf_token = buf_token;

    /* write back */
    BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
    if (rxch->active_queue_head == RT_NULL) {
        rxch->active_queue_head = curr_bd;
        rxch->active_queue_tail = curr_bd;
        if (0 != rxch->queue_active) {
            emac_write(EMAC_RXHDP(ch),
                       emac_virt_to_phys(rxch->active_queue_head));
            rxch->queue_active = 1;
        }
    } else {
        struct emac_rx_bd __iomem *tail_bd;
        rt_uint32_t frame_status;

        tail_bd = rxch->active_queue_tail;
        rxch->active_queue_tail = curr_bd;
        tail_bd->next = curr_bd;
        tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
        tail_bd->h_next = emac_virt_to_phys(curr_bd);
        frame_status = tail_bd->mode;
        if (frame_status & EMAC_CPPI_EOQ_BIT) {
            emac_write(EMAC_RXHDP(ch),
                       emac_virt_to_phys(curr_bd));
            frame_status &= ~(EMAC_CPPI_EOQ_BIT);
            tail_bd->mode = frame_status;
            ++rxch->end_of_queue_add;
        }
    }
    ++rxch->recycled_bd;
}
/**
 * emac_net_rx_cb: Prepares packet and sends to upper layer
 * @priv: The DaVinci EMAC driver private structure
 * @net_pkt_list: Network packet list (received packets)
 *
 * Invalidates packet buffer memory and sends the received packet to upper
 * layer
 *
 * Returns success or appropriate error code (none as of now)
 */
static int emac_net_rx_cb(struct emac_priv *priv,
                          struct emac_netpktobj *net_pkt_list)
{
    struct eth_device *device = &priv->parent;
    struct pbuf *p;

    p = (struct pbuf *)net_pkt_list->pkt_token;
    /* set length of packet */
    p->tot_len = net_pkt_list->pkt_length;
    p->len = net_pkt_list->pkt_length;
    EMAC_CACHE_INVALIDATE((unsigned long)p->payload, p->len);
    if (device->netif->input(p, device->netif) != RT_EOK)
    {
        pbuf_free(p);
    }
    priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
    priv->net_dev_stats.rx_packets++;

    return 0;
}

/**
 * emac_net_alloc_rx_buf: Allocate a pbuf for RX
 * @priv: The DaVinci EMAC driver private structure
 * @buf_size: size of PBUF data buffer to allocate
 * @data_token: data token returned (pbuf handle for storing in buffer desc)
 * @ch: RX channel number
 *
 * Called during RX channel setup - allocates pbuf buffer of required size
 * and provides the pbuf handle and allocated buffer data pointer to caller
 *
 * Returns pbuf data pointer or 0 on failure to alloc pbuf
 */
static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
                                   void **data_token, rt_uint32_t ch)
{
    struct pbuf *p;

    p = pbuf_alloc(PBUF_LINK, buf_size, PBUF_RAM);
    if (RT_NULL == p) {
        rt_kprintf("DaVinci EMAC: failed to alloc pbuf\n");
        return RT_NULL;
    }
    /* set device pointer in p and reserve space for extra bytes */
    *data_token = (void *) p;
    EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p->payload, buf_size);
    return p->payload;
}
/**
 * emac_rx_bdproc: RX buffer descriptor (packet) processing
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number to process buffer descriptors for
 * @budget: number of packets allowed to process
 *
 * Processes RX buffer descriptors - checks ownership bit on the RX buffer
 * descriptor, sends the receive packet to upper layer, allocates a new PBUF
 * and recycles the buffer descriptor (requeues it in hardware RX queue).
 * Only "budget" number of packets are processed and indication of pending
 * packets provided to the caller.
 *
 * Returns number of packets processed (and indication of pending packets)
 */
static int emac_rx_bdproc(struct emac_priv *priv, rt_uint32_t ch, rt_uint32_t budget)
{
    unsigned long flags;
    rt_uint32_t frame_status;
    rt_uint32_t pkts_processed = 0;
    char *new_buffer;
    struct emac_rx_bd __iomem *curr_bd;
    struct emac_rx_bd __iomem *last_bd;
    struct emac_netpktobj *curr_pkt, pkt_obj;
    struct emac_netbufobj buf_obj;
    struct emac_netbufobj *rx_buf_obj;
    void *new_buf_token;
    struct emac_rxch *rxch = priv->rxch[ch];

    if (1 == rxch->teardown_pending)
        return 0;
    ++rxch->proc_count;
    rt_sem_take(&priv->rx_lock, RT_WAITING_FOREVER);
    pkt_obj.buf_list = &buf_obj;
    curr_pkt = &pkt_obj;
    curr_bd = rxch->active_queue_head;
    BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
    frame_status = curr_bd->mode;

    while ((curr_bd) &&
           ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
           (pkts_processed < budget)) {
        new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
                                           &new_buf_token, EMAC_DEF_RX_CH);
        if (RT_NULL == new_buffer) {
            ++rxch->out_of_rx_buffers;
            goto end_emac_rx_bdproc;
        }

        /* populate received packet data structure */
        rx_buf_obj = &curr_pkt->buf_list[0];
        rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
        rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
        rx_buf_obj->buf_token = curr_bd->buf_token;
        curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
        curr_pkt->num_bufs = 1;
        curr_pkt->pkt_length =
            (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
        emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
        ++rxch->processed_bd;
        last_bd = curr_bd;
        curr_bd = last_bd->next;
        rxch->active_queue_head = curr_bd;

        /* check if end of RX queue ? */
        if (frame_status & EMAC_CPPI_EOQ_BIT) {
            if (curr_bd) {
                ++rxch->mis_queued_packets;
                emac_write(EMAC_RXHDP(ch),
                           emac_virt_to_phys(curr_bd));
            } else {
                ++rxch->end_of_queue;
                rxch->queue_active = 0;
            }
        }

        /* recycle BD */
        emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
                               new_buf_token);

        /* return the packet to the user - BD ptr passed in
         * last parameter for potential *future* use */
        rt_sem_release(&priv->rx_lock);
        emac_net_rx_cb(priv, curr_pkt); /* ??? */
        rt_sem_take(&priv->rx_lock, RT_WAITING_FOREVER);

        curr_bd = rxch->active_queue_head;
        if (curr_bd) {
            BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
            frame_status = curr_bd->mode;
        }
        ++pkts_processed;
    }

end_emac_rx_bdproc:
    rt_sem_release(&priv->rx_lock);
    return pkts_processed;
}
/* reception packet. */
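/*
 * rt_davinci_emac_rx() is intended to be the device's eth_rx hook: it runs
 * in the RT-Thread Ethernet RX thread after emac_irq() calls
 * eth_device_ready(). It reads EMAC_MACINVECTOR, services pending TX
 * completions, RX descriptors and host errors, re-enables the EMAC
 * interrupts and always returns RT_NULL, because received pbufs are handed
 * to lwIP directly via netif->input() in emac_net_rx_cb().
 */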
struct pbuf *rt_davinci_emac_rx(rt_device_t dev)
{
    struct emac_priv *priv = dev->user_data;
    struct pbuf *p = RT_NULL;
    rt_uint32_t len;
    void *buffer;
    struct pbuf *q;
    rt_uint8_t *buf = RT_NULL;
    unsigned int mask;
    rt_uint32_t status = 0;
    rt_uint32_t num_pkts = 0;

    /* Check interrupt vectors and call packet processing */
    status = emac_read(EMAC_MACINVECTOR);

    mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
    if (priv->version == EMAC_VERSION_2)
        mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
    if (status & mask) {
        num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
                                  EMAC_DEF_TX_MAX_SERVICE);
    } /* TX processing */
    /*if (num_pkts)
        //return budget;
        return RT_NULL;*/

    mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
    if (priv->version == EMAC_VERSION_2)
        mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
    if (status & mask) {
        num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, EMAC_DEF_RX_MAX_SERVICE);
    } /* RX processing */
    /*if (num_pkts < EMAC_DEF_RX_MAX_SERVICE) {
        emac_int_enable(priv);
    }*/

    mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
    if (priv->version == EMAC_VERSION_2)
        mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
    if (status & mask) {
        rt_uint32_t ch, cause;
        rt_kprintf("DaVinci EMAC: Fatal Hardware Error\n");
        status = emac_read(EMAC_MACSTATUS);
        cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
                 EMAC_MACSTATUS_TXERRCODE_SHIFT);
        if (cause) {
            ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
                  EMAC_MACSTATUS_TXERRCH_SHIFT);
        }
        cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
                 EMAC_MACSTATUS_RXERRCODE_SHIFT);
        if (cause) {
            ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
                  EMAC_MACSTATUS_RXERRCH_SHIFT);
        }
    } /* Host error processing */
    //return num_pkts;
    //return p;

    emac_int_enable(priv);
    return RT_NULL;
}
/**
 * emac_set_type0addr: Set EMAC Type0 mac address
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 * @mac_addr: MAC address to set in device
 *
 * Called internally to set Type0 mac address of the Device
 *
 * Returns success (0) or appropriate error code (none as of now)
 */
static void emac_set_type0addr(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
{
    rt_uint32_t val;

    val = ((mac_addr[5] << 8) | (mac_addr[4]));
    emac_write(EMAC_MACSRCADDRLO, val);

    val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) |
           (mac_addr[1] << 8) | (mac_addr[0]));
    emac_write(EMAC_MACSRCADDRHI, val);
    val = emac_read(EMAC_RXUNICASTSET);
    val |= BIT(ch);
    emac_write(EMAC_RXUNICASTSET, val);
    val = emac_read(EMAC_RXUNICASTCLEAR);
    val &= ~BIT(ch);
    emac_write(EMAC_RXUNICASTCLEAR, val);
}

/**
 * emac_set_type1addr: Set EMAC Type1 mac address
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 * @mac_addr: MAC address to set in device
 *
 * Called internally to set Type1 mac address of the Device
 *
 * Returns success (0) or appropriate error code (none as of now)
 */
static void emac_set_type1addr(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
{
    rt_uint32_t val;

    emac_write(EMAC_MACINDEX, ch);
    val = ((mac_addr[5] << 8) | mac_addr[4]);
    emac_write(EMAC_MACADDRLO, val);
    val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) |
           (mac_addr[1] << 8) | (mac_addr[0]));
    emac_write(EMAC_MACADDRHI, val);
    emac_set_type0addr(priv, ch, mac_addr);
}

/**
 * emac_set_type2addr: Set EMAC Type2 mac address
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 * @mac_addr: MAC address to set in device
 * @index: index into RX address entries
 * @match: match parameter for RX address matching logic
 *
 * Called internally to set Type2 mac address of the Device
 *
 * Returns success (0) or appropriate error code (none as of now)
 */
static void emac_set_type2addr(struct emac_priv *priv, rt_uint32_t ch,
                               char *mac_addr, int index, int match)
{
    rt_uint32_t val;

    emac_write(EMAC_MACINDEX, index);
    val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) |
           (mac_addr[1] << 8) | (mac_addr[0]));
    emac_write(EMAC_MACADDRHI, val);
    val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) |
           (match << 19) | BIT(20));
    emac_write(EMAC_MACADDRLO, val);
    emac_set_type0addr(priv, ch, mac_addr);
}

/**
 * emac_setmac: Set mac address in the adapter (internal function)
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 * @mac_addr: MAC address to set in device
 *
 * Called internally to set the mac address of the Device
 *
 * Returns success (0) or appropriate error code (none as of now)
 */
static void emac_setmac(struct emac_priv *priv, rt_uint32_t ch, char *mac_addr)
{
    if (priv->rx_addr_type == 0) {
        emac_set_type0addr(priv, ch, mac_addr);
    } else if (priv->rx_addr_type == 1) {
        rt_uint32_t cnt;
        for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
            emac_set_type1addr(priv, ch, mac_addr);
    } else if (priv->rx_addr_type == 2) {
        emac_set_type2addr(priv, ch, mac_addr, ch, 1);
        emac_set_type0addr(priv, ch, mac_addr);
    } else {
        rt_kprintf("DaVinci EMAC: Wrong addressing\n");
    }
}
/** EMAC on-chip buffer descriptor memory
 *
 * WARNING: Please note that the on chip memory is used for both TX and RX
 * buffer descriptor queues and is equally divided between TX and RX desc's.
 * If the number of TX or RX descriptors change, these memory pointers need
 * to be adjusted. If external memory is allocated then these pointers can
 * point to that memory.
 *
 */
#define EMAC_TX_BD_MEM(priv)    ((priv)->emac_ctrl_ram)
#define EMAC_RX_BD_MEM(priv)    ((priv)->emac_ctrl_ram + \
                                 (((priv)->ctrl_ram_size) >> 1))

/**
 * emac_init_txch: TX channel initialization
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 *
 * Called during device init to setup a TX channel (allocate buffer desc,
 * create free pool and keep ready for transmission)
 *
 * Returns success(0) or mem alloc failures error code
 */
static int emac_init_txch(struct emac_priv *priv, rt_uint32_t ch)
{
    rt_uint32_t cnt, bd_size;
    void __iomem *mem;
    struct emac_tx_bd __iomem *curr_bd;
    struct emac_txch *txch = RT_NULL;

    txch = rt_malloc(sizeof(struct emac_txch));
    if (RT_NULL == txch) {
        rt_kprintf("DaVinci EMAC: TX Ch mem alloc failed");
        return -RT_ENOMEM;
    }
    rt_memset(txch, 0, sizeof(struct emac_txch));
    priv->txch[ch] = txch;
    txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
    txch->active_queue_head = RT_NULL;
    txch->active_queue_tail = RT_NULL;
    txch->queue_active = 0;
    txch->teardown_pending = 0;

    /* allocate memory for TX CPPI channel on a 4 byte boundary */
    txch->tx_complete = rt_malloc(txch->service_max * sizeof(rt_uint32_t));
    if (RT_NULL == txch->tx_complete) {
        rt_kprintf("DaVinci EMAC: Tx service mem alloc failed");
        rt_free(txch);
        return -RT_ENOMEM;
    }
    rt_memset(txch->tx_complete, 0, txch->service_max * sizeof(rt_uint32_t));

    /* allocate buffer descriptor pool; align every BD on four word
     * boundary for future requirements */
    bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
    txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
    txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);

    /* alloc TX BD memory */
    txch->bd_mem = EMAC_TX_BD_MEM(priv);
    rt_memset((void *)txch->bd_mem, 0, txch->alloc_size);

    /* initialize the BD linked list */
    mem = (void __iomem *)
          (((rt_uint32_t) txch->bd_mem + 0xF) & ~0xF);
    txch->bd_pool_head = RT_NULL;
    for (cnt = 0; cnt < txch->num_bd; cnt++) {
        curr_bd = mem + (cnt * bd_size);
        curr_bd->next = txch->bd_pool_head;
        txch->bd_pool_head = curr_bd;
    }

    /* reset statistics counters */
    txch->out_of_tx_bd = 0;
    txch->no_active_pkts = 0;
    txch->active_queue_count = 0;

    return 0;
}
/**
 * emac_init_rxch: RX channel initialization
 * @priv: The DaVinci EMAC driver private structure
 * @ch: RX channel number
 * @param: mac address for RX channel
 *
 * Called during device init to setup a RX channel (allocate buffers and
 * buffer descriptors, create queue and keep ready for reception)
 *
 * Returns success(0) or mem alloc failures error code
 */
static int emac_init_rxch(struct emac_priv *priv, rt_uint32_t ch, char *param)
{
    rt_uint32_t cnt, bd_size;
    void __iomem *mem;
    struct emac_rx_bd __iomem *curr_bd;
    struct emac_rxch *rxch = RT_NULL;

    rxch = rt_malloc(sizeof(struct emac_rxch));
    if (RT_NULL == rxch) {
        rt_kprintf("DaVinci EMAC: RX Ch mem alloc failed");
        return -RT_ENOMEM;
    }
    rt_memset(rxch, 0, sizeof(struct emac_rxch));
    priv->rxch[ch] = rxch;
    rxch->buf_size = priv->rx_buf_size;
    rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
    rxch->queue_active = 0;
    rxch->teardown_pending = 0;

    /* save mac address */
    for (cnt = 0; cnt < 6; cnt++)
        rxch->mac_addr[cnt] = param[cnt];

    /* allocate buffer descriptor pool; align every BD on four word
     * boundary for future requirements */
    bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
    rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
    rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
    rxch->bd_mem = EMAC_RX_BD_MEM(priv);
    rt_memset((void *)rxch->bd_mem, 0, rxch->alloc_size);
    rxch->pkt_queue.buf_list = &rxch->buf_queue;

    /* allocate RX buffer and initialize the BD linked list */
    mem = (void __iomem *)
          (((rt_uint32_t) rxch->bd_mem + 0xF) & ~0xF);
    rxch->active_queue_head = RT_NULL;
    rxch->active_queue_tail = mem;
    for (cnt = 0; cnt < rxch->num_bd; cnt++) {
        curr_bd = mem + (cnt * bd_size);
        /* for future use the last parameter contains the BD ptr */
        curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
                                                  rxch->buf_size,
                                                  (void **)&curr_bd->buf_token,
                                                  EMAC_DEF_RX_CH);
        if (curr_bd->data_ptr == RT_NULL) {
            rt_kprintf("DaVinci EMAC: RX buf mem alloc "
                       "failed for ch %d\n", ch);
            rt_free(rxch);
            return -RT_ENOMEM;
        }

        /* populate the hardware descriptor */
        curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
        curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
        curr_bd->off_b_len = rxch->buf_size;
        curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;

        /* write back to hardware memory */
        BD_CACHE_WRITEBACK_INVALIDATE((rt_uint32_t) curr_bd,
                                      EMAC_BD_LENGTH_FOR_CACHE);
        curr_bd->next = rxch->active_queue_head;
        rxch->active_queue_head = curr_bd;
    }

    /* At this point rxCppi->activeQueueHead points to the first
       RX BD ready to be given to RX HDP and rxch->active_queue_tail
       points to the last RX BD
     */
    return 0;
}
/**
 * emac_int_disable: Disable EMAC module interrupt
 * @priv: The DaVinci EMAC driver private structure
 *
 * Disable EMAC interrupt
 *
 */
static void emac_int_disable(struct emac_priv *priv)
{
    if (priv->version == EMAC_VERSION_2) {
        rt_base_t flags;

        flags = rt_hw_interrupt_disable();
        /* Program C0_Int_En to zero to turn off
         * interrupts to the CPU */
        emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
        emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
        /* NOTE: Rx Threshold and Misc interrupts are not disabled */
        rt_hw_interrupt_enable(flags);
    } else {
        /* Set DM644x control registers for interrupt control */
        emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
    }
}
/**
 * emac_int_enable: Enable EMAC module interrupt
 * @priv: The DaVinci EMAC driver private structure
 *
 * Enable EMAC interrupt
 *
 */
static void emac_int_enable(struct emac_priv *priv)
{
    if (priv->version == EMAC_VERSION_2) {
        /*if (priv->int_enable)
            priv->int_enable();*/
        emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
        emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);

        /* In addition to turning on interrupt Enable, we need
         * ack by writing appropriate values to the EOI
         * register */
        /* NOTE: Rx Threshold and Misc interrupts are not enabled */

        /* ack rxen only then a new pulse will be generated */
        emac_write(EMAC_DM646X_MACEOIVECTOR,
                   EMAC_DM646X_MAC_EOI_C0_RXEN);

        /* ack txen- only then a new pulse will be generated */
        emac_write(EMAC_DM646X_MACEOIVECTOR,
                   EMAC_DM646X_MAC_EOI_C0_TXEN);
    } else {
        /* Set DM644x control registers for interrupt control */
        emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
    }
}

/**
 * emac_irq: EMAC interrupt handler
 * @irq: interrupt number
 * @param: EMAC isr parameters
 *
 */
static void emac_irq(int irq, void *param)
{
    struct emac_priv *priv = param;

    ++priv->isr_count;
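    /* Mask EMAC interrupts and defer all descriptor processing to the
     * RT-Thread Ethernet RX thread: eth_device_ready() wakes that thread,
     * which calls rt_davinci_emac_rx(), and interrupts are re-enabled there
     * once the pending work has been serviced. */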
    emac_int_disable(priv);
    eth_device_ready(&priv->parent);
}
/**
 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
 * @priv: The DaVinci EMAC private adapter structure
 *
 * Enables EMAC hardware for packet processing - enables PHY, enables RX
 * for packet reception and enables device interrupts
 *
 * Returns success (0) or appropriate error code (none right now)
 */
static int emac_hw_enable(struct emac_priv *priv)
{
    rt_uint32_t ch, val, mbp_enable, mac_control;

    /* Soft reset */
    emac_write(EMAC_SOFTRESET, 1);
    while (emac_read(EMAC_SOFTRESET));

    /* Disable interrupt & Set pacing for more interrupts initially */
    emac_int_disable(priv);

    /* Full duplex enable bit set when auto negotiation happens */
    mac_control =
        (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
         ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
         ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
         ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
    emac_write(EMAC_MACCONTROL, mac_control);

    mbp_enable =
        (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
         ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
         ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
         ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
         ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
         ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
         ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
         ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) <<
          EMAC_RXMBP_PROMCH_SHIFT) |
         ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
         ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) <<
          EMAC_RXMBP_BROADCH_SHIFT) |
         ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
         ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) <<
          EMAC_RXMBP_MULTICH_SHIFT));
    emac_write(EMAC_RXMBPENABLE, mbp_enable);

    emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
                               EMAC_RX_MAX_LEN_MASK));
    emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
                                     EMAC_RX_BUFFER_OFFSET_MASK));
    emac_write(EMAC_RXFILTERLOWTHRESH, 0);
    emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
    priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;

    val = emac_read(EMAC_TXCONTROL);
    val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
    emac_write(EMAC_TXCONTROL, val);
    val = emac_read(EMAC_RXCONTROL);
    val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
    emac_write(EMAC_RXCONTROL, val);
    emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);

    for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
        emac_write(EMAC_TXHDP(ch), 0);
        emac_write(EMAC_TXINTMASKSET, BIT(ch));
    }

    for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
        struct emac_rxch *rxch = priv->rxch[ch];
        emac_setmac(priv, ch, rxch->mac_addr);
        emac_write(EMAC_RXINTMASKSET, BIT(ch));
        rxch->queue_active = 1;
        emac_write(EMAC_RXHDP(ch),
                   (unsigned int)(rxch->active_queue_head)); /* physical addr */
    }

    /* Enable MII */
    val = emac_read(EMAC_MACCONTROL);
    val |= (EMAC_MACCONTROL_GMIIEN);
    emac_write(EMAC_MACCONTROL, val);

    /* Enable interrupts */
    emac_int_enable(priv);

    return 0;
}
/**
 * emac_dev_getnetstats: EMAC get statistics function
 * @ndev: The DaVinci EMAC network adapter
 *
 * Called when system wants to get statistics from the device.
 *
 * We return the statistics in net_device_stats structure pulled from emac
 */
static struct net_device_stats *emac_dev_getnetstats(struct emac_priv *priv)
{
    rt_uint32_t mac_control;
    rt_uint32_t stats_clear_mask;

    /* update emac hardware stats and reset the registers */
    mac_control = emac_read(EMAC_MACCONTROL);

    if (mac_control & EMAC_MACCONTROL_GMIIEN)
        stats_clear_mask = EMAC_STATS_CLR_MASK;
    else
        stats_clear_mask = 0;

    priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
    emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);

    priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
                                       emac_read(EMAC_TXSINGLECOLL) +
                                       emac_read(EMAC_TXMULTICOLL));
    emac_write(EMAC_TXCOLLISION, stats_clear_mask);
    emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
    emac_write(EMAC_TXMULTICOLL, stats_clear_mask);

    priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
                                             emac_read(EMAC_RXJABBER) +
                                             emac_read(EMAC_RXUNDERSIZED));
    emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
    emac_write(EMAC_RXJABBER, stats_clear_mask);
    emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);

    priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
                                           emac_read(EMAC_RXMOFOVERRUNS));
    emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
    emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);

    priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
    emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);

    priv->net_dev_stats.tx_carrier_errors +=
        emac_read(EMAC_TXCARRIERSENSE);
    emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);

    priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
    emac_write(EMAC_TXUNDERRUN, stats_clear_mask);

    return &priv->net_dev_stats;
}
/* RT-Thread Device Interface */

/* initialize the interface */
static rt_err_t rt_davinci_emac_init(rt_device_t dev)
{
    struct emac_priv *priv = dev->user_data;
    rt_uint32_t ch, rc;

    /* Configuration items */
    priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;

    /* Clear basic hardware */
    for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
        emac_write(EMAC_TXHDP(ch), 0);
        emac_write(EMAC_RXHDP(ch), 0);
        emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
        emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
    }
    priv->mac_hash1 = 0;
    priv->mac_hash2 = 0;
    emac_write(EMAC_MACHASH1, 0);
    emac_write(EMAC_MACHASH2, 0);

    /* multi ch not supported - open 1 TX, 1 RX ch by default */
    rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
    if (0 != rc) {
        rt_kprintf("DaVinci EMAC: emac_init_txch() failed\n");
        return rc;
    }
    rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
    if (0 != rc) {
        rt_kprintf("DaVinci EMAC: emac_init_rxch() failed\n");
        return rc;
    }

    /* Hook all four DM365 EMAC interrupt lines to the common handler */
    rt_hw_interrupt_install(IRQ_DM365_EMAC_RXPULSE, emac_irq,
                            (void *)priv, "EMAC_RXPULSE");
    rt_hw_interrupt_umask(IRQ_DM365_EMAC_RXPULSE);
    rt_hw_interrupt_install(IRQ_DM365_EMAC_TXPULSE, emac_irq,
                            (void *)priv, "EMAC_TXPULSE");
    rt_hw_interrupt_umask(IRQ_DM365_EMAC_TXPULSE);
    rt_hw_interrupt_install(IRQ_DM365_EMAC_RXTHRESH, emac_irq,
                            (void *)priv, "EMAC_RXTHRESH");
    rt_hw_interrupt_umask(IRQ_DM365_EMAC_RXTHRESH);
    rt_hw_interrupt_install(IRQ_DM365_EMAC_MISCPULSE, emac_irq,
                            (void *)priv, "EMAC_MISCPULSE");
    rt_hw_interrupt_umask(IRQ_DM365_EMAC_MISCPULSE);

    emac_mii_reset(priv);
    davinci_emac_phy_init(dev);

    /* Start/Enable EMAC hardware */
    emac_hw_enable(priv);

    /* Poll the PHY link state once per second */
    rt_timer_init(&priv->timer, "link_timer",
                  davinci_emac_update_link,
                  (void *)priv,
                  RT_TICK_PER_SECOND,
                  RT_TIMER_FLAG_PERIODIC);
    rt_timer_start(&priv->timer);

    rt_kprintf("davinci emac initialized\n");

    return RT_EOK;
}
static rt_err_t rt_davinci_emac_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}

static rt_err_t rt_davinci_emac_close(rt_device_t dev)
{
    return RT_EOK;
}

static rt_size_t rt_davinci_emac_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_set_errno(-RT_ENOSYS);
    return 0;
}

static rt_size_t rt_davinci_emac_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    rt_set_errno(-RT_ENOSYS);
    return 0;
}
static rt_err_t rt_davinci_emac_control(rt_device_t dev, int cmd, void *args)
{
    struct emac_priv *priv = dev->user_data;

    switch (cmd)
    {
    case NIOCTL_GADDR:
        /* get mac address */
        if (args)
            rt_memcpy(args, priv->mac_addr, 6);
        else
            return -RT_ERROR;
        break;

    default:
        break;
    }

    return RT_EOK;
}
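/*
 * Usage sketch (not part of the driver): an application can retrieve the
 * MAC address of the registered "e0" interface through the generic device
 * control interface handled above. The helper name below is illustrative
 * only; it is kept under "#if 0" so it is not compiled into the driver.
 */
#if 0
static void emac_show_mac_example(void)
{
    rt_uint8_t mac[6];
    rt_device_t dev = rt_device_find("e0");

    if (dev != RT_NULL &&
        rt_device_control(dev, NIOCTL_GADDR, mac) == RT_EOK)
    {
        rt_kprintf("e0 MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }
}
#endif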
void dm365_emac_gpio_init(void)
{
    rt_uint32_t arm_intmux;

    /*
     * EMAC interrupts are multiplexed with GPIO interrupts on the DM365.
     * Details are available in the DM365 ARM Subsystem User's Guide
     * (sprufg5.pdf), pages 133-134.
     */
    arm_intmux = davinci_readl(DM365_ARM_INTMUX);
    arm_intmux |= (1 << 14) | (1 << 15) | (1 << 16) | (1 << 17);
    davinci_writel(arm_intmux, DM365_ARM_INTMUX);
}
int rt_hw_davinci_emac_init(void)
{
    struct emac_priv *priv = &davinci_emac_device;
    struct clk *emac_clk;

    emac_clk = clk_get("EMACCLK");
    emac_bus_frequency = clk_get_rate(emac_clk);

    /* Enable the CPGMAC power/clock domain and set up the interrupt mux */
    psc_change_state(DAVINCI_DM365_LPSC_CPGMAC, PSC_ENABLE);
    dm365_emac_gpio_init();

    rt_memset(&davinci_emac_device, 0, sizeof(davinci_emac_device));
    davinci_emac_device.emac_base = (void __iomem *)DM365_EMAC_CNTRL_BASE;
    davinci_emac_device.ctrl_base = (void __iomem *)DM365_EMAC_WRAP_CNTRL_BASE;
    davinci_emac_device.ctrl_ram_size = DM365_EMAC_CNTRL_RAM_SIZE;
    davinci_emac_device.emac_ctrl_ram = (void __iomem *)DM365_EMAC_WRAP_RAM_BASE;
    davinci_emac_device.mdio_base = (void __iomem *)DM365_EMAC_MDIO_BASE;
    davinci_emac_device.version = EMAC_VERSION_2;
    davinci_emac_device.rmii_en = 0;
    davinci_emac_device.phy_addr = 0x09;

    rt_sem_init(&priv->tx_lock, "tx_lock", 1, RT_IPC_FLAG_FIFO);
    rt_sem_init(&priv->rx_lock, "rx_lock", 1, RT_IPC_FLAG_FIFO);

    /* default MAC address: 00:60:6E:11:22:33 */
    davinci_emac_device.mac_addr[0] = 0x00;
    davinci_emac_device.mac_addr[1] = 0x60;
    davinci_emac_device.mac_addr[2] = 0x6E;
    davinci_emac_device.mac_addr[3] = 0x11;
    davinci_emac_device.mac_addr[4] = 0x22;
    davinci_emac_device.mac_addr[5] = 0x33;

    davinci_emac_device.parent.parent.init = rt_davinci_emac_init;
    davinci_emac_device.parent.parent.open = rt_davinci_emac_open;
    davinci_emac_device.parent.parent.close = rt_davinci_emac_close;
    davinci_emac_device.parent.parent.read = rt_davinci_emac_read;
    davinci_emac_device.parent.parent.write = rt_davinci_emac_write;
    davinci_emac_device.parent.parent.control = rt_davinci_emac_control;
    davinci_emac_device.parent.parent.user_data = &davinci_emac_device;

    davinci_emac_device.parent.eth_rx = rt_davinci_emac_rx;
    davinci_emac_device.parent.eth_tx = rt_davinci_emac_tx;

    eth_device_init(&(davinci_emac_device.parent), "e0");

    return 0;
}
INIT_DEVICE_EXPORT(rt_hw_davinci_emac_init);
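/*
 * INIT_DEVICE_EXPORT hooks rt_hw_davinci_emac_init() into RT-Thread's
 * component auto-initialization sequence, so the driver is registered as
 * "e0" automatically at startup; no explicit call from application code
 * is needed.
 */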
#ifdef RT_USING_FINSH
#include <finsh.h>
void dump_emac_stats(void)
{
    struct emac_priv *emac;
    struct net_device_stats *stats;
    rt_device_t dev = rt_device_find("e0");

    if (dev == RT_NULL)
        return;

    emac = (struct emac_priv *)dev->user_data;
    stats = emac_dev_getnetstats(emac);

    rt_kprintf("rx_packets = %d\n"
               "tx_packets = %d\n"
               "rx_bytes = %d\n"
               "tx_bytes = %d\n"
               "rx_errors = %d\n"
               "tx_errors = %d\n"
               "rx_dropped = %d\n"
               "tx_dropped = %d\n"
               "multicast = %d\n"
               "collisions = %d\n",
               stats->rx_packets,
               stats->tx_packets,
               stats->rx_bytes,
               stats->tx_bytes,
               stats->rx_errors,
               stats->tx_errors,
               stats->rx_dropped,
               stats->tx_dropped,
               stats->multicast,
               stats->collisions);
    rt_kprintf("rx_length_errors = %d\n"
               "rx_over_errors = %d\n"
               "rx_crc_errors = %d\n"
               "rx_frame_errors = %d\n"
               "rx_fifo_errors = %d\n"
               "rx_missed_errors = %d\n",
               stats->rx_length_errors,
               stats->rx_over_errors,
               stats->rx_crc_errors,
               stats->rx_frame_errors,
               stats->rx_fifo_errors,
               stats->rx_missed_errors);
    rt_kprintf("tx_aborted_errors = %d\n"
               "tx_carrier_errors = %d\n"
               "tx_fifo_errors = %d\n"
               "tx_heartbeat_errors = %d\n"
               "tx_window_errors = %d\n",
               stats->tx_aborted_errors,
               stats->tx_carrier_errors,
               stats->tx_fifo_errors,
               stats->tx_heartbeat_errors,
               stats->tx_window_errors);
    rt_kprintf("rx_compressed = %d\n"
               "tx_compressed = %d\n",
               stats->rx_compressed,
               stats->tx_compressed);
    rt_kprintf("\n");
}
FINSH_FUNCTION_EXPORT(dump_emac_stats, dump emac statistics);
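/*
 * With the export above, the counters can be printed from the finsh shell
 * with "dump_emac_stats()" (or, when FINSH_USING_MSH is enabled, with the
 * "dump_emac_stats" command registered below).
 */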
#ifdef FINSH_USING_MSH
int cmd_dump_emac_stats(int argc, char **argv)
{
    dump_emac_stats();
    return 0;
}
MSH_CMD_EXPORT_ALIAS(cmd_dump_emac_stats, dump_emac_stats, dump emac statistics);
#endif
#endif