fsl_enet_qos.c

  1. /*
  2. * Copyright 2019-2021 NXP
  3. * All rights reserved.
  4. *
  5. * SPDX-License-Identifier: BSD-3-Clause
  6. */
  7. #include "fsl_enet_qos.h"
  8. /*******************************************************************************
  9. * Definitions
  10. ******************************************************************************/
  11. /* Component ID definition, used by tools. */
  12. #ifndef FSL_COMPONENT_ID
  13. #define FSL_COMPONENT_ID "platform.drivers.enet_qos"
  14. #endif
  15. /*! @brief Defines 10^9 nanoseconds in one second. */
  16. #define ENET_QOS_NANOSECS_ONESECOND (1000000000U)
  17. /*! @brief Defines 10^6 microseconds in one second. */
  18. #define ENET_QOS_MICRSECS_ONESECOND (1000000U)
  19. /*! @brief Rx buffer LSB ignore bits. */
  20. #define ENET_QOS_RXBUFF_IGNORELSB_BITS (3U)
  21. /*! @brief ENET FIFO size unit. */
  22. #define ENET_QOS_FIFOSIZE_UNIT (256U)
  23. /*! @brief ENET half-duplex default IPG. */
  24. #define ENET_QOS_HALFDUPLEX_DEFAULTIPG (4U)
  25. /*! @brief ENET minimum ring length. */
  26. #define ENET_QOS_MIN_RINGLEN (4U)
  27. /*! @brief ENET wakeup filter number. */
  28. #define ENET_QOS_WAKEUPFILTER_NUM (8U)
  29. /*! @brief Required systime timer frequency. */
  30. #define ENET_QOS_SYSTIME_REQUIRED_CLK_MHZ (50U)
  31. /*! @brief Ethernet VLAN tag length. */
  32. #define ENET_QOS_FRAME_VLAN_TAGLEN 4U
  33. /*! @brief AVB TYPE */
  34. #define ENET_QOS_AVBTYPE 0x22F0U
  35. #define ENET_QOS_HEAD_TYPE_OFFSET (12)
  36. #define ENET_QOS_HEAD_AVBTYPE_OFFSET (16)
  37. /*! @brief Defines the macro for converting constants from host byte order to network byte order. */
  38. #define ENET_QOS_HTONS(n) __REV16(n)
  39. #define ENET_QOS_HTONL(n) __REV(n)
  40. #define ENET_QOS_NTOHS(n) __REV16(n)
  41. #define ENET_QOS_NTOHL(n) __REV(n)
  42. #define ENET_QOS_DMA_CHX_RX_CTRL_RBSZ
  43. /*******************************************************************************
  44. * Prototypes
  45. ******************************************************************************/
  46. /*!
  47. * @brief Increase the index in the ring.
  48. *
  49. * @param index The current index.
  50. * @param max The size.
  51. * @return the increased index.
  52. */
  53. static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max);
  54. /*!
  55. * @brief Poll status flag.
  56. *
  57. * @param regAddr The register address to read out status
  58. * @param mask The mask to operate the register value.
  59. * @param readyStatus Indicate readyStatus for the field
  60. * @retval kStatus_Success Poll readyStatus Success.
  61. * @retval kStatus_ENET_QOS_Timeout Poll readyStatus timeout.
  62. */
  63. static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus);
  64. /*!
  65. * @brief Set ENET DMA controller with the configuration.
  66. *
  67. * @param base ENET peripheral base address.
  68. * @param config ENET Mac configuration.
  69. */
  70. static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config);
  71. /*!
  72. * @brief Set ENET MAC controller with the configuration.
  73. *
  74. * @param base ENET peripheral base address.
  75. * @param config ENET Mac configuration.
  76. * @param macAddr ENET six-byte mac address.
  77. */
  78. static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
  79. const enet_qos_config_t *config,
  80. uint8_t *macAddr,
  81. uint8_t macCount);
  82. /*!
  83. * @brief Set ENET MTL with the configuration.
  84. *
  85. * @param base ENET peripheral base address.
  86. * @param config ENET Mac configuration.
  87. */
  88. static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config);
  89. /*!
  90. * @brief Set ENET DMA transmit buffer descriptors for one channel.
  91. *
  92. * @param base ENET peripheral base address.
  93. * @param bufferConfig ENET buffer configuration.
  94. * @param intTxEnable tx interrupt enable.
  95. * @param channel The channel number, 0, 1.
  96. */
  97. static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
  98. const enet_qos_buffer_config_t *bufferConfig,
  99. bool intTxEnable,
  100. uint8_t channel);
  101. /*!
  102. * @brief Set ENET DMA receive buffer descriptors for one channel.
  103. *
  104. * @param base ENET peripheral base address.
  105. * @param bufferConfig ENET buffer configuration.
  106. * @param intRxEnable rx interrupt enable.
  107. * @param channel The channel number, 0, 1.
  108. */
  109. static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
  110. enet_qos_config_t *config,
  111. const enet_qos_buffer_config_t *bufferConfig,
  112. bool intRxEnable,
  113. uint8_t channel);
  114. /*!
  115. * @brief Sets the ENET 1588 feature.
  116. *
  117. * Enable the enhanced 1588 buffer descriptor mode and start
  118. * the 1588 timer.
  119. *
  120. * @param base ENET peripheral base address.
  121. * @param config The ENET configuration.
  122. * @param refClk_Hz The reference clock for ptp 1588.
  123. */
  124. static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz);
  125. /*!
  126. * @brief Store the receive time-stamp for event PTP frame in the time-stamp buffer ring.
  127. *
  128. * @param base ENET peripheral base address.
  129. * @param handle ENET handler.
  130. * @param rxDesc The ENET receive descriptor pointer.
  131. * @param channel The rx channel.
  132. * @param ts The timestamp structure pointer.
  133. */
  134. static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
  135. enet_qos_handle_t *handle,
  136. enet_qos_rx_bd_struct_t *rxDesc,
  137. // uint8_t channel,
  138. enet_qos_ptp_time_t *ts);
  139. /*!
  140. * @brief Check if txDirtyRing available.
  141. *
  142. * @param txDirtyRing pointer to txDirtyRing
  143. * @retval txDirty available status.
  144. */
  145. static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing);
  146. /*******************************************************************************
  147. * Variables
  148. ******************************************************************************/
  149. /*! @brief Pointers to enet bases for each instance. */
  150. static ENET_QOS_Type *const s_enetqosBases[] = ENET_QOS_BASE_PTRS;
  151. /*! @brief Pointers to enet IRQ number for each instance. */
  152. static const IRQn_Type s_enetqosIrqId[] = ENET_QOS_IRQS;
  153. /* ENET ISR for transactional APIs. */
  154. static enet_qos_isr_t s_enetqosIsr;
  155. /*! @brief Pointers to enet handles for each instance. */
  156. static enet_qos_handle_t *s_ENETHandle[ARRAY_SIZE(s_enetqosBases)] = {NULL};
  157. #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
  158. /*! @brief Pointers to enet clocks for each instance. */
  159. const clock_ip_name_t s_enetqosClock[ARRAY_SIZE(s_enetqosBases)] = ENETQOS_CLOCKS;
  160. #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
  161. /*******************************************************************************
  162. * Code
  163. ******************************************************************************/
  164. static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus)
  165. {
  166. uint8_t retryTimes = 10U;
  167. status_t result = kStatus_Success;
  168. while ((readyStatus != (*regAddr & mask)) && (0U != retryTimes))
  169. {
  170. retryTimes--;
  171. SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
  172. }
  173. if (retryTimes == 0U)
  174. {
  175. result = kStatus_ENET_QOS_Timeout;
  176. }
  177. return result;
  178. }
  179. /*!
  180. * brief Sets the ENET AVB feature.
  181. *
  182. * ENET_QOS AVB feature configuration, set transmit bandwidth.
  183. * This API is called when the AVB feature is required.
  184. *
  185. * param base ENET_QOS peripheral base address.
  186. * param config The ENET_QOS AVB feature configuration structure.
  187. * param queueIndex ENET_QOS queue index.
  188. */
  189. void ENET_QOS_AVBConfigure(ENET_QOS_Type *base, const enet_qos_cbs_config_t *config, uint8_t queueIndex)
  190. {
  191. assert(config != NULL);
  192. /* Enable AV algorithm */
  193. base->MTL_QUEUE[queueIndex].MTL_TXQX_ETS_CTRL |= ENET_QOS_MTL_TXQX_ETS_CTRL_AVALG_MASK;
  194. /* Configure send slope */
  195. base->MTL_QUEUE[queueIndex].MTL_TXQX_SNDSLP_CRDT = config->sendSlope;
  196. /* Configure idle slope (same register as tx weight) */
  197. base->MTL_QUEUE[queueIndex].MTL_TXQX_QNTM_WGHT = config->idleSlope;
  198. /* Configure high credit */
  199. base->MTL_QUEUE[queueIndex].MTL_TXQX_HI_CRDT = config->highCredit;
  200. /* Configure low credit */
  201. base->MTL_QUEUE[queueIndex].MTL_TXQX_LO_CRDT = config->lowCredit;
  202. }
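/*
 * Example usage sketch for ENET_QOS_AVBConfigure(). The credit and slope values below are
 * illustrative only (they depend on the reserved AVB bandwidth and the port speed), and
 * ENET is the peripheral instance macro from the device header.
 *
 * code
 * enet_qos_cbs_config_t cbsConfig;
 * cbsConfig.sendSlope  = 0x1C00U;     // assumed send slope credit
 * cbsConfig.idleSlope  = 0x0400U;     // assumed idle slope (shares the queue weight register)
 * cbsConfig.highCredit = 0x1000U;     // assumed high credit limit
 * cbsConfig.lowCredit  = 0xFFFF1000U; // assumed low credit limit
 * ENET_QOS_AVBConfigure(ENET, &cbsConfig, 1U); // enable CBS on MTL transmit queue 1
 * endcode
 */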
  203. static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max)
  204. {
  205. /* Increase the index. */
  206. index++;
  207. if (index >= max)
  208. {
  209. index = 0;
  210. }
  211. return index;
  212. }
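/* Reverses the bit order of a 32-bit word: the three masked shifts reverse the bits within
 * each byte, and the final line reverses the byte order. */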
  213. static uint32_t ENET_QOS_ReverseBits(uint32_t value)
  214. {
  215. value = ((value & 0x55555555UL) << 1U) | ((value >> 1U) & 0x55555555UL);
  216. value = ((value & 0x33333333UL) << 2U) | ((value >> 2U) & 0x33333333UL);
  217. value = ((value & 0x0F0F0F0FUL) << 4U) | ((value >> 4U) & 0x0F0F0F0FUL);
  218. return (value >> 24U) | ((value >> 8U) & 0xFF00UL) | ((value & 0xFF00UL) << 8U) | (value << 24U);
  219. }
  220. static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config)
  221. {
  222. assert(config != NULL);
  223. uint8_t index;
  224. uint32_t reg;
  225. uint32_t burstLen;
  226. /* Reset first and wait for the reset to complete.
  227. * The reset bit is cleared automatically once the reset completes. */
  228. base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
  229. while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
  230. {
  231. }
  232. /* Set the burst length. */
  233. for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
  234. {
  235. burstLen = (uint32_t)kENET_QOS_BurstLen1;
  236. if (config->multiqueueCfg != NULL)
  237. {
  238. burstLen = (uint32_t)config->multiqueueCfg->burstLen;
  239. }
  240. base->DMA_CH[index].DMA_CHX_CTRL = burstLen & ENET_QOS_DMA_CHX_CTRL_PBLx8_MASK;
  241. reg = base->DMA_CH[index].DMA_CHX_TX_CTRL & ~ENET_QOS_DMA_CHX_TX_CTRL_TxPBL_MASK;
  242. base->DMA_CH[index].DMA_CHX_TX_CTRL = reg | ENET_QOS_DMA_CHX_TX_CTRL_TxPBL(burstLen & 0x3FU);
  243. reg = base->DMA_CH[index].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RxPBL_MASK;
  244. base->DMA_CH[index].DMA_CHX_RX_CTRL = reg | ENET_QOS_DMA_CHX_RX_CTRL_RxPBL(burstLen & 0x3FU);
  245. }
  246. }
  247. static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config)
  248. {
  249. assert(config != NULL);
  250. uint32_t txqOpreg = 0;
  251. uint32_t rxqOpReg = 0;
  252. enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
  253. uint8_t index;
  254. /* Set the Tx/Rx store and forward mode. */
  255. if ((config->specialControl & (uint32_t)kENET_QOS_StoreAndForward) != 0U)
  256. {
  257. txqOpreg = ENET_QOS_MTL_TXQX_OP_MODE_TSF_MASK;
  258. rxqOpReg = ENET_QOS_MTL_RXQX_OP_MODE_RSF_MASK;
  259. }
  260. /* Flush the transmit queue. */
  261. txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
  262. /* Set receive operation mode. */
  263. rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_FUP_MASK | ENET_QOS_MTL_RXQX_OP_MODE_RFD(3U) |
  264. ENET_QOS_MTL_RXQX_OP_MODE_RFA(1U) | ENET_QOS_MTL_RXQX_OP_MODE_EHFC_MASK;
  265. if (multiqCfg == NULL)
  266. {
  267. txqOpreg |=
  268. ENET_QOS_MTL_TXQX_OP_MODE_TQS(((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
  269. rxqOpReg |=
  270. ENET_QOS_MTL_RXQX_OP_MODE_RQS(((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
  271. base->MTL_QUEUE[0].MTL_TXQX_OP_MODE = txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)kENET_QOS_DCB_Mode);
  272. base->MTL_QUEUE[0].MTL_RXQX_OP_MODE = rxqOpReg;
  273. }
  274. else
  275. {
  276. /* Set the schedule/arbitration(set for multiple queues). */
  277. base->MTL_OPERATION_MODE = ENET_QOS_MTL_OPERATION_MODE_SCHALG(multiqCfg->mtltxSche) |
  278. ENET_QOS_MTL_OPERATION_MODE_RAA(multiqCfg->mtlrxSche);
  279. for (index = 0; index < multiqCfg->txQueueUse; index++)
  280. {
  281. txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_TQS(
  282. ((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / ((uint32_t)multiqCfg->txQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
  283. base->MTL_QUEUE[index].MTL_TXQX_OP_MODE =
  284. txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)multiqCfg->txQueueConfig[index].mode);
  285. if (multiqCfg->txQueueConfig[index].mode == kENET_QOS_AVB_Mode)
  286. {
  287. ENET_QOS_AVBConfigure(base, multiqCfg->txQueueConfig[index].cbsConfig, index);
  288. }
  289. else
  290. {
  291. base->MTL_QUEUE[index].MTL_TXQX_QNTM_WGHT = multiqCfg->txQueueConfig[index].weight;
  292. }
  293. }
  294. volatile uint32_t *mtlrxQuemapReg;
  295. uint8_t configIndex;
  296. for (index = 0; index < multiqCfg->rxQueueUse; index++)
  297. {
  298. rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_RQS(
  299. ((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / ((uint32_t)multiqCfg->rxQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
  300. base->MTL_QUEUE[index].MTL_RXQX_OP_MODE = rxqOpReg;
  301. mtlrxQuemapReg = (index < 4U) ? &base->MTL_RXQ_DMA_MAP0 : &base->MTL_RXQ_DMA_MAP1;
  302. configIndex = (index & 0x3U);
  303. *mtlrxQuemapReg &= ~((uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH_MASK << (8U * configIndex));
  304. *mtlrxQuemapReg |= (uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH(multiqCfg->rxQueueConfig[index].mapChannel)
  305. << (8U * configIndex);
  306. }
  307. }
  308. }
  309. static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
  310. const enet_qos_config_t *config,
  311. uint8_t *macAddr,
  312. uint8_t macCount)
  313. {
  314. assert(config != NULL);
  315. uint32_t reg = 0;
  316. /* Set the MAC address. */
  317. /* Rx packets whose destination address matches a programmed MAC address
  318. * are routed to DMA channel 0. */
  319. if (macAddr != NULL)
  320. {
  321. for (uint8_t i = 0; i < macCount; i++)
  322. {
  323. ENET_QOS_SetMacAddr(base, macAddr, i);
  324. }
  325. }
  326. /* Set the receive filter. */
  327. reg =
  328. ENET_QOS_MAC_PACKET_FILTER_PR(((config->specialControl & (uint32_t)kENET_QOS_PromiscuousEnable) != 0U) ? 1U :
  329. 0U) |
  330. ENET_QOS_MAC_PACKET_FILTER_DBF(((config->specialControl & (uint32_t)kENET_QOS_BroadCastRxDisable) != 0U) ? 1U :
  331. 0U) |
  332. ENET_QOS_MAC_PACKET_FILTER_PM(((config->specialControl & (uint32_t)kENET_QOS_MulticastAllEnable) != 0U) ? 1U :
  333. 0U) |
  334. ENET_QOS_MAC_PACKET_FILTER_HMC(((config->specialControl & (uint32_t)kENET_QOS_HashMulticastEnable) != 0U) ? 1U :
  335. 0U);
  336. base->MAC_PACKET_FILTER = reg;
  337. /* Flow control. */
  338. if ((config->specialControl & (uint32_t)kENET_QOS_FlowControlEnable) != 0U)
  339. {
  340. base->MAC_RX_FLOW_CTRL = ENET_QOS_MAC_RX_FLOW_CTRL_RFE_MASK | ENET_QOS_MAC_RX_FLOW_CTRL_UP_MASK;
  341. base->MAC_TX_FLOW_CTRL_Q[0] = ENET_QOS_MAC_TX_FLOW_CTRL_Q_PT(config->pauseDuration);
  342. }
  343. /* Set the 1 us tick counter. */
  344. reg = config->csrClock_Hz / ENET_QOS_MICRSECS_ONESECOND - 1U;
  345. base->MAC_ONEUS_TIC_COUNTER = ENET_QOS_MAC_ONEUS_TIC_COUNTER_TIC_1US_CNTR(reg);
  346. /* Set the speed and duplex. */
  347. reg = ENET_QOS_MAC_CONFIGURATION_DM(config->miiDuplex) | (uint32_t)config->miiSpeed |
  348. ENET_QOS_MAC_CONFIGURATION_S2KP(((config->specialControl & (uint32_t)kENET_QOS_8023AS2KPacket) != 0U) ? 1U :
  349. 0U);
  350. if (config->miiDuplex == kENET_QOS_MiiHalfDuplex)
  351. {
  352. reg |= ENET_QOS_MAC_CONFIGURATION_IPG(ENET_QOS_HALFDUPLEX_DEFAULTIPG);
  353. }
  354. base->MAC_CONFIGURATION = reg;
  355. if (config->multiqueueCfg != NULL)
  356. {
  357. reg = 0U;
  358. uint8_t configIndex;
  359. enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
  360. uint32_t txQueuePrioMap0 = base->MAC_TXQ_PRTY_MAP0;
  361. uint32_t txQueuePrioMap1 = base->MAC_TXQ_PRTY_MAP1;
  362. uint32_t rxQueuePrioMap0 = base->MAC_RXQ_CTRL[2];
  363. uint32_t rxQueuePrioMap1 = base->MAC_RXQ_CTRL[3];
  364. uint32_t rxCtrlReg1 = base->MAC_RXQ_CTRL[1];
  365. for (uint8_t index = 0U; index < multiqCfg->txQueueUse; index++)
  366. {
  367. configIndex = index & 0x3U;
  368. /* Configure tx queue priority. */
  369. if (index < 4U)
  370. {
  371. txQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
  372. txQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
  373. << (8U * configIndex);
  374. }
  375. else
  376. {
  377. txQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
  378. txQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
  379. << (8U * configIndex);
  380. }
  381. }
  382. for (uint8_t index = 0U; index < multiqCfg->rxQueueUse; index++)
  383. {
  384. configIndex = index & 0x3U;
  385. /* Configure rx queue priority. */
  386. if (index < 4U)
  387. {
  388. rxQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
  389. rxQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
  390. << (8U * configIndex);
  391. }
  392. else
  393. {
  394. rxQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
  395. rxQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
  396. << (8U * configIndex);
  397. }
  398. /* Configure queue enable mode. */
  399. reg |= ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)multiqCfg->rxQueueConfig[index].mode) << (2U * index);
  400. /* Configure rx queue routing */
  401. if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketAVCPQ) != 0U)
  402. {
  403. rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_AVCPQ_MASK;
  404. rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_AVCPQ(index) | ENET_QOS_MAC_RXQ_CTRL_TACPQE_MASK);
  405. }
  406. if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketPTPQ) != 0U)
  407. {
  408. rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_PTPQ_MASK;
  409. rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_PTPQ(index);
  410. }
  411. if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketDCBCPQ) != 0U)
  412. {
  413. rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_DCBCPQ_MASK;
  414. rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_DCBCPQ(index);
  415. }
  416. if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketUPQ) != 0U)
  417. {
  418. rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_UPQ_MASK;
  419. rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_UPQ(index);
  420. }
  421. if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketMCBCQ) != 0U)
  422. {
  423. rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_MCBCQ_MASK;
  424. rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_MCBCQ(index) | ENET_QOS_MAC_RXQ_CTRL_MCBCQEN_MASK);
  425. }
  426. }
  427. base->MAC_TXQ_PRTY_MAP0 = txQueuePrioMap0;
  428. base->MAC_TXQ_PRTY_MAP1 = txQueuePrioMap1;
  429. base->MAC_RXQ_CTRL[2] = rxQueuePrioMap0;
  430. base->MAC_RXQ_CTRL[3] = rxQueuePrioMap1;
  431. base->MAC_RXQ_CTRL[1] = rxCtrlReg1;
  432. }
  433. else
  434. {
  435. /* Configure queue enable mode. */
  436. reg = ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)kENET_QOS_DCB_Mode);
  437. }
  438. /* Enable queue. */
  439. base->MAC_RXQ_CTRL[0] = reg;
  440. /* Mask the MMC counter interrupts because they are not handled
  441. * in the interrupt handler.
  442. */
  443. base->MAC_MMC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
  444. base->MAC_MMC_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
  445. base->MAC_MMC_IPC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
  446. base->MAC_MMC_FPE_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
  447. base->MAC_MMC_FPE_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
  448. }
  449. static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
  450. const enet_qos_buffer_config_t *bufferConfig,
  451. bool intTxEnable,
  452. uint8_t channel)
  453. {
  454. uint16_t j;
  455. enet_qos_tx_bd_struct_t *txbdPtr;
  456. uint32_t control = intTxEnable ? ENET_QOS_TXDESCRIP_RD_IOC_MASK : 0U;
  457. const enet_qos_buffer_config_t *buffCfg = bufferConfig;
  458. if (buffCfg == NULL)
  459. {
  460. return kStatus_InvalidArgument;
  461. }
  462. /* Check the ring length. */
  463. if (buffCfg->txRingLen < ENET_QOS_MIN_RINGLEN)
  464. {
  465. return kStatus_InvalidArgument;
  466. }
  467. /* Set the tx descriptor start/tail pointer, shall be word aligned. */
  468. base->DMA_CH[channel].DMA_CHX_TXDESC_LIST_ADDR =
  469. (uint32_t)buffCfg->txDescStartAddrAlign & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
  470. base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR =
  471. (uint32_t)buffCfg->txDescTailAddrAlign & ENET_QOS_DMA_CHX_TXDESC_TAIL_PTR_TDTP_MASK;
  472. /* Set the tx ring length. */
  473. base->DMA_CH[channel].DMA_CHX_TXDESC_RING_LENGTH =
  474. ((uint32_t)buffCfg->txRingLen - 1U) & ENET_QOS_DMA_CHX_TXDESC_RING_LENGTH_TDRL_MASK;
  475. /* Init the txbdPtr to the transmit descriptor start address. */
  476. txbdPtr = (enet_qos_tx_bd_struct_t *)(buffCfg->txDescStartAddrAlign);
  477. for (j = 0; j < buffCfg->txRingLen; j++)
  478. {
  479. txbdPtr->buff1Addr = 0;
  480. txbdPtr->buff2Addr = 0;
  481. txbdPtr->buffLen = control;
  482. txbdPtr->controlStat = 0;
  483. txbdPtr++;
  484. }
  485. return kStatus_Success;
  486. }
  487. static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
  488. enet_qos_config_t *config,
  489. const enet_qos_buffer_config_t *bufferConfig,
  490. bool intRxEnable,
  491. uint8_t channel)
  492. {
  493. uint16_t j;
  494. uint32_t reg;
  495. enet_qos_rx_bd_struct_t *rxbdPtr;
  496. uint16_t index;
  497. bool doubleBuffEnable = ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U) ? true : false;
  498. const enet_qos_buffer_config_t *buffCfg = bufferConfig;
  499. uint32_t control = ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
  500. if (buffCfg == NULL)
  501. {
  502. return kStatus_InvalidArgument;
  503. }
  504. if (intRxEnable)
  505. {
  506. control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
  507. }
  508. if (doubleBuffEnable)
  509. {
  510. control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
  511. }
  512. /* Do not give ownership to the DMA before the Rx buffer is ready. */
  513. if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
  514. {
  515. control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
  516. }
  517. /* Check the ring length. */
  518. if (buffCfg->rxRingLen < ENET_QOS_MIN_RINGLEN)
  519. {
  520. return kStatus_InvalidArgument;
  521. }
  522. /* Set the rx descriptor start/tail pointer, shall be word aligned. */
  523. base->DMA_CH[channel].DMA_CHX_RXDESC_LIST_ADDR =
  524. (uint32_t)buffCfg->rxDescStartAddrAlign & ENET_QOS_DMA_CHX_RXDESC_LIST_ADDR_RDESLA_MASK;
  525. base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR =
  526. (uint32_t)buffCfg->rxDescTailAddrAlign & ENET_QOS_DMA_CHX_RXDESC_TAIL_PTR_RDTP_MASK;
  527. base->DMA_CH[channel].DMA_CHX_RXDESC_RING_LENGTH =
  528. ((uint32_t)buffCfg->rxRingLen - 1U) & ENET_QOS_DMA_CHX_RXDESC_RING_LENGTH_RDRL_MASK;
  529. reg = base->DMA_CH[channel].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y_MASK;
  530. reg |= ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y(buffCfg->rxBuffSizeAlign >> ENET_QOS_RXBUFF_IGNORELSB_BITS);
  531. base->DMA_CH[channel].DMA_CHX_RX_CTRL = reg;
  532. /* Init the rxbdPtr to the receive descriptor start address. */
  533. rxbdPtr = (enet_qos_rx_bd_struct_t *)(buffCfg->rxDescStartAddrAlign);
  534. for (j = 0U; j < buffCfg->rxRingLen; j++)
  535. {
  536. if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
  537. {
  538. if (doubleBuffEnable)
  539. {
  540. index = 2U * j;
  541. }
  542. else
  543. {
  544. index = j;
  545. }
  546. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  547. buffCfg->rxBufferStartAddr[index] =
  548. MEMORY_ConvertMemoryMapAddress((uint32_t)buffCfg->rxBufferStartAddr[index], kMEMORY_Local2DMA);
  549. #endif
  550. rxbdPtr->buff1Addr = buffCfg->rxBufferStartAddr[index];
  551. /* The second buffer is set to 0 because it is not required in the normal case. */
  552. if (doubleBuffEnable)
  553. {
  554. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  555. buffCfg->rxBufferStartAddr[index + 1U] =
  556. MEMORY_ConvertMemoryMapAddress((uint32_t)buffCfg->rxBufferStartAddr[index + 1U], kMEMORY_Local2DMA);
  557. #endif
  558. rxbdPtr->buff2Addr = buffCfg->rxBufferStartAddr[index + 1U];
  559. }
  560. else
  561. {
  562. rxbdPtr->buff2Addr = 0;
  563. }
  564. }
  565. /* Set the valid and DMA own flag.*/
  566. rxbdPtr->control = control;
  567. rxbdPtr++;
  568. }
  569. return kStatus_Success;
  570. }
  571. static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz)
  572. {
  573. assert(config != NULL);
  574. assert(config->ptpConfig != NULL);
  575. assert(refClk_Hz != 0U);
  576. uint32_t control = 0U;
  577. status_t result = kStatus_Success;
  578. enet_qos_ptp_config_t *ptpConfig = config->ptpConfig;
  579. uint32_t ptpClk_Hz = refClk_Hz;
  580. uint32_t ssInc, snsSinc;
  581. /* Clear the timestamp interrupt first. */
  582. base->MAC_INTERRUPT_ENABLE &= ~ENET_QOS_MAC_INTERRUPT_ENABLE_TSIE_MASK;
  583. if (ptpConfig->fineUpdateEnable)
  584. {
  585. control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCFUPDT_MASK;
  586. ptpClk_Hz = ptpConfig->systemTimeClock_Hz; /* PTP clock 50MHz. */
  587. }
  588. /* Enable the IEEE 1588 timestamping and snapshot for event message. */
  589. control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV4ENA_MASK |
  590. ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV6ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENALL_MASK |
  591. ENET_QOS_MAC_TIMESTAMP_CONTROL_TSEVNTENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_SNAPTYPSEL_MASK |
  592. ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR(ptpConfig->tsRollover);
  593. if (ptpConfig->ptp1588V2Enable)
  594. {
  595. control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSVER2ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPENA_MASK;
  596. }
  597. /* Initialize the sub-second increment register. */
  598. if (ptpConfig->tsRollover == kENET_QOS_DigitalRollover)
  599. {
  600. ssInc = (uint32_t)(((uint64_t)ENET_QOS_NANOSECS_ONESECOND << 8U) / ptpClk_Hz);
  601. }
  602. else
  603. {
  604. ssInc = (uint32_t)((((uint64_t)ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK + 1U) << 8U) / ptpClk_Hz);
  605. }
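/* ssInc now holds the sub-second counter increment per PTP clock tick as an 8.8 fixed-point
 * value: the upper byte is the integer part programmed into SSINC and the lower byte is the
 * 1/256 fractional part programmed into SNSINC, split below. */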
  606. snsSinc = ssInc & 0xFFU;
  607. ssInc = (ssInc >> 8U) & 0xFFU;
  608. base->MAC_TIMESTAMP_CONTROL = control;
  609. /* Set the initial system time to zero. */
  610. base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = 0;
  611. /* Set the second.*/
  612. base->MAC_SYSTEM_TIME_SECONDS_UPDATE = 0;
  613. base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS = 0;
  614. /* Initialize the system timer. */
  615. base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK;
  616. while ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK) != 0U)
  617. {
  618. }
  619. base->MAC_SUB_SECOND_INCREMENT =
  620. ENET_QOS_MAC_SUB_SECOND_INCREMENT_SSINC(ssInc) | ENET_QOS_MAC_SUB_SECOND_INCREMENT_SNSINC(snsSinc);
  621. /* Set the initial added value for the fine update. */
  622. if (ptpConfig->fineUpdateEnable)
  623. {
  624. result = ENET_QOS_Ptp1588CorrectTimerInFine(base, ptpConfig->defaultAddend);
  625. }
  626. return result;
  627. }
  628. static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing)
  629. {
  630. return !txDirtyRing->isFull;
  631. }
  632. static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
  633. enet_qos_handle_t *handle,
  634. enet_qos_rx_bd_struct_t *rxDesc,
  635. enet_qos_ptp_time_t *ts)
  636. {
  637. assert(ts != NULL);
  638. uint32_t nanosecond;
  639. /* Get the receive timestamp nanoseconds from the descriptor. */
  640. nanosecond = rxDesc->buff1Addr;
  641. if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
  642. {
  643. /* Binary rollover, 0.465ns accuracy. */
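/* In binary rollover mode the sub-second counter counts 2^31 per second, so one count
 * equals 10^9 / 2^31, i.e. about 0.465 ns; multiplying by 465/1000 converts the counter
 * value to nanoseconds. */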
  644. nanosecond = (uint32_t)(((uint64_t)nanosecond * 465U) / 1000U);
  645. }
  646. ts->second = rxDesc->reserved;
  647. ts->nanosecond = nanosecond;
  648. }
  649. uint32_t ENET_QOS_GetInstance(ENET_QOS_Type *base)
  650. {
  651. uint32_t instance;
  652. /* Find the instance index from base address mappings. */
  653. for (instance = 0; instance < ARRAY_SIZE(s_enetqosBases); instance++)
  654. {
  655. if (s_enetqosBases[instance] == base)
  656. {
  657. break;
  658. }
  659. }
  660. assert(instance < ARRAY_SIZE(s_enetqosBases));
  661. return instance;
  662. }
  663. /*!
  664. * brief Gets the ENET default configuration structure.
  665. *
  666. * The purpose of this API is to get the default ENET configuration
  667. * structure for ENET_QOS_Init(). The user may use the initialized
  668. * structure unchanged in ENET_QOS_Init(), or modify some fields of the
  669. * structure before calling ENET_QOS_Init().
  670. * Example:
  671. code
  672. enet_qos_config_t config;
  673. ENET_QOS_GetDefaultConfig(&config);
  674. endcode
  675. * param config The ENET mac controller configuration structure pointer.
  676. */
  677. void ENET_QOS_GetDefaultConfig(enet_qos_config_t *config)
  678. {
  679. /* Checks input parameter. */
  680. assert(config != NULL);
  681. /* Initializes the configure structure to zero. */
  682. (void)memset(config, 0, sizeof(*config));
  683. /* Sets RGMII mode, full duplex, 1000Mbps for MAC and PHY data interface. */
  684. config->miiMode = kENET_QOS_RgmiiMode;
  685. config->miiSpeed = kENET_QOS_MiiSpeed1000M;
  686. config->miiDuplex = kENET_QOS_MiiFullDuplex;
  687. /* Sets default configuration for other options. */
  688. config->specialControl = 0;
  689. config->multiqueueCfg = NULL;
  690. config->pauseDuration = 0;
  691. config->ptpConfig = NULL;
  692. }
  693. /*!
  694. * brief Sets up the ENET MAC and MTL with the basic configuration.
  695. *
  696. * Unlike ENET_QOS_Init(), this function does not ungate the module clock or reset the DMA.
  697. *
  698. * param base ENET peripheral base address.
  699. * param config ENET mac configuration structure pointer.
  700. * The "enet_qos_config_t" type mac configuration return from ENET_QOS_GetDefaultConfig
  701. * can be used directly. It is also possible to verify the Mac configuration using other methods.
  702. * param macAddr ENET mac address of Ethernet device. This MAC address should be
  703. * provided.
  704. * param refclkSrc_Hz ENET input reference clock.
  705. */
  706. status_t ENET_QOS_Up(
  707. ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
  708. {
  709. assert(config != NULL);
  710. status_t result = kStatus_Success;
  711. /* Initializes the ENET MTL with basic function. */
  712. ENET_QOS_SetMTL(base, config);
  713. /* Initializes the ENET MAC with basic function. */
  714. ENET_QOS_SetMacControl(base, config, macAddr, macCount);
  715. return result;
  716. }
  717. /*!
  718. * brief Initializes the ENET module.
  719. *
  720. * This function ungates the module clock and initializes it with the ENET basic
  721. * configuration.
  722. *
  723. * param base ENET peripheral base address.
  724. * param config ENET mac configuration structure pointer.
  725. * The "enet_qos_config_t" type mac configuration return from ENET_QOS_GetDefaultConfig
  726. * can be used directly. It is also possible to verify the Mac configuration using other methods.
  727. * param macAddr ENET mac address of Ethernet device. This MAC address should be
  728. * provided.
  729. * param refclkSrc_Hz ENET input reference clock.
  730. */
  731. status_t ENET_QOS_Init(
  732. ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
  733. {
  734. assert(config != NULL);
  735. status_t result = kStatus_Success;
  736. uint32_t instance = ENET_QOS_GetInstance(base);
  737. #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
  738. /* Ungate ENET clock. */
  739. (void)CLOCK_EnableClock(s_enetqosClock[instance]);
  740. #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
  741. /* Configure the system level settings first. */
  742. ENET_QOS_SetSYSControl(config->miiMode);
  743. /* Initializes the ENET DMA with basic function. */
  744. ENET_QOS_SetDMAControl(base, config);
  745. (void)ENET_QOS_Up(base, config, macAddr, macCount, refclkSrc_Hz);
  746. if (config->ptpConfig != NULL)
  747. {
  748. result = ENET_QOS_SetPtp1588(base, config, refclkSrc_Hz);
  749. }
  750. return result;
  751. }
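/*
 * Typical bring-up sketch for ENET_QOS_Init(). ENET is the peripheral instance macro from
 * the device header, the MAC address is a placeholder, and the EXAMPLE_* clock macros stand
 * in for board-specific CSR and PTP reference clock frequencies.
 *
 * code
 * enet_qos_config_t config;
 * uint8_t macAddr[6] = {0x54U, 0x27U, 0x8EU, 0x00U, 0x00U, 0x01U};
 * ENET_QOS_GetDefaultConfig(&config);
 * config.csrClock_Hz = EXAMPLE_CSR_CLOCK_HZ;
 * if (ENET_QOS_Init(ENET, &config, &macAddr[0], 1U, EXAMPLE_PTP_REF_CLOCK_HZ) != kStatus_Success)
 * {
 *     // handle the initialization failure
 * }
 * endcode
 */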
  752. /*!
  753. * brief Stops the ENET module.
  754. * This function disables the ENET module.
  755. *
  756. * param base ENET peripheral base address.
  757. */
  758. void ENET_QOS_Down(ENET_QOS_Type *base)
  759. {
  760. enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
  761. enet_qos_tx_bd_struct_t *txbdPtr;
  762. uint8_t index;
  763. uint32_t primask, j;
  764. /* Disable all interrupts */
  765. ENET_QOS_DisableInterrupts(base, 0xFF);
  766. for (index = 0; index < handle->txQueueUse; index++)
  767. {
  768. enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[index];
  769. enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[index];
  770. /* Clear pending descriptors */
  771. if (handle->callback != NULL)
  772. {
  773. while (txBdRing->txDescUsed > 0U)
  774. {
  775. enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
  776. txDirty->isTsAvail = false;
  777. handle->callback(base, handle, kENET_QOS_TxIntEvent, index, handle->userData);
  778. primask = DisableGlobalIRQ();
  779. txBdRing->txDescUsed--;
  780. EnableGlobalIRQ(primask);
  781. }
  782. }
  783. /* Disable Tx DMA */
  784. base->DMA_CH[index].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
  785. /* Flush Tx Queue */
  786. base->MTL_QUEUE[index].MTL_TXQX_OP_MODE |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
  787. /* Wait until Tx Queue is empty */
  788. while ((base->MTL_QUEUE[index].MTL_TXQX_DBG &
  789. (ENET_QOS_MTL_TXQX_DBG_TXQSTS_MASK | ENET_QOS_MTL_TXQX_DBG_PTXQ_MASK)) != 0U)
  790. {
  791. }
  792. /* Reset hardware ring buffer */
  793. base->DMA_CH[index].DMA_CHX_TXDESC_LIST_ADDR =
  794. (uint32_t)handle->txBdRing[index].txBdBase & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
  795. /* Reset software ring buffer */
  796. handle->txBdRing[index].txGenIdx = 0;
  797. handle->txBdRing[index].txConsumIdx = 0;
  798. handle->txBdRing[index].txDescUsed = 0;
  799. handle->txDirtyRing[index].txGenIdx = 0;
  800. handle->txDirtyRing[index].txConsumIdx = 0;
  801. handle->txDirtyRing[index].isFull = false;
  802. txbdPtr = (enet_qos_tx_bd_struct_t *)(handle->txBdRing[index].txBdBase);
  803. for (j = 0; j < handle->txBdRing[index].txRingLen; j++)
  804. {
  805. txbdPtr->buff1Addr = 0;
  806. txbdPtr->buff2Addr = 0;
  807. txbdPtr->buffLen = 0;
  808. txbdPtr->controlStat = 0;
  809. txbdPtr++;
  810. }
  811. }
  812. /* Disable MAC Rx/Tx */
  813. base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
  814. /* Disable Rx DMA */
  815. for (index = 0; index < handle->rxQueueUse; index++)
  816. {
  817. base->DMA_CH[index].DMA_CHX_RX_CTRL &= ~ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
  818. }
  819. }
  820. /*!
  821. * brief Deinitializes the ENET module.
  822. * This function gates the module clock and disables the ENET module.
  823. *
  824. * param base ENET peripheral base address.
  825. */
  826. void ENET_QOS_Deinit(ENET_QOS_Type *base)
  827. {
  828. /* Reset first and wait for the reset to complete.
  829. * The reset bit is cleared automatically once the reset completes. */
  830. base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
  831. while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
  832. {
  833. }
  834. #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
  835. /* Disables the clock source. */
  836. (void)CLOCK_DisableClock(s_enetqosClock[ENET_QOS_GetInstance(base)]);
  837. #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
  838. }
  839. /*!
  840. * brief Initializes all ENET tx/rx descriptors.
  841. *
  842. * note This function performs all tx/rx descriptor initialization. Because this API
  843. * reads the per-channel interrupt enable registers first and then sets the interrupt
  844. * flag in the descriptors accordingly, the descriptor initialization should be called
  845. * after ENET_QOS_Init(), ENET_QOS_EnableInterrupts() and ENET_QOS_CreateHandle() (if the
  846. * transactional APIs are used).
  847. *
  848. * param base ENET peripheral base address.
  849. * param config The configuration for ENET.
  850. * param bufferConfig All buffers configuration.
  851. */
  852. status_t ENET_QOS_DescriptorInit(ENET_QOS_Type *base, enet_qos_config_t *config, enet_qos_buffer_config_t *bufferConfig)
  853. {
  854. assert(config != NULL);
  855. assert(bufferConfig != NULL);
  856. bool intTxEnable = false;
  857. bool intRxEnable = false;
  858. uint8_t ringNum = 1;
  859. uint8_t txQueueUse = 1;
  860. uint8_t rxQueueUse = 1;
  861. uint8_t channel;
  862. if (config->multiqueueCfg != NULL)
  863. {
  864. ringNum = MAX(config->multiqueueCfg->txQueueUse, config->multiqueueCfg->rxQueueUse);
  865. txQueueUse = config->multiqueueCfg->txQueueUse;
  866. rxQueueUse = config->multiqueueCfg->rxQueueUse;
  867. }
  868. for (channel = 0; channel < ringNum; channel++)
  869. {
  870. intRxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK) != 0U) ? true : false;
  871. intTxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_TIE_MASK) != 0U) ? true : false;
  872. if (channel < txQueueUse)
  873. {
  874. if ((ENET_QOS_TxDescriptorsInit(base, bufferConfig, intTxEnable, channel) != kStatus_Success))
  875. {
  876. return kStatus_Fail;
  877. }
  878. }
  879. if (channel < rxQueueUse)
  880. {
  881. if ((ENET_QOS_RxDescriptorsInit(base, config, bufferConfig, intRxEnable, channel) != kStatus_Success))
  882. {
  883. return kStatus_Fail;
  884. }
  885. }
  886. bufferConfig++;
  887. }
  888. return kStatus_Success;
  889. }
  890. /*!
  891. * brief Allocates Rx buffers for all BDs.
  892. * It's used for zero-copy Rx. In the zero-copy Rx case, Rx buffers are dynamic. This function
  893. * populates the initial buffers in all BDs for receiving. ENET_QOS_GetRxFrame() is then used
  894. * to get Rx frames with zero copy; it allocates a new buffer to replace the buffer in the BD
  895. * taken by the application, and the application should free those buffers after they're used.
  896. *
  897. * note This function should be called after ENET_QOS_CreateHandler(), and the buffer allocating
  898. * callback function should be ready.
  899. *
  900. * param base ENET_QOS peripheral base address.
  901. * param handle The ENET_QOS handler structure. This is the same handler pointer used in the ENET_QOS_Init.
  902. */
  903. status_t ENET_QOS_RxBufferAllocAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
  904. {
  905. status_t result = kStatus_Success;
  906. enet_qos_rx_bd_struct_t *rxbdPtr;
  907. uint32_t buffAddr;
  908. uint8_t channel;
  909. uint16_t index;
  910. uint16_t j;
  911. if ((handle->rxBuffAlloc == NULL) || (handle->rxBuffFree == NULL))
  912. {
  913. return kStatus_ENET_QOS_InitMemoryFail;
  914. }
  915. for (channel = 0; channel < handle->rxQueueUse; channel++)
  916. {
  917. /* Init the rxbdPtr to the receive descriptor start address. */
  918. rxbdPtr = handle->rxBdRing[channel].rxBdBase;
  919. for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
  920. {
  921. if (handle->doubleBuffEnable)
  922. {
  923. index = 2U * j;
  924. }
  925. else
  926. {
  927. index = j;
  928. }
  929. buffAddr = (uint32_t)(uint32_t *)handle->rxBuffAlloc(base, handle->userData, channel);
  930. if (buffAddr == 0U)
  931. {
  932. result = kStatus_ENET_QOS_InitMemoryFail;
  933. break;
  934. }
  935. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  936. buffAddr = MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
  937. #endif
  938. rxbdPtr->buff1Addr = buffAddr;
  939. handle->rxBufferStartAddr[channel][index] = buffAddr;
  940. /* The second buffer is set to 0 because it is not required in the normal case. */
  941. if (handle->doubleBuffEnable)
  942. {
  943. buffAddr = (uint32_t)(uint32_t *)handle->rxBuffAlloc(base, handle->userData, channel);
  944. if (buffAddr == 0U)
  945. {
  946. result = kStatus_ENET_QOS_InitMemoryFail;
  947. break;
  948. }
  949. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  950. buffAddr = MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
  951. #endif
  952. rxbdPtr->buff2Addr = buffAddr;
  953. handle->rxBufferStartAddr[channel][index + 1U] = buffAddr;
  954. }
  955. else
  956. {
  957. rxbdPtr->buff2Addr = 0;
  958. }
  959. /* Set the valid and DMA own flag.*/
  960. rxbdPtr->control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
  961. rxbdPtr++;
  962. }
  963. }
  964. if (result == kStatus_ENET_QOS_InitMemoryFail)
  965. {
  966. ENET_QOS_RxBufferFreeAll(base, handle);
  967. }
  968. return result;
  969. }
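/*
 * Sketch of the zero-copy Rx buffer callbacks. The signatures are inferred from the call
 * sites above (rxBuffAlloc(base, userData, channel) returning a buffer pointer, and
 * rxBuffFree(base, buffer, userData, channel)); the exact callback typedefs are declared in
 * fsl_enet_qos.h. ExamplePoolGet()/ExamplePoolPut() are assumed helpers backed by a
 * DMA-capable, properly aligned buffer pool.
 *
 * code
 * static void *ExampleRxBuffAlloc(ENET_QOS_Type *base, void *userData, uint8_t channel)
 * {
 *     return ExamplePoolGet(); // returns NULL when the pool is exhausted
 * }
 *
 * static void ExampleRxBuffFree(ENET_QOS_Type *base, void *buffer, void *userData, uint8_t channel)
 * {
 *     ExamplePoolPut(buffer);
 * }
 * endcode
 */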
  970. /*!
  971. * brief Frees Rx buffers in all BDs.
  972. * It's used for zero-copy Rx. In the zero-copy Rx case, Rx buffers are dynamic. This function
  973. * frees the remaining buffers in all BDs.
  974. *
  975. * param base ENET_QOS peripheral base address.
  976. * param handle The ENET_QOS handler structure. This is the same handler pointer used in the ENET_QOS_Init.
  977. */
  978. void ENET_QOS_RxBufferFreeAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
  979. {
  980. uint32_t buffAddr;
  981. uint8_t channel;
  982. uint16_t index;
  983. uint16_t j;
  984. if (handle->rxBuffFree != NULL)
  985. {
  986. for (channel = 0; channel < handle->rxQueueUse; channel++)
  987. {
  988. for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
  989. {
  990. if (handle->doubleBuffEnable)
  991. {
  992. index = 2U * j;
  993. }
  994. else
  995. {
  996. index = j;
  997. }
  998. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  999. buffAddr = MEMORY_ConvertMemoryMapAddress((uint32_t)handle->rxBufferStartAddr[channel][index],
  1000. kMEMORY_DMA2Local);
  1001. #else
  1002. buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index];
  1003. #endif
  1004. if (buffAddr != 0U)
  1005. {
  1006. handle->rxBuffFree(base, (void *)(uint32_t *)buffAddr, handle->userData, channel);
  1007. }
  1008. /* Free the second buffer when the double buffer mode is enabled. */
  1009. if (handle->doubleBuffEnable)
  1010. {
  1011. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1012. buffAddr = MEMORY_ConvertMemoryMapAddress((uint32_t)handle->rxBufferStartAddr[channel][index + 1],
  1013. kMEMORY_DMA2Local);
  1014. #else
  1015. buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index + 1U];
  1016. #endif
  1017. if (buffAddr != 0U)
  1018. {
  1019. handle->rxBuffFree(base, (void *)(uint32_t *)buffAddr, handle->userData, channel);
  1020. }
  1021. }
  1022. }
  1023. }
  1024. }
  1025. }
  1026. /*!
  1027. * brief Starts the ENET rx/tx.
1028. * This function enables the MAC tx/rx and starts the rx/tx DMA.
1029. * It shall be called after ENET initialization and before
1030. * starting to receive data.
  1031. *
  1032. * param base ENET peripheral base address.
  1033. * param rxRingNum The number of the used rx rings. It shall not be
  1034. * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
  1035. * 1, the ring 0 will be used.
  1036. * param txRingNum The number of the used tx rings. It shall not be
  1037. * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
  1038. * 1, the ring 0 will be used.
  1039. *
1040. * note This must be called after all of the ENET initialization
1041. * and when the ENET receive/transmit is required.
  1042. */
  1043. void ENET_QOS_StartRxTx(ENET_QOS_Type *base, uint8_t txRingNum, uint8_t rxRingNum)
  1044. {
  1045. assert(txRingNum != 0U);
  1046. assert(rxRingNum != 0U);
  1047. uint8_t index;
  1048. if (txRingNum > ENET_QOS_RING_NUM_MAX)
  1049. {
  1050. txRingNum = ENET_QOS_RING_NUM_MAX;
  1051. }
  1052. if (rxRingNum > ENET_QOS_RING_NUM_MAX)
  1053. {
  1054. rxRingNum = ENET_QOS_RING_NUM_MAX;
  1055. }
1056. /* Start/Activate the DMA first. */
  1057. for (index = 0; index < rxRingNum; index++)
  1058. {
  1059. base->DMA_CH[index].DMA_CHX_RX_CTRL |= ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
  1060. }
  1061. for (index = 0; index < txRingNum; index++)
  1062. {
  1063. base->DMA_CH[index].DMA_CHX_TX_CTRL |= ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
  1064. }
  1065. /* Enable the RX and TX at same time. */
  1066. base->MAC_CONFIGURATION |= (ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
  1067. }
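/*
 * Usage sketch (illustrative, not part of the driver): after the MAC, DMA and
 * descriptors have been initialized, a single-queue application would typically
 * start one tx ring and one rx ring, so ring 0 is used for both directions.
 * code
 * ENET_QOS_StartRxTx(ENET, 1, 1);
 * endcode
 */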
  1068. /*!
  1069. * brief Enables the ENET DMA and MAC interrupts.
  1070. *
  1071. * This function enables the ENET interrupt according to the provided mask. The mask
  1072. * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
  1073. * For example, to enable the dma and mac interrupt, do the following.
  1074. * code
  1075. * ENET_QOS_EnableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
  1076. * endcode
  1077. *
  1078. * param base ENET peripheral base address.
  1079. * param mask ENET interrupts to enable. This is a logical OR of both
  1080. * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
  1081. */
  1082. void ENET_QOS_EnableInterrupts(ENET_QOS_Type *base, uint32_t mask)
  1083. {
  1084. uint32_t interrupt = mask & 0xFFFFU;
  1085. uint8_t index;
  1086. /* For dma interrupt. */
  1087. if (interrupt != 0U)
  1088. {
  1089. for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
  1090. {
  1091. /* Set for all abnormal interrupts. */
  1092. if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
  1093. {
  1094. interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
  1095. }
  1096. /* Set for all normal interrupts. */
  1097. if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
  1098. {
  1099. interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
  1100. }
  1101. base->DMA_CH[index].DMA_CHX_INT_EN = interrupt;
  1102. }
  1103. }
  1104. interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
  1105. if (interrupt != 0U)
  1106. {
  1107. /* MAC interrupt */
  1108. base->MAC_INTERRUPT_ENABLE |= interrupt;
  1109. }
  1110. }
  1111. /*!
  1112. * brief Clears the ENET mac interrupt events status flag.
  1113. *
  1114. * This function clears enabled ENET interrupts according to the provided mask. The mask
  1115. * is a logical OR of enumeration members. See the ref enet_qos_mac_interrupt_enable_t.
1116. * For example, to clear the PMT interrupt status, do the following.
  1117. * code
  1118. * ENET_QOS_ClearMacInterruptStatus(ENET, kENET_QOS_MacPmt);
  1119. * endcode
  1120. *
  1121. * param base ENET peripheral base address.
  1122. * param mask ENET interrupt source to be cleared.
  1123. * This is the logical OR of members of the enumeration :: enet_qos_mac_interrupt_enable_t.
  1124. */
  1125. void ENET_QOS_ClearMacInterruptStatus(ENET_QOS_Type *base, uint32_t mask)
  1126. {
  1127. volatile uint32_t dummy;
  1128. if ((mask & (uint32_t)kENET_QOS_MacTimestamp) != 0U)
  1129. {
  1130. dummy = base->MAC_TIMESTAMP_STATUS;
  1131. }
  1132. else if ((mask & (uint32_t)kENET_QOS_MacPmt) != 0U)
  1133. {
  1134. dummy = base->MAC_PMT_CONTROL_STATUS;
  1135. }
  1136. else
  1137. {
1138. /* Empty else added to satisfy MISRA 2004 rule 14.10. */
  1139. }
  1140. (void)dummy;
  1141. }
  1142. /*!
  1143. * brief Disables the ENET DMA and MAC interrupts.
  1144. *
  1145. * This function disables the ENET interrupt according to the provided mask. The mask
  1146. * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
  1147. * For example, to disable the dma and mac interrupt, do the following.
  1148. * code
  1149. * ENET_QOS_DisableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
  1150. * endcode
  1151. *
  1152. * param base ENET peripheral base address.
1153. * param mask ENET interrupts to disable. This is a logical OR of both
  1154. * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
  1155. */
  1156. void ENET_QOS_DisableInterrupts(ENET_QOS_Type *base, uint32_t mask)
  1157. {
  1158. uint32_t interrupt = mask & 0xFFFFU;
  1159. uint8_t index;
  1160. /* For dma interrupt. */
  1161. if (interrupt != 0U)
  1162. {
  1163. for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
  1164. {
1165. /* Also clear the abnormal interrupt summary enable. */
  1166. if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
  1167. {
  1168. interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
  1169. }
1170. /* Also clear the normal interrupt summary enable. */
  1171. if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
  1172. {
  1173. interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
  1174. }
  1175. base->DMA_CH[index].DMA_CHX_INT_EN &= ~interrupt;
  1176. }
  1177. }
  1178. interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
  1179. if (interrupt != 0U)
  1180. {
  1181. /* MAC interrupt */
  1182. base->MAC_INTERRUPT_ENABLE &= ~interrupt;
  1183. }
  1184. }
  1185. /*!
1186. * @brief Sets the second level IRQ handler, allowing the user to override the default
1187. * second level weak IRQ handler.
1188. *
* @param base ENET_QOS peripheral base address.
1189. * @param ISRHandler The handler to install.
  1190. */
  1191. void ENET_QOS_SetISRHandler(ENET_QOS_Type *base, enet_qos_isr_t ISRHandler)
  1192. {
  1193. /* Update IRQ entry. */
  1194. s_enetqosIsr = ISRHandler;
  1195. /* Enable NVIC. */
  1196. (void)EnableIRQ(s_enetqosIrqId[ENET_QOS_GetInstance(base)]);
  1197. }
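/*
 * Usage sketch (illustrative, not part of the driver): an application that wants its own
 * second level handler instead of the default weak one can install it as shown below.
 * ENETQOS_AppIRQHandler is a hypothetical user function matching the enet_qos_isr_t prototype.
 * code
 * static void ENETQOS_AppIRQHandler(ENET_QOS_Type *base, enet_qos_handle_t *handle)
 * {
 *     // Application specific interrupt processing goes here.
 * }
 *
 * ENET_QOS_SetISRHandler(ENET, ENETQOS_AppIRQHandler);
 * endcode
 */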
  1198. /*!
1199. * brief Creates the ENET handler.
1200. *
1201. * This is a transactional API. It stores all of the data needed
1202. * during the whole transactional process and should not be used when the
1203. * functional APIs are used for data tx/rx. Because this function stores state and flags
1204. * for transactional use, it should be called after configuration APIs such as
1205. * ENET_QOS_Init(), ENET_QOS_DescriptorInit(), ENET_QOS_EnableInterrupts(), etc.
1206. *
1207. * note Because the transactional transmit API uses zero-copy transmit buffers,
1208. * two points are emphasized here:
1209. * 1. The application's tx buffer free/requeue should be done in the tx
1210. * interrupt handler. Please handle the callback event kENET_QOS_TxIntEvent with the tx buffer
1211. * free/requeue process APIs.
1212. * 2. The tx interrupt is forced on.
  1213. *
  1214. * param base ENET peripheral base address.
  1215. * param handle ENET handler.
  1216. * param config ENET configuration.
  1217. * param bufferConfig ENET buffer configuration.
  1218. * param callback The callback function.
  1219. * param userData The application data.
  1220. */
  1221. void ENET_QOS_CreateHandler(ENET_QOS_Type *base,
  1222. enet_qos_handle_t *handle,
  1223. enet_qos_config_t *config,
  1224. enet_qos_buffer_config_t *bufferConfig,
  1225. enet_qos_callback_t callback,
  1226. void *userData)
  1227. {
  1228. assert(config != NULL);
  1229. assert(bufferConfig != NULL);
  1230. assert(callback != NULL);
  1231. uint8_t ringNum = 1;
  1232. uint8_t count = 0;
  1233. uint32_t rxIntEnable = 0;
  1234. uint8_t txQueueUse = 1;
  1235. uint8_t rxQueueUse = 1;
  1236. enet_qos_buffer_config_t *buffConfig = bufferConfig;
  1237. /* Store transfer parameters in handle pointer. */
  1238. (void)memset(handle, 0, sizeof(enet_qos_handle_t));
  1239. if (config->multiqueueCfg != NULL)
  1240. {
  1241. txQueueUse = config->multiqueueCfg->txQueueUse;
  1242. rxQueueUse = config->multiqueueCfg->rxQueueUse;
  1243. ringNum = MAX(txQueueUse, rxQueueUse);
  1244. }
  1245. handle->txQueueUse = txQueueUse;
  1246. handle->rxQueueUse = rxQueueUse;
  1247. if ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U)
  1248. {
  1249. handle->doubleBuffEnable = true;
  1250. }
  1251. for (count = 0; count < ringNum; count++)
  1252. {
  1253. if (count < txQueueUse)
  1254. {
  1255. handle->txBdRing[count].txBdBase = buffConfig->txDescStartAddrAlign;
  1256. handle->txBdRing[count].txRingLen = buffConfig->txRingLen;
  1257. handle->txBdRing[count].txGenIdx = 0;
  1258. handle->txBdRing[count].txConsumIdx = 0;
  1259. handle->txBdRing[count].txDescUsed = 0;
  1260. handle->txDirtyRing[count].txDirtyBase = buffConfig->txDirtyStartAddr;
  1261. handle->txDirtyRing[count].txRingLen = buffConfig->txRingLen;
  1262. handle->txDirtyRing[count].txGenIdx = 0;
  1263. handle->txDirtyRing[count].txConsumIdx = 0;
  1264. /* Enable tx interrupt for use transactional API to do tx buffer free/requeue. */
  1265. base->DMA_CH[count].DMA_CHX_INT_EN |= ENET_QOS_DMA_CHX_INT_EN_TIE_MASK | ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
  1266. }
  1267. if (count < rxQueueUse)
  1268. {
  1269. handle->rxBdRing[count].rxBdBase = buffConfig->rxDescStartAddrAlign;
  1270. handle->rxBdRing[count].rxGenIdx = 0;
  1271. handle->rxBdRing[count].rxRingLen = buffConfig->rxRingLen;
  1272. handle->rxBdRing[count].rxBuffSizeAlign = buffConfig->rxBuffSizeAlign;
  1273. /* Record rx buffer address for re-init Rx buffer descriptor */
  1274. handle->rxBufferStartAddr[count] = buffConfig->rxBufferStartAddr;
  1275. /* Record rx buffer need cache maintain */
  1276. handle->rxMaintainEnable[count] = buffConfig->rxBuffNeedMaintain;
1277. /* Check if the rx interrupt is enabled. */
  1278. rxIntEnable |= (base->DMA_CH[count].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK);
  1279. }
  1280. buffConfig++;
  1281. }
  1282. handle->rxintEnable = (rxIntEnable != 0U) ? true : false;
  1283. /* Save the handle pointer in the global variables. */
  1284. s_ENETHandle[ENET_QOS_GetInstance(base)] = handle;
  1285. /* Set Rx alloc/free callback. */
  1286. handle->rxBuffAlloc = config->rxBuffAlloc;
  1287. handle->rxBuffFree = config->rxBuffFree;
  1288. /* Set callback and userData. */
  1289. handle->callback = callback;
  1290. handle->userData = userData;
  1291. /* Use default ENET_QOS_CommonIRQHandler as default weak IRQ handler. */
  1292. ENET_QOS_SetISRHandler(base, ENET_QOS_CommonIRQHandler);
  1293. }
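/*
 * Usage sketch (illustrative, not part of the driver): the expected call order for the
 * transactional path. g_handle, config, bufferConfig and ENETQOS_AppCallback are
 * hypothetical application-side objects prepared beforehand (including the rxBuffAlloc
 * and rxBuffFree callbacks if zero-copy Rx is used).
 * code
 * // After ENET_QOS_Init() and ENET_QOS_DescriptorInit() have completed (see those APIs),
 * // bind the transactional state and then start the MAC/DMA:
 * ENET_QOS_CreateHandler(ENET, &g_handle, &config, &bufferConfig[0], ENETQOS_AppCallback, NULL);
 * ENET_QOS_StartRxTx(ENET, 1, 1);
 * endcode
 */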
  1294. /*!
  1295. * brief Gets the ENET module Mac address.
  1296. *
  1297. * param base ENET peripheral base address.
1298. * param macAddr The six-byte Mac address pointer.
1299. * The pointer is allocated by the application and passed into the API.
* param index The MAC address register index.
  1300. */
  1301. void ENET_QOS_GetMacAddr(ENET_QOS_Type *base, uint8_t *macAddr, uint8_t index)
  1302. {
  1303. assert(macAddr != NULL);
  1304. uint32_t address = base->MAC_ADDRESS[index].LOW;
  1305. /* Get from physical address lower register. */
  1306. macAddr[2] = (uint8_t)(0xFFU & (address >> 24U));
  1307. macAddr[3] = (uint8_t)(0xFFU & (address >> 16U));
  1308. macAddr[4] = (uint8_t)(0xFFU & (address >> 8U));
  1309. macAddr[5] = (uint8_t)(0xFFU & address);
  1310. /* Get from physical address high register. */
  1311. address = base->MAC_ADDRESS[index].HIGH;
  1312. macAddr[0] = (uint8_t)(0xFFU & (address >> 8U));
  1313. macAddr[1] = (uint8_t)(0xFFU & address);
  1314. }
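/*
 * Usage sketch (illustrative, not part of the driver): read back the primary MAC address
 * (register index 0) that was programmed during initialization.
 * code
 * uint8_t mac[6];
 * ENET_QOS_GetMacAddr(ENET, &mac[0], 0);
 * endcode
 */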
  1315. /*!
  1316. * brief Adds the ENET_QOS device to a multicast group.
  1317. *
  1318. * param base ENET_QOS peripheral base address.
  1319. * param address The six-byte multicast group address which is provided by application.
  1320. */
  1321. void ENET_QOS_AddMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
  1322. {
  1323. assert(address != NULL);
  1324. enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
  1325. uint32_t crc = 0xFFFFFFFFU;
  1326. uint32_t count1 = 0;
  1327. uint32_t count2 = 0;
  1328. /* Calculates the CRC-32 polynomial on the multicast group address. */
  1329. for (count1 = 0; count1 < 6U; count1++)
  1330. {
  1331. uint8_t c = address[count1];
  1332. for (count2 = 0; count2 < 0x08U; count2++)
  1333. {
  1334. if (((c ^ crc) & 1U) != 0U)
  1335. {
  1336. crc >>= 1U;
  1337. c >>= 1U;
  1338. crc ^= 0xEDB88320U;
  1339. }
  1340. else
  1341. {
  1342. crc >>= 1U;
  1343. c >>= 1U;
  1344. }
  1345. }
  1346. }
  1347. /* Calculate bitwise reverse value. */
  1348. crc = ENET_QOS_ReverseBits(~crc);
1349. /* Get the highest 6 bits. */
  1350. crc = crc >> 26U;
  1351. handle->multicastCount[crc]++;
  1352. if (0U != (crc & 0x20U))
  1353. {
  1354. base->MAC_HASH_TABLE_REG1 |= (1UL << (crc & 0x1FU));
  1355. }
  1356. else
  1357. {
  1358. base->MAC_HASH_TABLE_REG0 |= (1UL << (crc & 0x1FU));
  1359. }
  1360. }
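/*
 * Usage sketch (illustrative, not part of the driver): join a multicast group by its
 * destination MAC address. The driver keeps a per-hash reference count, so the matching
 * ENET_QOS_LeaveMulticastGroup() call only clears the hash bit when the last group using
 * that hash leaves. The address below (mDNS, 01:00:5E:00:00:FB) is just an example.
 * code
 * uint8_t groupAddr[6] = {0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB};
 * ENET_QOS_AddMulticastGroup(ENET, &groupAddr[0]);
 * ...
 * ENET_QOS_LeaveMulticastGroup(ENET, &groupAddr[0]);
 * endcode
 */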
  1361. /*!
1362. * brief Removes the ENET_QOS device from a multicast group.
  1363. *
  1364. * param base ENET_QOS peripheral base address.
  1365. * param address The six-byte multicast group address which is provided by application.
  1366. */
  1367. void ENET_QOS_LeaveMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
  1368. {
  1369. assert(address != NULL);
  1370. enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
  1371. uint32_t crc = 0xFFFFFFFFU;
  1372. uint32_t count1 = 0;
  1373. uint32_t count2 = 0;
  1374. /* Calculates the CRC-32 polynomial on the multicast group address. */
  1375. for (count1 = 0; count1 < 6U; count1++)
  1376. {
  1377. uint8_t c = address[count1];
  1378. for (count2 = 0; count2 < 0x08U; count2++)
  1379. {
  1380. if (((c ^ crc) & 1U) != 0U)
  1381. {
  1382. crc >>= 1U;
  1383. c >>= 1U;
  1384. crc ^= 0xEDB88320U;
  1385. }
  1386. else
  1387. {
  1388. crc >>= 1U;
  1389. c >>= 1U;
  1390. }
  1391. }
  1392. }
  1393. /* Calculate bitwise reverse value. */
  1394. crc = ENET_QOS_ReverseBits(~crc);
1395. /* Get the highest 6 bits. */
  1396. crc = crc >> 26U;
  1397. handle->multicastCount[crc]--;
1398. /* Clear the hash table bit if this was the last group using it. */
  1399. if (0U == handle->multicastCount[crc])
  1400. {
  1401. if (0U != (crc & 0x20U))
  1402. {
  1403. base->MAC_HASH_TABLE_REG1 &= ~((1UL << (crc & 0x1FU)));
  1404. }
  1405. else
  1406. {
  1407. base->MAC_HASH_TABLE_REG0 &= ~((1UL << (crc & 0x1FU)));
  1408. }
  1409. }
  1410. }
  1411. /*!
  1412. * brief Sets the ENET SMI(serial management interface)- MII management interface.
  1413. *
1414. * param base ENET peripheral base address.
* param csrClock_Hz The CSR clock frequency in Hz, used to select the MDC clock divider.
1415. */
  1416. void ENET_QOS_SetSMI(ENET_QOS_Type *base, uint32_t csrClock_Hz)
  1417. {
  1418. uint32_t crDiv = 0;
  1419. uint32_t srcClock_Hz = csrClock_Hz / 1000000U;
  1420. assert((srcClock_Hz >= 20U) && (srcClock_Hz < 800U));
  1421. if (srcClock_Hz < 35U)
  1422. {
  1423. crDiv = 2;
  1424. }
  1425. else if (srcClock_Hz < 60U)
  1426. {
  1427. crDiv = 3;
  1428. }
  1429. else if (srcClock_Hz < 100U)
  1430. {
  1431. crDiv = 0;
  1432. }
  1433. else if (srcClock_Hz < 150U)
  1434. {
  1435. crDiv = 1;
  1436. }
  1437. else if (srcClock_Hz < 250U)
  1438. {
  1439. crDiv = 4;
  1440. }
  1441. else if (srcClock_Hz < 300U)
  1442. {
  1443. crDiv = 5;
  1444. }
  1445. else if (srcClock_Hz < 500U)
  1446. {
  1447. crDiv = 6;
  1448. }
  1449. else if (srcClock_Hz < 800U)
  1450. {
  1451. crDiv = 7;
  1452. }
  1453. else
  1454. {
  1455. /* Empty else */
  1456. }
  1457. base->MAC_MDIO_ADDRESS = ENET_QOS_MAC_MDIO_ADDRESS_CR(crDiv);
  1458. }
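/*
 * Usage sketch (illustrative, not part of the driver): configure the MDC divider from the
 * CSR clock before any MDIO access. csrClock_Hz is an application-provided value, for
 * example obtained from the SoC clock driver; per the assert above it must be at least
 * 20 MHz and below 800 MHz.
 * code
 * uint32_t csrClock_Hz = 250000000U;   // assumed CSR clock frequency
 * ENET_QOS_SetSMI(ENET, csrClock_Hz);
 * endcode
 */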
  1459. /*!
  1460. * brief Starts a SMI write command.
  1461. * It supports MDIO IEEE802.3 Clause 22.
1462. * After sending the command, the user needs to check whether the transmission is over
  1463. * with ENET_QOS_IsSMIBusy().
  1464. *
  1465. * param base ENET peripheral base address.
  1466. * param phyAddr The PHY address.
  1467. * param phyReg The PHY register.
  1468. * param data The data written to PHY.
  1469. */
  1470. void ENET_QOS_StartSMIWrite(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t phyReg, uint32_t data)
  1471. {
  1472. uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
  1473. /* Build MII write command. */
  1474. base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiWriteFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
  1475. ENET_QOS_MAC_MDIO_ADDRESS_RDA(phyReg);
  1476. base->MAC_MDIO_DATA = data;
  1477. base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
  1478. }
  1479. /*!
  1480. * brief Starts an SMI read command.
  1481. * It supports MDIO IEEE802.3 Clause 22.
1482. * After sending the command, the user needs to check whether the transmission is over
  1483. * with ENET_QOS_IsSMIBusy().
  1484. *
  1485. * param base ENET peripheral base address.
  1486. * param phyAddr The PHY address.
  1487. * param phyReg The PHY register.
  1488. */
  1489. void ENET_QOS_StartSMIRead(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t phyReg)
  1490. {
  1491. uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
  1492. /* Build MII read command. */
  1493. base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiReadFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
  1494. ENET_QOS_MAC_MDIO_ADDRESS_RDA(phyReg);
  1495. base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
  1496. }
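/*
 * Usage sketch (illustrative, not part of the driver): a blocking Clause 22 PHY register
 * read. phyAddr and phyReg are application-supplied values, ENET_QOS_IsSMIBusy() is the
 * busy check referenced above, and ENET_QOS_ReadSMIData() is assumed to be the companion
 * data-read helper from the driver header.
 * code
 * uint16_t regValue;
 * ENET_QOS_StartSMIRead(ENET, phyAddr, phyReg);
 * while (ENET_QOS_IsSMIBusy(ENET))
 * {
 * }
 * regValue = ENET_QOS_ReadSMIData(ENET);
 * endcode
 */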
  1497. /*!
  1498. * brief Starts a SMI write command.
  1499. * It supports MDIO IEEE802.3 Clause 45.
1500. * After sending the command, the user needs to check whether the transmission is over
  1501. * with ENET_QOS_IsSMIBusy().
  1502. *
  1503. * param base ENET peripheral base address.
  1504. * param phyAddr The PHY address.
  1505. * param device The PHY device type.
  1506. * param phyReg The PHY register address.
  1507. * param data The data written to PHY.
  1508. */
  1509. void ENET_QOS_StartExtC45SMIWrite(
  1510. ENET_QOS_Type *base, uint32_t phyAddr, uint32_t device, uint32_t phyReg, uint32_t data)
  1511. {
  1512. uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
  1513. /* Build MII write command. */
  1514. base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiWriteFrame |
  1515. ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(device);
  1516. base->MAC_MDIO_DATA = data | ENET_QOS_MAC_MDIO_DATA_RA(phyReg);
  1517. base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
  1518. }
  1519. /*!
1520. * brief Starts a SMI read command.
1521. * It supports MDIO IEEE802.3 Clause 45.
1522. * After sending the command, the user needs to check whether the transmission is over
1523. * with ENET_QOS_IsSMIBusy().
  1524. *
  1525. * param base ENET peripheral base address.
  1526. * param phyAddr The PHY address.
  1527. * param device The PHY device type.
  1528. * param phyReg The PHY register address.
  1529. */
  1530. void ENET_QOS_StartExtC45SMIRead(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t device, uint32_t phyReg)
  1531. {
  1532. uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
  1533. /* Build MII read command. */
  1534. base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiReadFrame |
  1535. ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(device);
  1536. base->MAC_MDIO_DATA = ENET_QOS_MAC_MDIO_DATA_RA(phyReg);
  1537. base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
  1538. }
  1539. /*!
1540. * brief Sets the MAC to enter power down mode.
1541. * The remote wake-up frame and magic frame can wake up
1542. * the ENET from power down mode.
1543. *
1544. * param base ENET peripheral base address.
1545. * param wakeFilter The wakeFilter provided to configure the wake-up frame filter.
1546. * Set wakeFilter to NULL if it is not required. If filtering is required,
1547. * the wakeFilter pointer shall point to eight contiguous
1548. * 32-bit configuration words.
  1549. */
  1550. void ENET_QOS_EnterPowerDown(ENET_QOS_Type *base, uint32_t *wakeFilter)
  1551. {
  1552. uint8_t index;
  1553. uint32_t *reg = wakeFilter;
  1554. /* Disable the tx dma. */
  1555. base->DMA_CH[0].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
  1556. base->DMA_CH[1].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
  1557. /* Disable the mac tx/rx. */
  1558. base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_RE_MASK | ENET_QOS_MAC_CONFIGURATION_TE_MASK);
  1559. /* Enable the remote wakeup packet and enable the power down mode. */
  1560. if (wakeFilter != NULL)
  1561. {
  1562. for (index = 0; index < ENET_QOS_WAKEUPFILTER_NUM; index++)
  1563. {
  1564. base->MAC_RWK_PACKET_FILTER = *reg;
  1565. reg++;
  1566. }
  1567. }
  1568. base->MAC_PMT_CONTROL_STATUS = ENET_QOS_MAC_PMT_CONTROL_STATUS_MGKPKTEN_MASK |
  1569. ENET_QOS_MAC_PMT_CONTROL_STATUS_RWKPKTEN_MASK |
  1570. ENET_QOS_MAC_PMT_CONTROL_STATUS_PWRDWN_MASK;
  1571. /* Enable the MAC rx. */
  1572. base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
  1573. }
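/*
 * Usage sketch (illustrative, not part of the driver): enter power down with magic-packet
 * and remote wake-up packet detection but without a wake-up frame filter, then clear the
 * PMT status after the wake-up event. Restoring normal operation afterwards is expected
 * to go through the corresponding exit-power-down helper in the driver header.
 * code
 * ENET_QOS_EnterPowerDown(ENET, NULL);
 * ... wait for the PMT wake-up interrupt ...
 * ENET_QOS_ClearMacInterruptStatus(ENET, kENET_QOS_MacPmt);
 * endcode
 */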
  1574. /*!
1575. * brief Enables/Disables the Rx parser. Please note that before enabling/disabling the Rx parser,
1576. * it is better to disable Receive first.
  1577. *
  1578. * param base ENET_QOS peripheral base address.
1579. * param enable Enable/Disable Rx parser function.
* return kStatus_Success when the operation succeeds, otherwise the status returned from
* polling the Rx parser idle flag.
  1580. */
  1581. status_t ENET_QOS_EnableRxParser(ENET_QOS_Type *base, bool enable)
  1582. {
  1583. status_t result = kStatus_Success;
  1584. if (enable)
  1585. {
  1586. base->MTL_OPERATION_MODE |= ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
  1587. }
  1588. else
  1589. {
  1590. base->MTL_OPERATION_MODE &= ~ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
  1591. result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_CONTROL_STATUS), ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK,
  1592. ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK);
  1593. }
  1594. return result;
  1595. }
  1596. /*!
  1597. * brief Gets the size of the read frame.
  1598. * This function gets a received frame size from the ENET buffer descriptors.
  1599. * note The FCS of the frame is automatically removed by MAC and the size is the length without the FCS.
1600. * After calling ENET_QOS_GetRxFrameSize, ENET_QOS_ReadFrame() should be called to update the
1601. * receive buffers if the result is not "kStatus_ENET_QOS_RxFrameEmpty".
1602. *
* param base ENET_QOS peripheral base address.
1603. * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
  1604. * param length The length of the valid frame received.
  1605. * param channel The DMAC channel for the rx.
  1606. * retval kStatus_ENET_QOS_RxFrameEmpty No frame received. Should not call ENET_QOS_ReadFrame to read frame.
  1607. * retval kStatus_ENET_QOS_RxFrameError Data error happens. ENET_QOS_ReadFrame should be called with NULL data
  1608. * and NULL length to update the receive buffers.
  1609. * retval kStatus_Success Receive a frame Successfully then the ENET_QOS_ReadFrame
  1610. * should be called with the right data buffer and the captured data length input.
  1611. */
  1612. status_t ENET_QOS_GetRxFrameSize(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint32_t *length, uint8_t channel)
  1613. {
  1614. assert(handle != NULL);
  1615. assert(length != NULL);
  1616. enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
  1617. enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  1618. uint16_t index = rxBdRing->rxGenIdx;
  1619. uint32_t control = rxDesc->control;
  1620. /* Reset the length to zero. */
  1621. *length = 0;
  1622. if ((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
  1623. {
  1624. return kStatus_ENET_QOS_RxFrameEmpty;
  1625. }
  1626. else
  1627. {
  1628. do
  1629. {
  1630. /* Application owns the buffer descriptor, get the length. */
  1631. if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
  1632. {
  1633. if ((control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
  1634. {
  1635. return kStatus_ENET_QOS_RxFrameError;
  1636. }
  1637. *length = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
  1638. return kStatus_Success;
  1639. }
  1640. index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
  1641. rxDesc = &rxBdRing->rxBdBase[index];
  1642. control = rxDesc->control;
  1643. } while (index != rxBdRing->rxGenIdx);
  1644. return kStatus_ENET_QOS_RxFrameError;
  1645. }
  1646. }
  1647. static void ENET_QOS_DropFrame(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
  1648. {
  1649. enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
  1650. enet_qos_rx_bd_struct_t *rxDesc;
  1651. uint16_t index = rxBdRing->rxGenIdx;
  1652. bool tsAvailable = false;
  1653. uint32_t buff1Addr = 0;
  1654. uint32_t buff2Addr = 0;
  1655. /* Not check DMA ownership here, assume there's at least one valid frame left in BD ring */
  1656. do
  1657. {
  1658. /* Get the control flag. */
  1659. rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  1660. if (!handle->doubleBuffEnable)
  1661. {
  1662. buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
  1663. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
  1664. handle->doubleBuffEnable);
  1665. }
  1666. else
  1667. {
  1668. buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
  1669. buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
  1670. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1671. handle->rxintEnable, handle->doubleBuffEnable);
  1672. }
  1673. rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
  1674. /* Find the last buffer descriptor for the frame. */
  1675. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
  1676. {
  1677. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
  1678. {
  1679. if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
  1680. {
  1681. tsAvailable = true;
  1682. }
  1683. }
  1684. /* Reinit for the context descriptor which has been updated by DMA. */
  1685. rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  1686. if (tsAvailable && ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U))
  1687. {
  1688. if (!handle->doubleBuffEnable)
  1689. {
  1690. buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
  1691. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
  1692. handle->doubleBuffEnable);
  1693. }
  1694. else
  1695. {
  1696. buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
  1697. buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
  1698. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1699. handle->rxintEnable, handle->doubleBuffEnable);
  1700. }
  1701. rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
  1702. }
  1703. break;
  1704. }
  1705. } while (rxBdRing->rxGenIdx != index);
  1706. /* Always try to start receive, in case it had stopped */
  1707. base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
  1708. }
  1709. /*!
  1710. * brief Reads a frame from the ENET device.
  1711. * This function reads a frame from the ENET DMA descriptors.
  1712. * The ENET_QOS_GetRxFrameSize should be used to get the size of the prepared data buffer.
  1713. * For example use rx dma channel 0:
  1714. * code
  1715. * uint32_t length;
  1716. * enet_qos_handle_t g_handle;
  1717. * enet_qos_ptp_time_t ts;
1718. * status = ENET_QOS_GetRxFrameSize(ENET, &g_handle, &length, 0);
  1719. * if (length != 0)
  1720. * {
  1721. * uint8_t *data = memory allocate interface;
  1722. * if (!data)
  1723. * {
  1724. * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
  1725. * }
  1726. * else
  1727. * {
  1728. * status = ENET_QOS_ReadFrame(ENET, &g_handle, data, length, 0, &ts);
  1729. * }
  1730. * }
  1731. * else if (status == kStatus_ENET_QOS_RxFrameError)
  1732. * {
  1733. * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
  1734. * }
  1735. * endcode
  1736. * param base ENET peripheral base address.
  1737. * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
1738. * param data The data buffer provided by the user to store the frame; its size should be at least "length".
1739. * param length The size of the data buffer, which equals the length of the received frame.
1740. * param channel The rx DMA channel. It shall not be larger than 2.
* param ts Pointer used to return the rx timestamp when available; may be NULL if the timestamp is not needed.
  1741. * return The execute status, successful or failure.
  1742. */
  1743. status_t ENET_QOS_ReadFrame(ENET_QOS_Type *base,
  1744. enet_qos_handle_t *handle,
  1745. uint8_t *data,
  1746. uint32_t length,
  1747. uint8_t channel,
  1748. enet_qos_ptp_time_t *ts)
  1749. {
  1750. assert(handle != NULL);
  1751. assert(channel < handle->rxQueueUse);
  1752. uint32_t len = 0;
  1753. uint32_t offset = 0;
  1754. uint32_t control;
  1755. bool isLastBuff = false;
  1756. enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
  1757. enet_qos_rx_bd_struct_t *rxDesc;
  1758. status_t result = kStatus_Fail;
  1759. uint32_t buff1Addr = 0; /*!< Buffer 1 address */
  1760. uint32_t buff2Addr = 0; /*!< Buffer 2 or next descriptor address */
  1761. bool tsAvailable = false;
  1762. /* For data-NULL input, only update the buffer descriptor. */
  1763. if (data == NULL)
  1764. {
  1765. ENET_QOS_DropFrame(base, handle, channel);
  1766. result = kStatus_Success;
  1767. }
  1768. else
  1769. {
  1770. while ((!isLastBuff))
  1771. {
  1772. /* The last buffer descriptor of a frame. */
  1773. rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  1774. control = rxDesc->control;
  1775. if (!handle->doubleBuffEnable)
  1776. {
  1777. buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
  1778. if (handle->rxMaintainEnable[channel])
  1779. {
  1780. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1781. /* Add the cache invalidate maintain. */
  1782. DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
  1783. rxBdRing->rxBuffSizeAlign);
  1784. #else
  1785. /* Add the cache invalidate maintain. */
  1786. DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
  1787. #endif
  1788. }
  1789. }
  1790. else
  1791. {
  1792. buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
  1793. buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
  1794. if (handle->rxMaintainEnable[channel])
  1795. {
  1796. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1797. /* Add the cache invalidate maintain. */
  1798. DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
  1799. rxBdRing->rxBuffSizeAlign);
  1800. /* Add the cache invalidate maintain. */
  1801. DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
  1802. rxBdRing->rxBuffSizeAlign);
  1803. #else
  1804. /* Add the cache invalidate maintain. */
  1805. DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
  1806. /* Add the cache invalidate maintain. */
  1807. DCACHE_InvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
  1808. #endif
  1809. }
  1810. }
  1811. rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
  1812. if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
  1813. {
  1814. /* This is a valid frame. */
  1815. isLastBuff = true;
  1816. /* Remove FCS */
  1817. len = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
  1818. if (length == len)
  1819. {
  1820. /* Copy the frame to user's buffer. */
  1821. len -= offset;
  1822. if (len > rxBdRing->rxBuffSizeAlign)
  1823. {
  1824. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1825. (void)memcpy((void *)&data[offset],
  1826. (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
  1827. rxBdRing->rxBuffSizeAlign);
  1828. #else
  1829. (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
  1830. #endif
  1831. offset += rxBdRing->rxBuffSizeAlign;
  1832. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1833. (void)memcpy((void *)&data[offset],
  1834. (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
  1835. len - rxBdRing->rxBuffSizeAlign);
  1836. #else
  1837. (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr,
  1838. len - rxBdRing->rxBuffSizeAlign);
  1839. #endif
  1840. }
  1841. else
  1842. {
  1843. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1844. (void)memcpy((void *)&data[offset],
  1845. (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
  1846. len);
  1847. #else
  1848. (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, len);
  1849. #endif
  1850. }
  1851. result = kStatus_Success;
  1852. }
  1853. if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
  1854. {
  1855. tsAvailable = true;
  1856. }
  1857. /* Updates the receive buffer descriptors. */
  1858. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1859. handle->rxintEnable, handle->doubleBuffEnable);
  1860. /* Store the rx timestamp which is in the next buffer descriptor of the last
  1861. * descriptor of a frame. */
  1862. rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  1863. control = rxDesc->control;
  1864. /* If tsAvailable is true, a context descriptor is expected but might not be yet
  1865. * available.
  1866. */
  1867. if (tsAvailable)
  1868. {
  1869. uint8_t retryTimes = 10;
  1870. while (((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
  1871. ((control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
  1872. {
  1873. SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
  1874. if (0U == retryTimes--)
  1875. {
  1876. assert(false);
  1877. }
  1878. control = rxDesc->control;
  1879. }
  1880. }
1881. /* Reinit for the context descriptor which has been updated by DMA. */
  1882. if ((control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U)
  1883. {
  1884. if (tsAvailable && (NULL != ts))
  1885. {
  1886. ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, ts);
  1887. }
  1888. if (!handle->doubleBuffEnable)
  1889. {
  1890. buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
  1891. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
  1892. handle->doubleBuffEnable);
  1893. }
  1894. else
  1895. {
  1896. buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
  1897. buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
  1898. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1899. handle->rxintEnable, handle->doubleBuffEnable);
  1900. }
  1901. rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
  1902. }
  1903. }
  1904. else
  1905. {
  1906. /* Store a frame on several buffer descriptors. */
  1907. isLastBuff = false;
  1908. /* Length check. */
  1909. if (offset >= length)
  1910. {
  1911. /* Updates the receive buffer descriptors. */
  1912. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1913. handle->rxintEnable, handle->doubleBuffEnable);
  1914. break;
  1915. }
  1916. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1917. (void)memcpy((void *)&data[offset],
  1918. (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
  1919. rxBdRing->rxBuffSizeAlign);
  1920. #else
  1921. (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
  1922. #endif
  1923. offset += rxBdRing->rxBuffSizeAlign;
  1924. if (buff2Addr != 0U)
  1925. {
  1926. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  1927. (void)memcpy((void *)&data[offset],
  1928. (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
  1929. rxBdRing->rxBuffSizeAlign);
  1930. #else
  1931. (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr, rxBdRing->rxBuffSizeAlign);
  1932. #endif
  1933. offset += rxBdRing->rxBuffSizeAlign;
  1934. }
  1935. ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
  1936. handle->rxintEnable, handle->doubleBuffEnable);
  1937. }
  1938. }
  1939. /* Always try to start receive, in case it had stopped */
  1940. base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
  1941. }
  1942. return result;
  1943. }
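/*
 * Usage sketch (illustrative, not part of the driver): a simple polling receive path on
 * rx DMA channel 0 using a statically allocated buffer instead of dynamic allocation.
 * ENET_QOS_FRAME_MAX and g_handle are hypothetical application-side names.
 * code
 * static uint8_t rxData[ENET_QOS_FRAME_MAX];
 * uint32_t length = 0;
 * enet_qos_ptp_time_t ts;
 * status_t status = ENET_QOS_GetRxFrameSize(ENET, &g_handle, &length, 0);
 * if ((status == kStatus_Success) && (length <= ENET_QOS_FRAME_MAX))
 * {
 *     status = ENET_QOS_ReadFrame(ENET, &g_handle, &rxData[0], length, 0, &ts);
 * }
 * else if (status == kStatus_ENET_QOS_RxFrameError)
 * {
 *     // Drop the errored frame and recycle its buffer descriptors.
 *     (void)ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
 * }
 * endcode
 */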
  1944. /*!
  1945. * brief Updates the buffers and the own status for a given rx descriptor.
1946. * This function is a low level functional API to update the
1947. * buffers and the OWN status for a given rx descriptor.
  1948. *
  1949. * param rxDesc The given rx descriptor.
  1950. * param buffer1 The first buffer address in the descriptor.
  1951. * param buffer2 The second buffer address in the descriptor.
  1952. * param intEnable Interrupt enable flag.
  1953. * param doubleBuffEnable The double buffer enable flag.
  1954. *
1955. * note This must be called after all of the ENET initialization
1956. * and when the ENET receive/transmit is required.
  1957. */
  1958. void ENET_QOS_UpdateRxDescriptor(
  1959. enet_qos_rx_bd_struct_t *rxDesc, void *buffer1, void *buffer2, bool intEnable, bool doubleBuffEnable)
  1960. {
  1961. assert(rxDesc != NULL);
  1962. uint32_t control = ENET_QOS_RXDESCRIP_RD_OWN_MASK | ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
  1963. if (intEnable)
  1964. {
  1965. control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
  1966. }
  1967. if (doubleBuffEnable)
  1968. {
  1969. control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
  1970. }
  1971. /* Update the buffer if needed. */
  1972. if (buffer1 != NULL)
  1973. {
  1974. rxDesc->buff1Addr = (uint32_t)(uint8_t *)buffer1;
  1975. }
  1976. if (buffer2 != NULL)
  1977. {
  1978. rxDesc->buff2Addr = (uint32_t)(uint8_t *)buffer2;
  1979. }
  1980. else
  1981. {
  1982. rxDesc->buff2Addr = 0;
  1983. }
  1984. rxDesc->reserved = 0;
  1985. /* Add a data barrier to be sure that the address is written before the
  1986. ownership bit status. */
  1987. __DMB();
  1988. rxDesc->control = control;
  1989. }
  1990. /*!
  1991. * brief Setup a given tx descriptor.
  1992. * This function is a low level functional API to setup or prepare
  1993. * a given tx descriptor.
  1994. *
  1995. * param txDesc The given tx descriptor.
  1996. * param buffer1 The first buffer address in the descriptor.
1997. * param bytes1 The bytes in the first buffer.
1998. * param buffer2 The second buffer address in the descriptor.
1999. * param bytes2 The bytes in the second buffer.
  2000. * param framelen The length of the frame to be transmitted.
  2001. * param intEnable Interrupt enable flag.
  2002. * param tsEnable The timestamp enable.
2003. * param flag The flag of this tx descriptor, see "enet_qos_desc_flag".
  2004. * param slotNum The slot num used for AV only.
  2005. *
2006. * note This must be called after all of the ENET initialization
2007. * and when the ENET receive/transmit is required.
2008. * Transmit buffers are 'zero-copy' buffers, so the buffer must remain in
2009. * memory until the packet has been fully transmitted. The buffers
2010. * should be freed or requeued in the transmit interrupt IRQ handler.
  2011. */
  2012. void ENET_QOS_SetupTxDescriptor(enet_qos_tx_bd_struct_t *txDesc,
  2013. void *buffer1,
  2014. uint32_t bytes1,
  2015. void *buffer2,
  2016. uint32_t bytes2,
  2017. uint32_t framelen,
  2018. bool intEnable,
  2019. bool tsEnable,
  2020. enet_qos_desc_flag flag,
  2021. uint8_t slotNum)
  2022. {
  2023. uint32_t control = ENET_QOS_TXDESCRIP_RD_BL1(bytes1) | ENET_QOS_TXDESCRIP_RD_BL2(bytes2);
  2024. if (tsEnable)
  2025. {
  2026. control |= ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
  2027. }
  2028. else
  2029. {
  2030. control &= ~ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
  2031. }
  2032. if (intEnable)
  2033. {
  2034. control |= ENET_QOS_TXDESCRIP_RD_IOC_MASK;
  2035. }
  2036. else
  2037. {
  2038. control &= ~ENET_QOS_TXDESCRIP_RD_IOC_MASK;
  2039. }
  2040. #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
  2041. buffer1 = (void *)(uint32_t *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)buffer1, kMEMORY_Local2DMA);
  2042. buffer2 = (void *)(uint32_t *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)buffer2, kMEMORY_Local2DMA);
  2043. #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2044. /* Prepare the descriptor for transmit. */
  2045. txDesc->buff1Addr = (uint32_t)(uint8_t *)buffer1;
  2046. txDesc->buff2Addr = (uint32_t)(uint8_t *)buffer2;
  2047. txDesc->buffLen = control;
  2048. /* Make sure all fields of descriptor are written before setting ownership */
  2049. __DMB();
  2050. control = ENET_QOS_TXDESCRIP_RD_FL(framelen) | ENET_QOS_TXDESCRIP_RD_LDFD(flag) | ENET_QOS_TXDESCRIP_RD_OWN_MASK;
  2051. txDesc->controlStat = control;
  2052. /* Make sure the descriptor is written in memory (before MAC starts checking it) */
  2053. __DSB();
  2054. }
  2055. /*!
  2056. * brief Reclaim tx descriptors.
  2057. * This function is used to update the tx descriptor status and
  2058. * store the tx timestamp when the 1588 feature is enabled.
2059. * It is called by the transmit interrupt IRQ handler after the
2060. * completion of a frame transmission.
  2061. *
  2062. * param base ENET peripheral base address.
  2063. * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2064. * param channel The tx DMA channel.
  2065. *
  2066. */
  2067. void ENET_QOS_ReclaimTxDescriptor(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
  2068. {
  2069. enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[channel];
  2070. enet_qos_tx_bd_struct_t *txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
  2071. enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
  2072. enet_qos_frame_info_t *txDirty = NULL;
  2073. uint32_t control, primask;
  2074. control = txDesc->controlStat;
  2075. /* Need to update the first index for transmit buffer free. */
  2076. while ((txBdRing->txDescUsed > 0U) && (0U == (control & ENET_QOS_TXDESCRIP_RD_OWN_MASK)))
  2077. {
  2078. if ((control & ENET_QOS_TXDESCRIP_RD_LD_MASK) != 0U)
  2079. {
  2080. if (ENET_QOS_TxDirtyRingAvailable(txDirtyRing))
  2081. {
  2082. txDirty = &txDirtyRing->txDirtyBase[txBdRing->txConsumIdx];
  2083. txDirtyRing->txGenIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
  2084. if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
  2085. {
  2086. txDirtyRing->isFull = true;
  2087. }
  2088. if ((control & ENET_QOS_TXDESCRIP_WB_TTSS_MASK) != 0U)
  2089. {
  2090. enet_qos_ptp_time_t *ts = &txDirty->timeStamp;
  2091. uint32_t nanosecond;
  2092. /* Get transmit time stamp second. */
  2093. nanosecond = txDesc->buff1Addr;
  2094. txDirty->isTsAvail = true;
  2095. if (0U == (base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK))
  2096. {
  2097. /* Binary rollover, 0.465ns accuracy. */
  2098. nanosecond = (nanosecond * 465U) / 1000U;
  2099. }
  2100. ts->second = txDesc->buff2Addr;
  2101. ts->nanosecond = nanosecond;
  2102. }
  2103. else
  2104. {
  2105. txDirty->isTsAvail = false;
  2106. }
  2107. }
  2108. }
  2109. /* For tx buffer free or requeue for each descriptor.
  2110. * The tx interrupt callback should free/requeue the tx buffer. */
  2111. if (handle->callback != NULL)
  2112. {
  2113. handle->callback(base, handle, kENET_QOS_TxIntEvent, channel, handle->userData);
  2114. }
  2115. primask = DisableGlobalIRQ();
  2116. txBdRing->txDescUsed--;
  2117. EnableGlobalIRQ(primask);
  2118. /* Update the txConsumIdx/txDesc. */
  2119. txBdRing->txConsumIdx = ENET_QOS_IncreaseIndex(txBdRing->txConsumIdx, txBdRing->txRingLen);
  2120. txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
  2121. control = txDesc->controlStat;
  2122. }
  2123. }
  2124. /*!
  2125. * brief Transmits an ENET frame.
  2126. * note The CRC is automatically appended to the data. Input the data
  2127. * to send without the CRC.
  2128. *
  2129. * param base ENET peripheral base address.
  2130. * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2131. * param data The data buffer provided by the user to be sent.
2132. * param length The length of the data to be sent.
2133. * param channel Channel used to send the frame, same as the queue index.
2134. * param isNeedTs True means the transmit timestamp is saved.
  2135. * param context pointer to user context to be kept in the tx dirty frame information.
  2136. * retval kStatus_Success Send frame succeed.
  2137. * retval kStatus_ENET_QOS_TxFrameBusy Transmit buffer descriptor is busy under transmission.
  2138. * The transmit busy happens when the data send rate is over the MAC capacity.
  2139. * The waiting mechanism is recommended to be added after each call return with
  2140. * kStatus_ENET_QOS_TxFrameBusy.
  2141. */
  2142. status_t ENET_QOS_SendFrame(ENET_QOS_Type *base,
  2143. enet_qos_handle_t *handle,
  2144. uint8_t *data,
  2145. uint32_t length,
  2146. uint8_t channel,
  2147. bool isNeedTs,
  2148. void *context)
  2149. {
  2150. assert(handle != NULL);
  2151. assert(data != NULL);
  2152. assert(channel < handle->txQueueUse);
  2153. enet_qos_tx_bd_ring_t *txBdRing;
  2154. enet_qos_tx_bd_struct_t *txDesc;
  2155. enet_qos_tx_dirty_ring_t *txDirtyRing;
  2156. enet_qos_frame_info_t *txDirty;
  2157. uint32_t primask;
  2158. if (length > 2U * ENET_QOS_TXDESCRIP_RD_BL1_MASK)
  2159. {
  2160. return kStatus_ENET_QOS_TxFrameOverLen;
  2161. }
  2162. /* Check if the DMA owns the descriptor. */
  2163. txBdRing = (enet_qos_tx_bd_ring_t *)&handle->txBdRing[channel];
  2164. txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
  2165. if (txBdRing->txRingLen == txBdRing->txDescUsed)
  2166. {
  2167. return kStatus_ENET_QOS_TxFrameBusy;
  2168. }
  2169. txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
  2170. txDirty = &txDirtyRing->txDirtyBase[txBdRing->txGenIdx];
  2171. txDirty->context = context;
  2172. /* Fill the descriptor. */
  2173. if (length <= ENET_QOS_TXDESCRIP_RD_BL1_MASK)
  2174. {
  2175. ENET_QOS_SetupTxDescriptor(txDesc, data, length, NULL, 0, length, true, isNeedTs, kENET_QOS_FirstLastFlag, 0);
  2176. }
  2177. else
  2178. {
  2179. ENET_QOS_SetupTxDescriptor(txDesc, data, ENET_QOS_TXDESCRIP_RD_BL1_MASK, &data[ENET_QOS_TXDESCRIP_RD_BL1_MASK],
  2180. (length - ENET_QOS_TXDESCRIP_RD_BL1_MASK), length, true, isNeedTs,
  2181. kENET_QOS_FirstLastFlag, 0);
  2182. }
  2183. /* Increase the index. */
  2184. txBdRing->txGenIdx = ENET_QOS_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
  2185. /* Disable interrupt first and then enable interrupt to avoid the race condition. */
  2186. primask = DisableGlobalIRQ();
  2187. txBdRing->txDescUsed++;
  2188. EnableGlobalIRQ(primask);
  2189. /* Update the transmit tail address. */
  2190. txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
  2191. if (txBdRing->txGenIdx == 0U)
  2192. {
  2193. txDesc = &txBdRing->txBdBase[txBdRing->txRingLen];
  2194. }
  2195. base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR = (uint32_t)txDesc & ~ENET_QOS_ADDR_ALIGNMENT;
  2196. return kStatus_Success;
  2197. }
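/*
 * Usage sketch (illustrative, not part of the driver): queue a frame on tx channel 0 and
 * retry while the descriptor ring is full. Because transmission is zero-copy, txData must
 * stay valid until the kENET_QOS_TxIntEvent callback reports the frame as finished.
 * code
 * status_t status;
 * do
 * {
 *     status = ENET_QOS_SendFrame(ENET, &g_handle, txData, txLength, 0, false, NULL);
 * } while (status == kStatus_ENET_QOS_TxFrameBusy);
 * endcode
 */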
  2198. /*!
  2199. * brief Gets the sent frame.
  2200. *
  2201. * This function is used to get the sent frame for timestamp and buffer clean operation.
  2202. *
2203. * param handle The ENET handler pointer. This is the same state pointer used in
  2204. * ENET_QOS_Init.
  2205. * param txFrame Input parameter, pointer to enet_qos_frame_info_t for saving read out frame information.
  2206. * param channel Read out frame from specified channel.
  2207. */
  2208. void ENET_QOS_GetTxFrame(enet_qos_handle_t *handle, enet_qos_frame_info_t *txFrame, uint8_t channel)
  2209. {
  2210. assert(handle != NULL);
  2211. assert(channel < handle->txQueueUse);
  2212. enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
  2213. enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
  2214. (void)memcpy(txFrame, txDirty, sizeof(enet_qos_frame_info_t));
  2215. txDirtyRing->isFull = false;
  2216. txDirtyRing->txConsumIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txConsumIdx, txDirtyRing->txRingLen);
  2217. }
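/*
 * Usage sketch (illustrative, not part of the driver): a transactional callback that, on
 * every tx event, fetches the finished frame information and releases the buffer passed to
 * ENET_QOS_SendFrame() through the context parameter. The prototype mirrors how the driver
 * invokes handle->callback (base, handle, event, channel, userData); enet_qos_event_t is the
 * event enumeration from the driver header, and ENETQOS_AppFreeTxBuffer is a hypothetical
 * application helper.
 * code
 * static void ENETQOS_AppCallback(ENET_QOS_Type *base, enet_qos_handle_t *handle,
 *                                 enet_qos_event_t event, uint8_t channel, void *userData)
 * {
 *     if (event == kENET_QOS_TxIntEvent)
 *     {
 *         enet_qos_frame_info_t txFrame;
 *         ENET_QOS_GetTxFrame(handle, &txFrame, channel);
 *         ENETQOS_AppFreeTxBuffer(txFrame.context);
 *     }
 * }
 * endcode
 */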
  2218. static inline void ENET_QOS_GetRxFrameErr(enet_qos_rx_bd_struct_t *rxDesc, enet_qos_rx_frame_error_t *rxFrameError)
  2219. {
  2220. uint32_t rdes2 = rxDesc->buff2Addr;
  2221. uint32_t rdes3 = rxDesc->control;
  2222. (void)memset(rxFrameError, 0, sizeof(enet_qos_rx_frame_error_t));
  2223. if ((rdes2 & ENET_QOS_RXDESCRIP_WR_SA_FAILURE_MASK) != 0U)
  2224. {
  2225. rxFrameError->rxSrcAddrFilterErr = true;
  2226. }
  2227. if ((rdes2 & ENET_QOS_RXDESCRIP_WR_DA_FAILURE_MASK) != 0U)
  2228. {
  2229. rxFrameError->rxDstAddrFilterErr = true;
  2230. }
  2231. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_DE_MASK) != 0U)
  2232. {
  2233. rxFrameError->rxDstAddrFilterErr = true;
  2234. }
  2235. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RE_MASK) != 0U)
  2236. {
  2237. rxFrameError->rxReceiveErr = true;
  2238. }
  2239. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_OE_MASK) != 0U)
  2240. {
  2241. rxFrameError->rxOverFlowErr = true;
  2242. }
  2243. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RWT_MASK) != 0U)
  2244. {
  2245. rxFrameError->rxWatchDogErr = true;
  2246. }
  2247. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_GP_MASK) != 0U)
  2248. {
  2249. rxFrameError->rxGaintPacketErr = true;
  2250. }
  2251. if ((rdes3 & ENET_QOS_RXDESCRIP_WR_CRC_MASK) != 0U)
  2252. {
  2253. rxFrameError->rxCrcErr = true;
  2254. }
  2255. }
  2256. /*!
  2257. * brief Receives one frame in specified BD ring with zero copy.
  2258. *
  2259. * This function will use the user-defined allocate and free callback. Every time application gets one frame through
  2260. * this function, driver will allocate new buffers for the BDs whose buffers have been taken by application.
2261. * note This function drops the current frame and updates the related BDs as available for DMA if allocating new buffers
2262. * fails. The application must provide a memory pool containing at least BD number + 1 buffers (+2 if double buffering is enabled)
2263. * to make this function work normally. If the user calls this function in the Rx interrupt handler, be careful that this
2264. * function makes the Rx BD ready by allocating a new buffer (normal) or updating the current BD (out of memory). If there are
2265. * always new Rx frames arriving, the Rx interrupt will be triggered forever. The application needs to disable the Rx interrupt according
2266. * to the specific design in this case.
  2267. *
  2268. * param base ENET peripheral base address.
  2269. * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_Init.
  2270. * param rxFrame The received frame information structure provided by user.
2271. * param channel The rx DMA channel, i.e. the ring index.
  2272. * retval kStatus_Success Succeed to get one frame and allocate new memory for Rx buffer.
  2273. * retval kStatus_ENET_QOS_RxFrameEmpty There's no Rx frame in the BD.
  2274. * retval kStatus_ENET_QOS_RxFrameError There's issue in this receiving.
  2275. * retval kStatus_ENET_QOS_RxFrameDrop There's no new buffer memory for BD, drop this frame.
  2276. */
  2277. status_t ENET_QOS_GetRxFrame(ENET_QOS_Type *base,
  2278. enet_qos_handle_t *handle,
  2279. enet_qos_rx_frame_struct_t *rxFrame,
  2280. uint8_t channel)
  2281. {
  2282. assert(handle != NULL);
  2283. assert(channel < handle->rxQueueUse);
  2284. enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
  2285. enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
  2286. uint16_t index = rxBdRing->rxGenIdx;
  2287. status_t result = kStatus_Success;
  2288. uint32_t buff1Addr = 0;
  2289. uint32_t buff2Addr = 0;
  2290. uint16_t buff1Len = 0;
  2291. uint16_t buff2Len = 0;
  2292. uint16_t offset = 0;
  2293. void *newBuff1 = NULL;
  2294. void *newBuff2 = NULL;
  2295. bool isDrop = false;
  2296. bool isLastBuff = false;
  2297. bool tsAvailable = false;
  2298. /* Check the frame status. */
  2299. do
  2300. {
  2301. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
  2302. {
  2303. result = kStatus_ENET_QOS_RxFrameEmpty;
  2304. break;
  2305. }
  2306. /* Check timestamp and error. */
  2307. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
  2308. {
  2309. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
  2310. {
  2311. if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
  2312. {
  2313. /* Context descriptor is expected but might not be yet available. */
  2314. uint8_t retryTimes = 10;
  2315. while (((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
  2316. ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
  2317. {
2318. /* Timestamp value is not corrupted. */
  2319. if ((rxDesc->buff1Addr != 0xFFFFFFFFU) && (rxDesc->buff2Addr != 0xFFFFFFFFU))
  2320. {
  2321. break;
  2322. }
  2323. if (retryTimes-- == 0U)
  2324. {
  2325. break;
  2326. }
  2327. }
  2328. if (retryTimes != 0U)
  2329. {
  2330. tsAvailable = true;
  2331. }
  2332. else
  2333. {
  2334. result = kStatus_ENET_QOS_RxFrameEmpty;
  2335. break;
  2336. }
  2337. }
  2338. }
  2339. /* Get the frame error if there is. */
  2340. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
  2341. {
  2342. ENET_QOS_GetRxFrameErr(rxDesc, &rxFrame->rxFrameError);
  2343. result = kStatus_ENET_QOS_RxFrameError;
  2344. }
  2345. else if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) == 0U)
  2346. {
  2347. result = kStatus_ENET_QOS_RxFrameEmpty;
  2348. }
  2349. else
  2350. {
  2351. /* Intentional empty */
  2352. }
  2353. break;
  2354. }
  2355. index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
  2356. if (index == rxBdRing->rxGenIdx)
  2357. {
  2358. result = kStatus_ENET_QOS_RxFrameEmpty;
  2359. break;
  2360. }
  2361. rxDesc = &rxBdRing->rxBdBase[index];
  2362. } while (index != rxBdRing->rxGenIdx);
  2363. /* Drop the error frame and return error. */
  2364. if (result != kStatus_Success)
  2365. {
  2366. if (result == kStatus_ENET_QOS_RxFrameError)
  2367. {
  2368. ENET_QOS_DropFrame(base, handle, channel);
  2369. }
  2370. return result;
  2371. }
  2372. /* Get the valid frame */
  2373. index = 0;
  2374. do
  2375. {
  2376. rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2377. /* Calculate the buffer and frame length. */
  2378. if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
  2379. {
  2380. isLastBuff = true;
  2381. rxFrame->totLen = (uint16_t)(rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK);
  2382. if (rxFrame->totLen - offset > (uint16_t)rxBdRing->rxBuffSizeAlign)
  2383. {
  2384. buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
  2385. if (handle->doubleBuffEnable)
  2386. {
  2387. buff2Len = rxFrame->totLen - offset - (uint16_t)rxBdRing->rxBuffSizeAlign - ENET_QOS_FCS_LEN;
  2388. }
  2389. }
  2390. else
  2391. {
  2392. buff1Len = rxFrame->totLen - offset - ENET_QOS_FCS_LEN;
  2393. }
  2394. rxFrame->totLen -= ENET_QOS_FCS_LEN;
  2395. }
  2396. else
  2397. {
  2398. if (!handle->doubleBuffEnable)
  2399. {
  2400. buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
  2401. offset += buff1Len;
  2402. }
  2403. else
  2404. {
  2405. buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
  2406. buff2Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
  2407. offset += buff1Len + buff2Len;
  2408. }
  2409. }
  2410. /* Allocate new buffer to replace the buffer taken by application */
  2411. newBuff1 = handle->rxBuffAlloc(base, handle->userData, channel);
  2412. if (newBuff1 == NULL)
  2413. {
  2414. isDrop = true;
  2415. }
  2416. else if (handle->doubleBuffEnable && (buff2Len != 0U))
  2417. {
  2418. newBuff2 = handle->rxBuffAlloc(base, handle->userData, channel);
  2419. if (newBuff2 == NULL)
  2420. {
  2421. handle->rxBuffFree(base, newBuff1, handle->userData, channel);
  2422. isDrop = true;
  2423. }
  2424. }
  2425. else
  2426. {
  2427. /* Intentional empty */
  2428. }
        if (!isDrop)
        {
            /* Get the frame data information into Rx frame structure. */
            if (!handle->doubleBuffEnable)
            {
                buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                if (handle->rxMaintainEnable[channel])
                {
                    DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
                }
                rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff1Addr;
                rxFrame->rxBuffArray[index].length = buff1Len;
                index++;
            }
            else
            {
                buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                if (handle->rxMaintainEnable[channel])
                {
                    DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
                }
                rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff1Addr;
                rxFrame->rxBuffArray[index].length = buff1Len;
                index++;

                /* If there's no data in buffer2, do not add it to rxFrame */
                if (buff2Len != 0U)
                {
                    buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                    buff2Addr = MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local);
#endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
                    if (handle->rxMaintainEnable[channel])
                    {
                        DCACHE_InvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
                    }
                    rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff2Addr;
                    rxFrame->rxBuffArray[index].length = buff2Len;
                    index++;
                }
            }
            /* Give new buffer from application to BD */
            if (!handle->doubleBuffEnable)
            {
                if (handle->rxMaintainEnable[channel])
                {
                    DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
                }
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                buff1Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff1, kMEMORY_Local2DMA);
#else
                buff1Addr = (uint32_t)(uint32_t *)newBuff1;
#endif
                handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx] = buff1Addr;
                ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint32_t *)buff1Addr, NULL, handle->rxintEnable,
                                            handle->doubleBuffEnable);
            }
            else
            {
                if (handle->rxMaintainEnable[channel])
                {
                    DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
                }
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                buff1Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff1, kMEMORY_Local2DMA);
#else
                buff1Addr = (uint32_t)(uint32_t *)newBuff1;
#endif
                handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx] = buff1Addr;

                if (buff2Len != 0U)
                {
                    if (handle->rxMaintainEnable[channel])
                    {
                        DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff2, rxBdRing->rxBuffSizeAlign);
                    }
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
                    buff2Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff2, kMEMORY_Local2DMA);
#else
                    buff2Addr = (uint32_t)(uint32_t *)newBuff2;
#endif
                    handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U] = buff2Addr;
                }
                else
                {
                    /* If there's no data in buffer2, keep it */
                    buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
                }
                ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint32_t *)buff1Addr, (void *)(uint32_t *)buff2Addr,
                                            handle->rxintEnable, handle->doubleBuffEnable);
            }
            rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);

            /* Update context BD if there is one */
            if (isLastBuff && tsAvailable)
            {
                rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
                if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U)
                {
                    ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, &rxFrame->rxAttribute.timestamp);
                    rxFrame->rxAttribute.isTsAvail = true;

                    if (!handle->doubleBuffEnable)
                    {
                        buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
                        ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
                                                    handle->doubleBuffEnable);
                    }
                    else
                    {
                        buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
                        buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
                        ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
                                                    handle->rxintEnable, handle->doubleBuffEnable);
                    }
                    rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
                }
            }

            /* Always try to start receive, in case it had stopped */
            base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR =
                (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
        }
        else
        {
            /* Drop frame if there's no new buffer memory */
            /* Free the incomplete frame buffers. */
            while (index-- != 0U)
            {
                handle->rxBuffFree(base, rxFrame->rxBuffArray[index].buffer, handle->userData, channel);
            }

            /* Update all left BDs of this frame from current index. */
            ENET_QOS_DropFrame(base, handle, channel);
            result = kStatus_ENET_QOS_RxFrameDrop;
            break;
        }
    } while (!isLastBuff);

    return result;
}
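/*
 * Example (illustrative sketch, not part of the driver): a polling receive loop built on
 * ENET_QOS_GetRxFrame(). The application-side names (APP_RX_BUFF_NUM, APP_ProcessFrame) are
 * hypothetical, and the enet_qos_buffer_struct_t element type of rxBuffArray is assumed from
 * the .buffer/.length accesses above; rxBuffAlloc()/rxBuffFree() callbacks must have been
 * registered when the handle was created.
 *
 * @code
 * void APP_PollRxChannel(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
 * {
 *     enet_qos_buffer_struct_t buffers[APP_RX_BUFF_NUM];
 *     enet_qos_rx_frame_struct_t rxFrame = {.rxBuffArray = &buffers[0]};
 *     status_t status;
 *
 *     do
 *     {
 *         status = ENET_QOS_GetRxFrame(base, handle, &rxFrame, channel);
 *         if (status == kStatus_Success)
 *         {
 *             // Consume rxFrame.totLen bytes spread over rxFrame.rxBuffArray[], then hand
 *             // every taken buffer back to the same pool used by rxBuffAlloc().
 *             APP_ProcessFrame(&rxFrame);
 *         }
 *         // kStatus_ENET_QOS_RxFrameError and kStatus_ENET_QOS_RxFrameDrop frames were
 *         // already dropped by the driver; nothing to release here.
 *     } while (status != kStatus_ENET_QOS_RxFrameEmpty);
 * }
 * @endcode
 */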
/*!
 * brief Gets the current ENET time from the PTP 1588 timer without disabling IRQs.
 *
 * param base ENET peripheral base address.
 * param second The PTP 1588 system timer second.
 * param nanosecond The PTP 1588 system timer nanosecond.
 * The unit of the nanosecond is 1 ns, so the returned value is the actual nanosecond count.
 */
void ENET_QOS_Ptp1588GetTimerNoIRQDisable(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
{
    assert(second != NULL);
    assert(nanosecond != NULL);

    uint32_t high_sec[2];
    uint32_t sec[2];

    /* Get the current PTP time. */
    /* Since register reads are not atomic, we need to check for wraps during the read */
    high_sec[1] = base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
    do
    {
        high_sec[0] = high_sec[1];
        sec[1]      = base->MAC_SYSTEM_TIME_SECONDS;
        do
        {
            sec[0]      = sec[1];
            *nanosecond = base->MAC_SYSTEM_TIME_NANOSECONDS & ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK;
            sec[1]      = base->MAC_SYSTEM_TIME_SECONDS;
        } while (sec[1] != sec[0]);
        high_sec[1] =
            base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
    } while (high_sec[1] != high_sec[0]);
    *second = ((uint64_t)high_sec[1] << 32U) | sec[1];

    if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
    {
        /* Binary rollover, the unit of the increment is ~ 0.465 ns.
           Use a 64-bit intermediate so the scaling cannot overflow. */
        *nanosecond = (uint32_t)(((uint64_t)*nanosecond * 465U) / 1000U);
    }
}
/*!
 * brief Gets the current ENET time from the PTP 1588 timer, getting a more accurate
 * value by keeping IRQs disabled while the timer is read.
 *
 * param base ENET peripheral base address.
 * param second The PTP 1588 system timer second.
 * param nanosecond The PTP 1588 system timer nanosecond.
 * The unit of the nanosecond is 1 ns, so the returned value is the actual nanosecond count.
 */
void ENET_QOS_Ptp1588GetTimer(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
{
    uint32_t primask;

    /* Disables the interrupt. */
    primask = DisableGlobalIRQ();

    ENET_QOS_Ptp1588GetTimerNoIRQDisable(base, second, nanosecond);

    /* Enables the interrupt. */
    EnableGlobalIRQ(primask);
}
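/*
 * Example (illustrative sketch): taking a consistent snapshot of the PTP 1588 time from task
 * context. ENET_QOS is used as the base pointer here on the assumption of a single-instance
 * device; inside an ISR, or with IRQs already masked, the NoIRQDisable variant above can be
 * called directly.
 *
 * @code
 * uint64_t seconds;
 * uint32_t nanoseconds;
 *
 * ENET_QOS_Ptp1588GetTimer(ENET_QOS, &seconds, &nanoseconds);
 * @endcode
 */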
/*!
 * brief Correct the ENET PTP 1588 timer in coarse method.
 *
 * param base ENET peripheral base address.
 * param operation The system time operation, refer to "enet_qos_systime_op"
 * param second The correction second.
 * param nanosecond The correction nanosecond.
 */
status_t ENET_QOS_Ptp1588CorrectTimerInCoarse(ENET_QOS_Type *base,
                                              enet_qos_systime_op operation,
                                              uint32_t second,
                                              uint32_t nanosecond)
{
    uint32_t corrSecond = second;
    uint32_t corrNanosecond;
    status_t result = kStatus_Success;

    /* Set the system timer. */
    if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) != 0U)
    {
        if (operation == kENET_QOS_SystimeSubtract)
        {
            /* Set with the complement of the sub-second. */
            corrSecond     = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
            corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
                             ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(ENET_QOS_NANOSECS_ONESECOND - nanosecond);
        }
        else
        {
            corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
        }
    }
    else
    {
        nanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK / ENET_QOS_NANOSECS_ONESECOND * nanosecond;
        if (operation == kENET_QOS_SystimeSubtract)
        {
            /* Set with the complement of the sub-second. */
            corrSecond     = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
            corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
                             ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(
                                 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK + 1U - nanosecond);
        }
        else
        {
            corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
        }
    }

    base->MAC_SYSTEM_TIME_SECONDS_UPDATE     = corrSecond;
    base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = corrNanosecond;

    /* Update the timer. */
    base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK;

    /* Wait for update finish */
    result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK, 0U);

    return result;
}
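/*
 * Example (illustrative sketch): a coarse correction applied once the application's PTP stack
 * has measured that the local clock is 200 us (200000 ns) ahead of the master.
 *
 * @code
 * status_t status = ENET_QOS_Ptp1588CorrectTimerInCoarse(ENET_QOS, kENET_QOS_SystimeSubtract,
 *                                                        0U, 200000U);
 * @endcode
 */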
/*!
 * brief Correct the ENET PTP 1588 timer in fine method.
 *
 * param base ENET peripheral base address.
 * param addend The addend value to be set in the fine method.
 * note Refer to the chapter "System time correction" and see the description
 * of the "fine correction method".
 */
status_t ENET_QOS_Ptp1588CorrectTimerInFine(ENET_QOS_Type *base, uint32_t addend)
{
    status_t result = kStatus_Success;

    base->MAC_TIMESTAMP_ADDEND = addend;
    base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK;

    result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK, 0U);

    return result;
}
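/*
 * Example (illustrative sketch): fine correction scales the addend that was programmed at
 * initialization by the measured frequency error. The currentAddend and driftPpb variables are
 * hypothetical application values; see the "System time correction" chapter of the reference
 * manual for how the nominal addend is derived from the PTP clock.
 *
 * @code
 * // Local clock runs slow by driftPpb parts-per-billion: speed it up proportionally.
 * uint32_t newAddend = currentAddend + (uint32_t)(((uint64_t)currentAddend * driftPpb) / 1000000000U);
 *
 * status_t status = ENET_QOS_Ptp1588CorrectTimerInFine(ENET_QOS, newAddend);
 * @endcode
 */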
/*!
 * @brief Sets the ENET QOS PTP 1588 PPS target time registers.
 *
 * param base ENET QOS peripheral base address.
 * param instance The ENET QOS PTP PPS instance.
 * param seconds The target seconds.
 * param nanoseconds The target nanoseconds.
 */
status_t ENET_QOS_Ptp1588PpsSetTrgtTime(ENET_QOS_Type *base,
                                        enet_qos_ptp_pps_instance_t instance,
                                        uint32_t seconds,
                                        uint32_t nanoseconds)
{
    uint32_t *mac_pps_trgt_ns;
    uint32_t *mac_pps_trgt_s;

    mac_pps_trgt_ns = (uint32_t *)((uint32_t)&base->MAC_PPS0_TARGET_TIME_NANOSECONDS + 0x10U * (uint32_t)instance);
    mac_pps_trgt_s  = (uint32_t *)((uint32_t)&base->MAC_PPS0_TARGET_TIME_SECONDS + 0x10U * (uint32_t)instance);

    if ((*mac_pps_trgt_ns & ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TRGTBUSY0_MASK) != 0U)
    {
        return kStatus_ENET_QOS_TrgtBusy;
    }

    *mac_pps_trgt_ns = ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TTSL0(nanoseconds);
    *mac_pps_trgt_s  = ENET_QOS_MAC_PPS0_TARGET_TIME_SECONDS_TSTRH0(seconds);

    return kStatus_Success;
}
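/*
 * Example (illustrative sketch): arming a PPS instance to fire at the start of the next whole
 * second. The kENET_QOS_Ptp1588Pps0 enumerator name is assumed here for the first instance of
 * enet_qos_ptp_pps_instance_t.
 *
 * @code
 * uint64_t seconds;
 * uint32_t nanoseconds;
 *
 * ENET_QOS_Ptp1588GetTimer(ENET_QOS, &seconds, &nanoseconds);
 * if (ENET_QOS_Ptp1588PpsSetTrgtTime(ENET_QOS, kENET_QOS_Ptp1588Pps0, (uint32_t)seconds + 1U, 0U) ==
 *     kStatus_ENET_QOS_TrgtBusy)
 * {
 *     // The previously programmed target has not been loaded yet; retry later.
 * }
 * @endcode
 */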
static status_t ENET_QOS_EstReadWriteWord(
    ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t read, uint8_t dbgm)
{
    uint32_t ctrl;
    int retry = 10;

    ctrl = ENET_QOS_MTL_EST_GCL_CONTROL_ADDR(addr) | ENET_QOS_MTL_EST_GCL_CONTROL_SRWO(1) |
           ENET_QOS_MTL_EST_GCL_CONTROL_DBGM(dbgm) | ENET_QOS_MTL_EST_GCL_CONTROL_GCRR(gcrr);

    if (read != 0U)
    {
        ctrl |= ENET_QOS_MTL_EST_GCL_CONTROL_R1W0(1);
    }
    else
    {
        base->MTL_EST_GCL_DATA = *data;
    }

    base->MTL_EST_GCL_CONTROL = ctrl;

    while ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_SRWO_MASK) != 0U)
    {
        if (retry-- < 0)
        {
            return kStatus_Timeout;
        }
        SDK_DelayAtLeastUs(1, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
    }

    if (read != 0U)
    {
        *data = base->MTL_EST_GCL_DATA;
    }

    if ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_ERR0_MASK) != 0U)
    {
        return kStatus_ENET_QOS_Est_SwListWriteAbort;
    }

    return kStatus_Success;
}

static status_t ENET_QOS_EstProgramWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr)
{
    return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 0, 0);
}

static status_t ENET_QOS_EstReadWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t dbgm)
{
    return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 1, dbgm);
}
/*!
 * @brief Program Gate Control List.
 *
 * This function is used to program the Enhanced Scheduled Transmission (IEEE 802.1Qbv) gate control list.
 *
 * @param base ENET peripheral base address.
 * @param gcl Pointer to the Gate Control List structure.
 * @param ptpClk_Hz frequency of the PTP clock.
 */
status_t ENET_QOS_EstProgramGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t ptpClk_Hz)
{
    assert(gcl != NULL);

    uint32_t i, control, data;
    enet_qos_est_gate_op_t *gateOp;
    status_t rc;

#define EST_MAX_INTERVAL ((1UL << ENET_QOS_EST_WID) - 1U)
#define EST_MAX_GATE     ((1UL << (32U - ENET_QOS_EST_WID)) - 1U)

    if (!gcl->enable)
    {
        goto exit;
    }

    /* Sanity checks */
    if (gcl->numEntries > ENET_QOS_EST_DEP)
    {
        return kStatus_ENET_QOS_Est_InvalidParameter;
    }

    if (gcl->opList == NULL)
    {
        return kStatus_ENET_QOS_Est_InvalidParameter;
    }

    gateOp = gcl->opList;
    for (i = 0; i < gcl->numEntries; i++)
    {
        if (gateOp->interval > EST_MAX_INTERVAL)
        {
            return kStatus_ENET_QOS_Est_InvalidParameter;
        }
        if (gateOp->gate > EST_MAX_GATE)
        {
            return kStatus_ENET_QOS_Est_InvalidParameter;
        }
        gateOp++;
    }

    /* Check if sw list is busy */
    if ((base->MTL_EST_CONTROL & ENET_QOS_MTL_EST_CONTROL_SSWL_MASK) != 0U)
    {
        return kStatus_ENET_QOS_Est_SwListBusy;
    }

    gateOp = gcl->opList;
    for (i = 0; i < gcl->numEntries; i++)
    {
        data = gateOp->interval | (gateOp->gate << ENET_QOS_EST_WID);
        rc   = ENET_QOS_EstProgramWord(base, i, &data, 0);
        if (rc != kStatus_Success)
        {
            return rc;
        }
        gateOp++;
    }

    /* BTR High */
    data = (uint32_t)(gcl->baseTime >> 32U);
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1U);
    if (rc != kStatus_Success)
    {
        return rc;
    }

    /* BTR Low */
    data = (uint32_t)gcl->baseTime;
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1);
    if (rc != kStatus_Success)
    {
        return rc;
    }

    /* CTR High */
    data = (uint32_t)(gcl->cycleTime >> 32U);
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1);
    if (rc != kStatus_Success)
    {
        return rc;
    }

    /* CTR Low */
    data = (uint32_t)gcl->cycleTime;
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1);
    if (rc != kStatus_Success)
    {
        return rc;
    }

    /* TER */
    data = gcl->extTime;
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1);
    if (rc != kStatus_Success)
    {
        return rc;
    }

    /* LLR */
    data = gcl->numEntries;
    rc   = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1);
    if (rc != kStatus_Success)
    {
        return rc;
    }

exit:
    control = base->MTL_EST_CONTROL;

    if (gcl->enable)
    {
        control &= ~ENET_QOS_MTL_EST_CONTROL_PTOV_MASK;
        control |= ENET_QOS_MTL_EST_CONTROL_SSWL_MASK | ENET_QOS_MTL_EST_CONTROL_EEST_MASK |
                   ENET_QOS_MTL_EST_CONTROL_PTOV((1000000000U / ptpClk_Hz) * 6U);
    }
    else
    {
        control &= ~ENET_QOS_MTL_EST_CONTROL_EEST_MASK;
    }

    base->MTL_EST_CONTROL = control;

    return kStatus_Success;
}
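/*
 * Example (illustrative sketch): programming a two-entry gate control list that alternates
 * between two traffic-class gate masks. EXAMPLE_PTP_CLK_HZ is a hypothetical board define, and
 * the interval/cycleTime values shown are only placeholders; the exact time unit is defined in
 * the EST chapter of the reference manual.
 *
 * @code
 * enet_qos_est_gate_op_t gateOps[2] = {
 *     {.gate = 0x01U, .interval = 500U}, // open the gate of TC0
 *     {.gate = 0x02U, .interval = 500U}, // open the gate of TC1
 * };
 * enet_qos_est_gcl_t gcl = {
 *     .enable     = true,
 *     .baseTime   = 0U,
 *     .cycleTime  = 1000U,
 *     .extTime    = 0U,
 *     .numEntries = 2U,
 *     .opList     = &gateOps[0],
 * };
 *
 * status_t status = ENET_QOS_EstProgramGcl(ENET_QOS, &gcl, EXAMPLE_PTP_CLK_HZ);
 * @endcode
 */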
/*!
 * @brief Read Gate Control List.
 *
 * This function is used to read the Enhanced Scheduled Transmission (IEEE 802.1Qbv) gate control list.
 *
 * @param base ENET peripheral base address.
 * @param gcl Pointer to the Gate Control List structure.
 * @param listLen Length of the provided opList array in the gcl structure.
 * @param hwList Boolean: if true, read the HW list; if false, read the SW list.
 */
status_t ENET_QOS_EstReadGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t listLen, bool hwList)
{
    assert(gcl != NULL);
    assert(gcl->opList != NULL);

    uint8_t dbgm = 0;
    uint32_t data, i;
    enet_qos_est_gate_op_t *gateOp;
    status_t rc;

    if (hwList == true)
    {
        dbgm = 1;
    }

    /* LLR */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->numEntries = data;

    if (gcl->numEntries > listLen)
    {
        return kStatus_ENET_QOS_Est_InvalidParameter;
    }

    /* BTR High */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->baseTime = (uint64_t)data << 32U;

    /* BTR Low */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->baseTime |= data;

    /* CTR High */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->cycleTime = (uint64_t)data << 32U;

    /* CTR Low */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->cycleTime |= data;

    /* TER */
    rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1, dbgm);
    if (rc != kStatus_Success)
    {
        return rc;
    }
    gcl->extTime = data;

    gateOp = gcl->opList;
    for (i = 0; i < gcl->numEntries; i++)
    {
        rc = ENET_QOS_EstReadWord(base, i, &data, 0, dbgm);
        if (rc != kStatus_Success)
        {
            return rc;
        }
        gateOp->interval = data & (EST_MAX_INTERVAL);
        gateOp->gate     = data >> ENET_QOS_EST_WID;
        gateOp++;
    }

    return kStatus_Success;
}
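/*
 * Example (illustrative sketch): reading back the list that the hardware is currently using, to
 * verify a previous ENET_QOS_EstProgramGcl() call.
 *
 * @code
 * enet_qos_est_gate_op_t readOps[ENET_QOS_EST_DEP];
 * enet_qos_est_gcl_t readGcl = {.opList = &readOps[0]};
 *
 * if (ENET_QOS_EstReadGcl(ENET_QOS, &readGcl, ENET_QOS_EST_DEP, true) == kStatus_Success)
 * {
 *     // readGcl.numEntries entries of readOps[] now mirror the hardware gate control list.
 * }
 * @endcode
 */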
/*!
 * brief Read flexible rx parser configuration at specified index.
 *
 * This function is used to read the flexible rx parser configuration at the specified index.
 *
 * param base ENET peripheral base address.
 * param rxpConfig The rx parser configuration pointer.
 * param entryIndex The rx parser entry index to read, starting from 0.
 * retval kStatus_Success Configure rx parser success.
 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
 */
status_t ENET_QOS_ReadRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryIndex)
{
    assert(rxpConfig != NULL);
    assert(entryIndex < ENET_QOS_RXP_ENTRY_COUNT);

    uint32_t *dataPtr;
    uint8_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
    uint32_t value    = 0U;
    status_t result   = kStatus_Success;

    /* Wait hardware not busy */
    result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
                                     ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
    if (kStatus_Success != result)
    {
        return result;
    }

    for (uint8_t i = 0; i < entrySize; i++)
    {
        /* Read address. */
        value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR((uint32_t)entrySize * entryIndex + i);
        /* Issue read command. */
        value &= ~ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
        base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
        /* Start Read */
        value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
        base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;

        /* Wait hardware not busy */
        result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
                                         ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
        if (kStatus_Success != result)
        {
            return result;
        }

        dataPtr = (uint32_t *)(void *)&rxpConfig[entryIndex];
        dataPtr = &dataPtr[i];
        /* Read data */
        *dataPtr = base->MTL_RXP_INDIRECT_ACC_DATA;
    }

    return result;
}
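/*
 * Example (illustrative sketch): reading back rule 0 of the Rx parser table, e.g. to confirm a
 * previous ENET_QOS_ConfigureRxParser() call. Note that the function stores the result at
 * rxpConfig[entryIndex], so pass the base of an array that covers entryIndex.
 *
 * @code
 * enet_qos_rxp_config_t entry;
 *
 * if (ENET_QOS_ReadRxParser(ENET_QOS, &entry, 0U) == kStatus_Success)
 * {
 *     // entry now holds the match/control words of Rx parser rule 0.
 * }
 * @endcode
 */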
/*!
 * brief Configure flexible rx parser.
 *
 * This function is used to configure the flexible rx parser table.
 *
 * param base ENET peripheral base address.
 * param rxpConfig The rx parser configuration pointer.
 * param entryCount The rx parser entry count.
 * retval kStatus_Success Configure rx parser success.
 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
 */
status_t ENET_QOS_ConfigureRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryCount)
{
    assert(rxpConfig != NULL);
    assert(entryCount <= ENET_QOS_RXP_ENTRY_COUNT);

    uint32_t *dataPtr;
    uint32_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
    uint32_t value     = 0U;
    status_t result    = kStatus_Success;
    bool enableRx      = false;

    /* Disable the MAC rx. */
    if (0U != (base->MAC_CONFIGURATION & ENET_QOS_MAC_CONFIGURATION_RE_MASK))
    {
        base->MAC_CONFIGURATION &= ~ENET_QOS_MAC_CONFIGURATION_RE_MASK;
        enableRx = true;
    }

    /* Disable frame parser. */
    result = ENET_QOS_EnableRxParser(base, false);
    if (kStatus_Success != result)
    {
        return result;
    }

    for (uint16_t count = 0; count < entryCount; count++)
    {
        for (uint8_t i = 0; i < entrySize; i++)
        {
            /* Wait hardware not busy */
            result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
                                             ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
            if (kStatus_Success != result)
            {
                return result;
            }

            dataPtr = (uint32_t *)(void *)&rxpConfig[count];
            dataPtr = &dataPtr[i];
            /* Write data before issue write command */
            base->MTL_RXP_INDIRECT_ACC_DATA = *dataPtr;

            /* Write address and issue write command */
            value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR(entrySize * count + i);
            value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
            base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
            /* Start write */
            value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
            base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
        }
    }

    /* Wait hardware not busy */
    result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
                                     ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
    if (kStatus_Success != result)
    {
        return result;
    }

    /* Program NVE and NPE. */
    value = base->MTL_RXP_CONTROL_STATUS;
    value &= ~(ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE_MASK | ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE_MASK);
    value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE((uint32_t)entryCount - 1U);

    if (entryCount < 3U)
    {
        value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE(2U);
    }
    else
    {
        value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE((uint32_t)entryCount - 1U);
    }

    base->MTL_RXP_CONTROL_STATUS = value;

    /* Enable frame parser. */
    result = ENET_QOS_EnableRxParser(base, true);

    /* Enable Receive */
    if (enableRx)
    {
        base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
    }

    return result;
}
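/*
 * Example (illustrative sketch): loading a small Rx parser rule table. The rule contents (match
 * data, accept/reject actions, DMA channel routing) are filled in by the application elsewhere;
 * g_rxpRules is a hypothetical name and only the call sequence is shown.
 *
 * @code
 * extern enet_qos_rxp_config_t g_rxpRules[4];
 *
 * status_t status = ENET_QOS_ConfigureRxParser(ENET_QOS, &g_rxpRules[0], 4U);
 * @endcode
 */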
/*!
 * brief Gets statistical data in transfer.
 *
 * param base ENET_QOS peripheral base address.
 * param statistics The statistics structure pointer.
 */
void ENET_QOS_GetStatistics(ENET_QOS_Type *base, enet_qos_transfer_stats_t *statistics)
{
    /* Rx statistics */
    statistics->statsRxFrameCount      = base->MAC_RX_PACKETS_COUNT_GOOD_BAD;
    statistics->statsRxCrcErr          = base->MAC_RX_CRC_ERROR_PACKETS;
    statistics->statsRxAlignErr        = base->MAC_RX_ALIGNMENT_ERROR_PACKETS;
    statistics->statsRxLengthErr       = base->MAC_RX_LENGTH_ERROR_PACKETS;
    statistics->statsRxFifoOverflowErr = base->MAC_RX_FIFO_OVERFLOW_PACKETS;

    /* Tx statistics */
    statistics->statsTxFrameCount       = base->MAC_TX_PACKET_COUNT_GOOD_BAD;
    statistics->statsTxFifoUnderRunErr  = base->MAC_TX_UNDERFLOW_ERROR_PACKETS;
}
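/*
 * Example (illustrative sketch): sampling the MAC counters from a monitoring task. PRINTF is
 * assumed to come from the SDK debug console.
 *
 * @code
 * enet_qos_transfer_stats_t stats;
 *
 * ENET_QOS_GetStatistics(ENET_QOS, &stats);
 * PRINTF("rx %u frames, %u CRC errors\r\n", (unsigned int)stats.statsRxFrameCount,
 *        (unsigned int)stats.statsRxCrcErr);
 * @endcode
 */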
/*!
 * brief The ENET IRQ handler.
 *
 * param base ENET peripheral base address.
 * param handle The ENET handler pointer.
 */
void ENET_QOS_CommonIRQHandler(ENET_QOS_Type *base, enet_qos_handle_t *handle)
{
    /* Check for the interrupt source type. */
    /* DMA CHANNEL 0. */
    if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC0IS_MASK) != 0U)
    {
        uint32_t flag = base->DMA_CH[0].DMA_CHX_STAT;
        if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
        {
            base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            if (handle->callback != NULL)
            {
                handle->callback(base, handle, kENET_QOS_RxIntEvent, 0, handle->userData);
            }
        }
        if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
        {
            base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            ENET_QOS_ReclaimTxDescriptor(base, handle, 0);
        }
    }

    /* DMA CHANNEL 1. */
    if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC1IS_MASK) != 0U)
    {
        uint32_t flag = base->DMA_CH[1].DMA_CHX_STAT;
        if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
        {
            base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            if (handle->callback != NULL)
            {
                handle->callback(base, handle, kENET_QOS_RxIntEvent, 1, handle->userData);
            }
        }
        if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
        {
            base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            ENET_QOS_ReclaimTxDescriptor(base, handle, 1);
        }
    }

    /* DMA CHANNEL 2. */
    if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC2IS_MASK) != 0U)
    {
        uint32_t flag = base->DMA_CH[2].DMA_CHX_STAT;
        if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
        {
            base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            if (handle->callback != NULL)
            {
                handle->callback(base, handle, kENET_QOS_RxIntEvent, 2, handle->userData);
            }
        }
        if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
        {
            base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            ENET_QOS_ReclaimTxDescriptor(base, handle, 2);
        }
    }

    /* DMA CHANNEL 3. */
    if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC3IS_MASK) != 0U)
    {
        uint32_t flag = base->DMA_CH[3].DMA_CHX_STAT;
        if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
        {
            base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            if (handle->callback != NULL)
            {
                handle->callback(base, handle, kENET_QOS_RxIntEvent, 3, handle->userData);
            }
        }
        if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
        {
            base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
            ENET_QOS_ReclaimTxDescriptor(base, handle, 3);
        }
    }

    /* MAC TIMESTAMP. */
    if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_MACIS_MASK) != 0U)
    {
        if ((base->MAC_INTERRUPT_STATUS & ENET_QOS_MAC_INTERRUPT_STATUS_TSIS_MASK) != 0U)
        {
            if (handle->callback != NULL)
            {
                handle->callback(base, handle, kENET_QOS_TimeStampIntEvent, 0, handle->userData);
            }
        }
    }
    SDK_ISR_EXIT_BARRIER;
}
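/*
 * Example (illustrative sketch): an application callback with the shape invoked by
 * ENET_QOS_CommonIRQHandler() above. The enet_qos_event_t type name is assumed from the
 * kENET_QOS_*Event enumerators; the callback itself is registered when the driver handle is
 * created (not shown here).
 *
 * @code
 * static void APP_EnetCallback(ENET_QOS_Type *base,
 *                              enet_qos_handle_t *handle,
 *                              enet_qos_event_t event,
 *                              uint8_t channel,
 *                              void *userData)
 * {
 *     if (event == kENET_QOS_RxIntEvent)
 *     {
 *         // Signal the receive task to drain this channel with ENET_QOS_GetRxFrame().
 *     }
 * }
 * @endcode
 */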
#if defined(ENET_QOS)
void ENET_QOS_DriverIRQHandler(void);
void ENET_QOS_DriverIRQHandler(void)
{
    s_enetqosIsr(ENET_QOS, s_ENETHandle[0]);
}
#endif

#if defined(CONNECTIVITY__ENET_QOS)
void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void);
void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void)
{
    s_enetqosIsr(CONNECTIVITY__ENET_QOS, s_ENETHandle[0]);
}
#endif