sdhci.c 92 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152
  1. /*
  2. * Copyright (c) 2006-2024 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2024-08-16 zhujiale first version
  9. */
  10. #include <rtthread.h>
  11. #include <mm_aspace.h>
  12. #include "sdhci.h"
  13. #include <string.h>
  14. #define DBG_TAG "RT_SDHCI"
  15. #ifdef DRV_DEBUG
  16. #define DBG_LVL DBG_LOG
  17. #else
  18. #define DBG_LVL DBG_INFO
  19. #endif /* DRV_DEBUG */
  20. #include <rtdbg.h>
/* Debug overrides for host quirks/quirks2. Not referenced anywhere in this
 * chunk of the file — presumably intended to be patched at debug time to
 * force quirk bits; TODO confirm against the rest of the file. */
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
  23. /********************************************************* */
  24. /* cmd */
  25. /********************************************************* */
  26. void rt_read_reg_debug(struct rt_sdhci_host *host)
  27. {
  28. rt_kprintf("0x00 addddddddddddd = %x \n", rt_sdhci_readl(host, 0x00));
  29. rt_kprintf("0x04 EMMC_BLOCKSIZE = %x \n", rt_sdhci_readw(host, 0x04));
  30. rt_kprintf("0x06 EMMC_BLOCKCOUNT = %x \n", rt_sdhci_readw(host, 0x06));
  31. rt_kprintf("0x08 RT_SDHCI_ARGUMENT = %x \n", rt_sdhci_readl(host, 0x08));
  32. rt_kprintf("0x0c EMMC_XFER_MODE = %x \n", rt_sdhci_readw(host, 0x0c));
  33. rt_kprintf("0x0e RT_SDHCI_COMMAND = %x \n", rt_sdhci_readw(host, 0x0e));
  34. rt_kprintf("0x24 RT_SDHCI_PRESENT_STATE = %x \n", rt_sdhci_readl(host, 0x24));
  35. rt_kprintf("0x28 RT_SDHCI_HOST_CONTROL = %x \n", rt_sdhci_readb(host, 0x28));
  36. rt_kprintf("0x29 RT_SDHCI_POWER_CONTROL = %x \n", rt_sdhci_readb(host, 0x29));
  37. rt_kprintf("0x2a EMMC_BGAP_CTRL = %x \n", rt_sdhci_readb(host, 0x2a));
  38. rt_kprintf("0x2c EMMC_CLK_CTRL = %x \n", rt_sdhci_readw(host, 0x2c));
  39. rt_kprintf("0x2e EMMC_TOUT_CTRL = %x \n", rt_sdhci_readb(host, 0x2e));
  40. rt_kprintf("0x2f EMMC_SW_RST = %x \n", rt_sdhci_readb(host, 0x2f));
  41. rt_kprintf("0x30 RT_SDHCI_INT_STATUS = %x \n", rt_sdhci_readw(host, 0x30));
  42. rt_kprintf("0x32 RT_SDHCI_ERR_INT_STATUS = %x \n", rt_sdhci_readw(host, 0x32));
  43. rt_kprintf("0x34 RT_SDHCI_INT_ENABLE = %x \n", rt_sdhci_readw(host, 0x34));
  44. rt_kprintf("0x36 EMMC ERROR INT STATEN = %x \n", rt_sdhci_readw(host, 0x36));
  45. rt_kprintf("0x38 EMMC NORMAL INT SIGNAL EN = %x \n", rt_sdhci_readw(host, 0x38));
  46. rt_kprintf("0x3a EMMC ERROR INT SIGNAL EN = %x \n", rt_sdhci_readw(host, 0x3a));
  47. rt_kprintf("0x3c EMMC_AUTO_CMD_STAT = %x \n", rt_sdhci_readw(host, 0x3c));
  48. rt_kprintf("0x3e EMMC_HOST_CTRL2 = %x \n", rt_sdhci_readw(host, 0x3e));
  49. rt_kprintf("0x40 EMMC_CAPABILITIES1 = %x \n", rt_sdhci_readl(host, 0x40));
  50. rt_kprintf("0x44 EMMC_CAPABILITIES2 = %x \n", rt_sdhci_readl(host, 0x44));
  51. rt_kprintf("0x52 EMMC_FORC_ERR_INT_STAT = %x \n", rt_sdhci_readw(host, 0x52));
  52. rt_kprintf("0x54 EMMC_ADMA_ERR_STAT = %x \n", rt_sdhci_readb(host, 0x54));
  53. rt_kprintf("0x58 EMMC_ADMA_SA = %x \n", rt_sdhci_readl(host, 0x58));
  54. rt_kprintf("0x66 EMMC_PRESET_SDR12 = %x \n", rt_sdhci_readw(host, 0x66));
  55. rt_kprintf("0x68 EMMC_PRESET_SDR25 = %x \n", rt_sdhci_readw(host, 0x68));
  56. rt_kprintf("0x6a EMMC_PRESET_SDR50 = %x \n", rt_sdhci_readw(host, 0x6a));
  57. rt_kprintf("0x6c EMMC_PRESET_SDR104 = %x \n", rt_sdhci_readw(host, 0x6c));
  58. rt_kprintf("0x6e EMMC_PRESET_DDR50 = %x \n", rt_sdhci_readw(host, 0x6e));
  59. rt_kprintf("0x78 EMMC_ADMA_ID = %x \n", rt_sdhci_readl(host, 0x78));
  60. rt_kprintf("0xfe EMMC_HOST_CNTRL_VERS = %x \n", rt_sdhci_readw(host, 0xfe));
  61. }
  62. static inline rt_bool_t sdhci_has_requests(struct rt_sdhci_host *host)
  63. {
  64. return host->cmd || host->data_cmd;
  65. }
  66. static inline rt_bool_t sdhci_auto_cmd23(struct rt_sdhci_host *host,
  67. struct rt_mmcsd_req *mrq)
  68. {
  69. return mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD23);
  70. }
  71. static inline rt_bool_t sdhci_auto_cmd12(struct rt_sdhci_host *host,
  72. struct rt_mmcsd_req *mrq)
  73. {
  74. return !mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD12) && !mrq->cap_cmd_during_tfr;
  75. }
  76. static inline rt_bool_t sdhci_manual_cmd23(struct rt_sdhci_host *host,
  77. struct rt_mmcsd_req *mrq)
  78. {
  79. return mrq->sbc && !(host->flags & RT_SDHCI_AUTO_CMD23);
  80. }
  81. static inline rt_bool_t sdhci_data_line_cmd(struct rt_mmcsd_cmd *cmd)
  82. {
  83. return cmd->data || cmd->flags & MMC_RSP_BUSY;
  84. }
  85. void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable)
  86. {
  87. if (enable)
  88. host->ier |= RT_SDHCI_INT_DATA_TIMEOUT;
  89. else
  90. host->ier &= ~RT_SDHCI_INT_DATA_TIMEOUT;
  91. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  92. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  93. }
  94. void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing)
  95. {
  96. rt_uint16_t ctrl_2;
  97. ctrl_2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  98. ctrl_2 &= ~RT_SDHCI_CTRL_UHS_MASK;
  99. if ((timing == MMC_TIMING_MMC_HS200) || (timing == MMC_TIMING_UHS_SDR104))
  100. ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR104;
  101. else if (timing == MMC_TIMING_UHS_SDR12)
  102. ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR12;
  103. else if (timing == MMC_TIMING_UHS_SDR25)
  104. ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR25;
  105. else if (timing == MMC_TIMING_UHS_SDR50)
  106. ctrl_2 |= RT_SDHCI_CTRL_UHS_SDR50;
  107. else if ((timing == MMC_TIMING_UHS_DDR50) || (timing == MMC_TIMING_MMC_DDR52))
  108. ctrl_2 |= RT_SDHCI_CTRL_UHS_DDR50;
  109. else if (timing == MMC_TIMING_MMC_HS400)
  110. ctrl_2 |= RT_SDHCI_CTRL_HS400; /* Non-standard */
  111. rt_sdhci_writew(host, ctrl_2, RT_SDHCI_HOST_CONTROL2);
  112. }
  113. void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width)
  114. {
  115. rt_uint8_t ctrl;
  116. ctrl = rt_sdhci_readb(host, RT_SDHCI_HOST_CONTROL);
  117. if (width == MMC_BUS_WIDTH_8)
  118. {
  119. ctrl &= ~RT_SDHCI_CTRL_4BITBUS;
  120. ctrl |= RT_SDHCI_CTRL_8BITBUS;
  121. }
  122. else
  123. {
  124. if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
  125. ctrl &= ~RT_SDHCI_CTRL_8BITBUS;
  126. if (width == MMC_BUS_WIDTH_4)
  127. ctrl |= RT_SDHCI_CTRL_4BITBUS;
  128. else
  129. ctrl &= ~RT_SDHCI_CTRL_4BITBUS;
  130. }
  131. rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL);
  132. }
  133. static inline rt_bool_t sdhci_can_64bit_dma(struct rt_sdhci_host *host)
  134. {
  135. if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode)
  136. return host->caps & RT_SDHCI_CAN_64BIT_V4;
  137. return host->caps & RT_SDHCI_CAN_64BIT;
  138. }
  139. static void sdhci_do_enable_v4_mode(struct rt_sdhci_host *host)
  140. {
  141. rt_uint16_t ctrl2;
  142. ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  143. if (ctrl2 & RT_SDHCI_CTRL_V4_MODE)
  144. return;
  145. ctrl2 |= RT_SDHCI_CTRL_V4_MODE;
  146. rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2);
  147. }
  148. void rt_sdhci_cleanup_host(struct rt_sdhci_host *host)
  149. {
  150. return;
  151. }
  152. static void sdhci_set_default_irqs(struct rt_sdhci_host *host)
  153. {
  154. host->ier = RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_RESPONSE;
  155. if (host->tuning_mode == RT_SDHCI_TUNING_MODE_2 || host->tuning_mode == RT_SDHCI_TUNING_MODE_3)
  156. host->ier |= RT_SDHCI_INT_RETUNE;
  157. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  158. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  159. }
  160. static inline void sdhci_auto_cmd_select(struct rt_sdhci_host *host,
  161. struct rt_mmcsd_cmd *cmd,
  162. rt_uint16_t *mode)
  163. {
  164. rt_bool_t use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && (cmd->cmd_code != SD_IO_RW_EXTENDED);
  165. rt_bool_t use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
  166. rt_uint16_t ctrl2;
  167. if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode && (use_cmd12 || use_cmd23))
  168. {
  169. *mode |= RT_SDHCI_TRNS_AUTO_SEL;
  170. ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  171. if (use_cmd23)
  172. ctrl2 |= RT_SDHCI_CMD23_ENABLE;
  173. else
  174. ctrl2 &= ~RT_SDHCI_CMD23_ENABLE;
  175. rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2);
  176. return;
  177. }
  178. if (use_cmd12)
  179. *mode |= RT_SDHCI_TRNS_AUTO_CMD12;
  180. else if (use_cmd23)
  181. *mode |= RT_SDHCI_TRNS_AUTO_CMD23;
  182. }
  183. static rt_bool_t sdhci_present_error(struct rt_sdhci_host *host,
  184. struct rt_mmcsd_cmd *cmd, rt_bool_t present)
  185. {
  186. if (!present || host->flags & RT_SDHCI_DEVICE_DEAD)
  187. {
  188. cmd->err = -ENOMEDIUM;
  189. return RT_TRUE;
  190. }
  191. return RT_FALSE;
  192. }
  193. static rt_uint16_t sdhci_get_preset_value(struct rt_sdhci_host *host)
  194. {
  195. rt_uint16_t preset = 0;
  196. switch (host->timing)
  197. {
  198. case MMC_TIMING_MMC_HS:
  199. case MMC_TIMING_SD_HS:
  200. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_HIGH_SPEED);
  201. break;
  202. case MMC_TIMING_UHS_SDR12:
  203. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR12);
  204. break;
  205. case MMC_TIMING_UHS_SDR25:
  206. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR25);
  207. break;
  208. case MMC_TIMING_UHS_SDR50:
  209. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR50);
  210. break;
  211. case MMC_TIMING_UHS_SDR104:
  212. case MMC_TIMING_MMC_HS200:
  213. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR104);
  214. break;
  215. case MMC_TIMING_UHS_DDR50:
  216. case MMC_TIMING_MMC_DDR52:
  217. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_DDR50);
  218. break;
  219. case MMC_TIMING_MMC_HS400:
  220. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_HS400);
  221. break;
  222. default:
  223. preset = rt_sdhci_readw(host, RT_SDHCI_PRESET_FOR_SDR12);
  224. break;
  225. }
  226. return preset;
  227. }
  228. static void sdhci_set_card_detection(struct rt_sdhci_host *host, rt_bool_t enable)
  229. {
  230. rt_uint32_t present;
  231. if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) || !mmc_card_is_removable(host->mmc))
  232. return;
  233. if (enable)
  234. {
  235. present = rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & RT_SDHCI_CARD_PRESENT;
  236. host->ier |= present ? RT_SDHCI_INT_CARD_REMOVE : RT_SDHCI_INT_CARD_INSERT;
  237. }
  238. else
  239. {
  240. host->ier &= ~(RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CARD_INSERT);
  241. }
  242. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  243. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  244. }
/* Convenience wrapper: arm card insert/remove interrupts. */
static void sdhci_enable_card_detection(struct rt_sdhci_host *host)
{
    sdhci_set_card_detection(host, RT_TRUE);
}
  249. /********************************************************* */
  250. /* reset */
  251. /********************************************************* */
/* Why a controller reset is being requested; sdhci_reset_for_reason() maps
 * each reason to the CMD/DATA reset lines it pulses. */
enum sdhci_reset_reason
{
    RT_SDHCI_RESET_FOR_INIT,                    /* initial bring-up: reset CMD and DATA */
    RT_SDHCI_RESET_FOR_REQUEST_ERROR,           /* failed request: reset CMD then DATA */
    RT_SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, /* failed request: reset DATA only */
    RT_SDHCI_RESET_FOR_TUNING_ABORT,            /* tuning sequence aborted */
    RT_SDHCI_RESET_FOR_CARD_REMOVED,            /* card removed mid-operation */
    RT_SDHCI_RESET_FOR_CQE_RECOVERY,            /* command-queue engine recovery */
};
  261. static rt_bool_t sdhci_needs_reset(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq)
  262. {
  263. return (!(host->flags & RT_SDHCI_DEVICE_DEAD) && ((mrq->cmd && mrq->cmd->err) || (mrq->sbc && mrq->sbc->err) || (mrq->data && mrq->data->stop && mrq->data->stop->err) || (host->quirks & RT_SDHCI_QUIRK_RESET_AFTER_REQUEST)));
  264. }
  265. static rt_bool_t sdhci_do_reset(struct rt_sdhci_host *host, rt_uint8_t mask)
  266. {
  267. if (host->quirks & RT_SDHCI_QUIRK_NO_CARD_NO_RESET)
  268. {
  269. struct rt_mmc_host *mmc = host->mmc;
  270. if (!mmc->ops->get_cd(mmc))
  271. return RT_FALSE;
  272. }
  273. if (host->ops->reset)
  274. {
  275. host->ops->reset(host, mask);
  276. }
  277. return RT_TRUE;
  278. }
  279. static void sdhci_reset_for_reason(struct rt_sdhci_host *host, enum sdhci_reset_reason reason)
  280. {
  281. if (host->quirks2 & RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER)
  282. {
  283. sdhci_do_reset(host, RT_SDHCI_RESET_CMD | RT_SDHCI_RESET_DATA);
  284. return;
  285. }
  286. switch (reason)
  287. {
  288. case RT_SDHCI_RESET_FOR_INIT:
  289. sdhci_do_reset(host, RT_SDHCI_RESET_CMD | RT_SDHCI_RESET_DATA);
  290. break;
  291. case RT_SDHCI_RESET_FOR_REQUEST_ERROR:
  292. case RT_SDHCI_RESET_FOR_TUNING_ABORT:
  293. case RT_SDHCI_RESET_FOR_CARD_REMOVED:
  294. case RT_SDHCI_RESET_FOR_CQE_RECOVERY:
  295. sdhci_do_reset(host, RT_SDHCI_RESET_CMD);
  296. sdhci_do_reset(host, RT_SDHCI_RESET_DATA);
  297. break;
  298. case RT_SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
  299. sdhci_do_reset(host, RT_SDHCI_RESET_DATA);
  300. break;
  301. }
  302. }
/* Shorthand: sdhci_reset_for(host, REQUEST_ERROR) expands the reason suffix
 * into the RT_SDHCI_RESET_FOR_* enumerator. */
#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), RT_SDHCI_RESET_FOR_##r)
  304. static void sdhci_reset_for_all(struct rt_sdhci_host *host)
  305. {
  306. if (sdhci_do_reset(host, RT_SDHCI_RESET_ALL))
  307. {
  308. if (host->flags & (RT_SDHCI_USE_SDMA))
  309. {
  310. if (host->ops->enable_dma)
  311. host->ops->enable_dma(host);
  312. }
  313. host->preset_enabled = RT_FALSE;
  314. }
  315. }
  316. static void sdhci_runtime_pm_bus_on(struct rt_sdhci_host *host)
  317. {
  318. if (host->bus_on)
  319. return;
  320. host->bus_on = RT_TRUE;
  321. }
  322. static void sdhci_runtime_pm_bus_off(struct rt_sdhci_host *host)
  323. {
  324. if (!host->bus_on)
  325. return;
  326. host->bus_on = RT_FALSE;
  327. }
  328. void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask)
  329. {
  330. ssize_t timeout;
  331. rt_sdhci_writeb(host, mask, RT_SDHCI_SOFTWARE_RESET);
  332. if (mask & RT_SDHCI_RESET_ALL)
  333. {
  334. host->clock = 0;
  335. if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  336. sdhci_runtime_pm_bus_off(host);
  337. }
  338. timeout = rt_tick_from_millisecond(150);
  339. while (1)
  340. {
  341. timeout = timeout - rt_tick_get();
  342. if (!(rt_sdhci_readb(host, RT_SDHCI_SOFTWARE_RESET) & mask))
  343. break;
  344. if (timeout < 0)
  345. {
  346. rt_kprintf("%s: Reset 0x%x never completed.\n",
  347. mmc_hostname(host->mmc), (int)mask);
  348. rt_read_reg_debug(host);
  349. return;
  350. }
  351. rt_hw_us_delay(10);
  352. }
  353. }
  354. /********************************************************* */
  355. /* data */
  356. /********************************************************* */
  357. static rt_ubase_t sdhci_sdma_address(struct rt_sdhci_host *host)
  358. {
  359. return (rt_ubase_t)rt_kmem_v2p(host->data->buf);
  360. }
  361. static void sdhci_set_adma_addr(struct rt_sdhci_host *host, rt_uint32_t addr)
  362. {
  363. rt_sdhci_writel(host, lower_32_bits(addr), RT_SDHCI_ADMA_ADDRESS);
  364. if (host->flags & RT_SDHCI_USE_64_BIT_DMA)
  365. rt_sdhci_writel(host, upper_32_bits(addr), RT_SDHCI_ADMA_ADDRESS_HI);
  366. }
  367. static void sdhci_set_sdma_addr(struct rt_sdhci_host *host, rt_uint32_t addr)
  368. {
  369. if (host->v4_mode)
  370. sdhci_set_adma_addr(host, addr);
  371. else
  372. rt_sdhci_writel(host, addr, RT_SDHCI_DMA_ADDRESS);
  373. }
  374. static void sdhci_config_dma(struct rt_sdhci_host *host)
  375. {
  376. rt_uint8_t ctrl;
  377. rt_uint16_t ctrl2;
  378. if (host->version < RT_SDHCI_SPEC_200)
  379. return;
  380. ctrl = rt_sdhci_readb(host, RT_SDHCI_HOST_CONTROL);
  381. ctrl &= ~RT_SDHCI_CTRL_DMA_MASK;
  382. if (!(host->flags & RT_SDHCI_REQ_USE_DMA))
  383. goto out;
  384. /* Note if DMA Select is zero then SDMA is selected */
  385. if (host->flags & RT_SDHCI_USE_64_BIT_DMA)
  386. {
  387. if (host->v4_mode)
  388. {
  389. ctrl2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  390. ctrl2 |= RT_SDHCI_CTRL_64BIT_ADDR;
  391. rt_sdhci_writew(host, ctrl2, RT_SDHCI_HOST_CONTROL2);
  392. }
  393. }
  394. out:
  395. rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL);
  396. }
  397. static inline void sdhci_set_block_info(struct rt_sdhci_host *host,
  398. struct rt_mmcsd_data *data)
  399. {
  400. int boundary;
  401. size_t total_size = data->blks * data->blksize;
  402. if (total_size <= 512)
  403. boundary = 0; /* 4k bytes*/
  404. else if (total_size <= 1024)
  405. boundary = 1; /* 8 KB*/
  406. else if (total_size <= 2048)
  407. boundary = 2; /* 16 KB*/
  408. else if (total_size <= 4096)
  409. boundary = 3; /* 32 KB*/
  410. else if (total_size <= 8192)
  411. boundary = 4; /* 64 KB*/
  412. else if (total_size <= 16384)
  413. boundary = 5; /* 128 KB*/
  414. else if (total_size <= 32768)
  415. boundary = 6; /* 256 KB*/
  416. else
  417. boundary = 7; /* 512 KB*/
  418. rt_sdhci_writew(host,
  419. RT_SDHCI_MAKE_BLKSZ(boundary, data->blksize),
  420. RT_SDHCI_BLOCK_SIZE);
  421. if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode && (host->quirks2 & RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT))
  422. {
  423. if (rt_sdhci_readw(host, RT_SDHCI_BLOCK_COUNT))
  424. rt_sdhci_writew(host, 0, RT_SDHCI_BLOCK_COUNT);
  425. rt_sdhci_writew(host, data->blks, RT_SDHCI_32BIT_BLK_CNT);
  426. }
  427. else
  428. {
  429. rt_sdhci_writew(host, data->blks, RT_SDHCI_BLOCK_COUNT);
  430. }
  431. }
  432. static void sdhci_set_transfer_irqs(struct rt_sdhci_host *host)
  433. {
  434. rt_uint32_t pio_irqs = RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL;
  435. rt_uint32_t dma_irqs = RT_SDHCI_INT_DMA_END;
  436. if (host->flags & RT_SDHCI_REQ_USE_DMA)
  437. host->ier = (host->ier & ~pio_irqs) | dma_irqs;
  438. else
  439. host->ier = (host->ier & ~dma_irqs) | pio_irqs;
  440. if (host->flags & (RT_SDHCI_AUTO_CMD23 | RT_SDHCI_AUTO_CMD12))
  441. host->ier |= RT_SDHCI_INT_AUTO_CMD_ERR;
  442. else
  443. host->ier &= ~RT_SDHCI_INT_AUTO_CMD_ERR;
  444. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  445. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  446. }
/*
 * Prepare the controller for the data phase of @cmd: choose SDMA vs PIO,
 * do the cache maintenance required for DMA, and program the DMA address,
 * transfer interrupts and block size/count registers — in that order.
 */
static void sdhci_prepare_data(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd)
{
    struct rt_mmcsd_data *data = cmd->data;

    /* Debug traces only; out-of-range transfers are logged, not rejected. */
    LOG_D(data->blksize * data->blks > 524288);
    LOG_D(data->blksize > host->mmc->max_blk_size);
    LOG_D(data->blks > 65535);

    host->data = data;
    host->data_early = 0;
    host->data->bytes_xfered = 0;

    if (host->flags & RT_SDHCI_USE_SDMA)
    {
        unsigned int length_mask, offset_mask;

        host->flags |= RT_SDHCI_REQ_USE_DMA;

        /* Quirky hosts need 32-bit aligned transfer length / buffer
         * address; fall back to PIO when the request violates that. */
        length_mask = 0;
        offset_mask = 0;
        if (host->quirks & RT_SDHCI_QUIRK_32BIT_DMA_SIZE)
            length_mask = 3;
        if (host->quirks & RT_SDHCI_QUIRK_32BIT_DMA_ADDR)
            offset_mask = 3;
        if ((data->blks * data->blksize) & length_mask)
        {
            host->flags &= ~RT_SDHCI_REQ_USE_DMA;
        }
        else if ((rt_ubase_t)rt_kmem_v2p(data->buf) & offset_mask)
        {
            host->flags &= ~RT_SDHCI_REQ_USE_DMA;
        }
    }
    sdhci_config_dma(host);
    if (host->flags & RT_SDHCI_REQ_USE_DMA)
    {
        /* Cache maintenance before DMA: invalidate for device-to-memory so
         * stale lines are not served; flush for memory-to-device so the
         * controller sees current data. */
        if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE)
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
        else
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize);
        sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
    }
    if (!(host->flags & RT_SDHCI_REQ_USE_DMA))
    {
        /* PIO path: software tracks the remaining block count. */
        host->blocks = data->blks;
    }
    sdhci_set_transfer_irqs(host);
    sdhci_set_block_info(host, data);
}
  491. static void sdhci_set_mrq_done(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq)
  492. {
  493. int i;
  494. for (i = 0; i < RT_SDHCI_MAX_MRQS; i++)
  495. {
  496. if (host->mrqs_done[i] == mrq)
  497. {
  498. LOG_D(1);
  499. return;
  500. }
  501. }
  502. for (i = 0; i < RT_SDHCI_MAX_MRQS; i++)
  503. {
  504. if (!host->mrqs_done[i])
  505. {
  506. host->mrqs_done[i] = mrq;
  507. break;
  508. }
  509. }
  510. LOG_D(i >= RT_SDHCI_MAX_MRQS);
  511. }
  512. static inline rt_bool_t sdhci_defer_done(struct rt_sdhci_host *host,
  513. struct rt_mmcsd_req *mrq)
  514. {
  515. struct rt_mmcsd_data *data = mrq->data;
  516. return host->pending_reset || host->always_defer_done || ((host->flags & RT_SDHCI_REQ_USE_DMA) && data && data->host_cookie == COOKIE_MAPPED);
  517. }
  518. /********************************************************* */
  519. /* pio */
  520. /********************************************************* */
  521. static void rt_sdhci_read_block_pio(struct rt_sdhci_host *host,void **buf)
  522. {
  523. rt_uint32_t scratch;
  524. size_t len;
  525. rt_uint32_t blksize = host->data->blksize;
  526. while (blksize)
  527. {
  528. len = min(4U, blksize);
  529. scratch = rt_sdhci_readl(host, RT_SDHCI_BUFFER);
  530. rt_memcpy(*buf, &scratch, len);
  531. *buf += len;
  532. blksize -= len;
  533. }
  534. }
  535. static void rt_sdhci_write_block_pio(struct rt_sdhci_host *host,void **buf)
  536. {
  537. size_t blksize, len;
  538. rt_uint32_t scratch;
  539. LOG_D("PIO writing\n");
  540. blksize = host->data->blksize;
  541. scratch = 0;
  542. while (blksize)
  543. {
  544. len = min(4U, blksize);
  545. rt_memcpy(&scratch, *buf, len);
  546. *buf += len;
  547. blksize -= len;
  548. rt_sdhci_writel(host, scratch, RT_SDHCI_BUFFER);
  549. }
  550. }
  551. static void sdhci_transfer_pio(struct rt_sdhci_host *host)
  552. {
  553. rt_uint32_t mask;
  554. if (host->blocks == 0)
  555. return;
  556. if (host->data->flags & DATA_DIR_READ)
  557. mask = RT_SDHCI_DATA_AVAILABLE;
  558. else
  559. mask = RT_SDHCI_SPACE_AVAILABLE;
  560. if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_SMALL_PIO) && (host->data->blks == 1))
  561. {
  562. mask = ~0;
  563. }
  564. void *buf = (void *)host->data->buf;
  565. while (rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & mask)
  566. {
  567. if (host->quirks & RT_SDHCI_QUIRK_PIO_NEEDS_DELAY)
  568. rt_hw_us_delay(100);
  569. if (host->data->flags & DATA_DIR_READ)
  570. rt_sdhci_read_block_pio(host,&buf);
  571. else
  572. rt_sdhci_write_block_pio(host,&buf);
  573. host->data->blks--;
  574. if (host->data->blks == 0)
  575. break;
  576. }
  577. }
  578. /********************************************************* */
  579. /* config */
  580. /********************************************************* */
  581. static rt_bool_t sdhci_timing_has_preset(unsigned char timing)
  582. {
  583. switch (timing)
  584. {
  585. case MMC_TIMING_UHS_SDR12:
  586. case MMC_TIMING_UHS_SDR25:
  587. case MMC_TIMING_UHS_SDR50:
  588. case MMC_TIMING_UHS_SDR104:
  589. case MMC_TIMING_UHS_DDR50:
  590. case MMC_TIMING_MMC_DDR52:
  591. return RT_TRUE;
  592. }
  593. return RT_FALSE;
  594. }
  595. static rt_bool_t sdhci_preset_needed(struct rt_sdhci_host *host, unsigned char timing)
  596. {
  597. return !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && sdhci_timing_has_preset(timing);
  598. }
  599. static rt_bool_t sdhci_presetable_values_change(struct rt_sdhci_host *host, struct rt_mmcsd_io_cfg *ios)
  600. {
  601. return !host->preset_enabled && (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
  602. }
  603. static void sdhci_preset_value_enable(struct rt_sdhci_host *host, rt_bool_t enable)
  604. {
  605. if (host->version < RT_SDHCI_SPEC_300)
  606. return;
  607. if (host->preset_enabled != enable)
  608. {
  609. rt_uint16_t ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  610. if (enable)
  611. ctrl |= RT_SDHCI_CTRL_PRESET_VAL_ENABLE;
  612. else
  613. ctrl &= ~RT_SDHCI_CTRL_PRESET_VAL_ENABLE;
  614. rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2);
  615. if (enable)
  616. host->flags |= RT_SDHCI_PV_ENABLED;
  617. else
  618. host->flags &= ~RT_SDHCI_PV_ENABLED;
  619. host->preset_enabled = enable;
  620. }
  621. }
  622. static void sdhci_set_power_reg(struct rt_sdhci_host *host, unsigned char mode,
  623. unsigned short vdd)
  624. {
  625. struct rt_mmc_host *mmc = host->mmc;
  626. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
  627. if (mode != MMC_POWER_OFF)
  628. rt_sdhci_writeb(host, RT_SDHCI_POWER_ON, RT_SDHCI_POWER_CONTROL);
  629. else
  630. rt_sdhci_writeb(host, 0, RT_SDHCI_POWER_CONTROL);
  631. }
/*
 * Program the controller's own power register (no external regulator).
 *
 * @mode: MMC_POWER_* state; anything other than MMC_POWER_OFF powers up.
 * @vdd:  bit number of the selected MMC_VDD_* voltage window.
 *
 * Maps the VDD window to the nearest SDHCI bus voltage and performs the
 * on/off register sequence, honoring the controller quirk flags that
 * dictate write ordering and delays.
 */
void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode,
                                   unsigned short vdd)
{
    rt_uint8_t pwr = 0;

    if (mode != MMC_POWER_OFF)
    {
        /* Translate the VDD window bit into a bus-voltage setting. */
        switch (1 << vdd)
        {
        case MMC_VDD_165_195:
        case MMC_VDD_20_21:
            pwr = RT_SDHCI_POWER_180;
            break;
        case MMC_VDD_29_30:
        case MMC_VDD_30_31:
            pwr = RT_SDHCI_POWER_300;
            break;
        case MMC_VDD_32_33:
        case MMC_VDD_33_34:
        case MMC_VDD_34_35:
        case MMC_VDD_35_36:
            pwr = RT_SDHCI_POWER_330;
            break;
        default:
            /* Unsupported window: pwr stays 0 (treated as power off). */
            break;
        }
    }

    /* Already in the requested state: nothing to do. */
    if (host->pwr == pwr)
        return;

    host->pwr = pwr;

    if (pwr == 0)
    {
        /* Power off. */
        rt_sdhci_writeb(host, 0, RT_SDHCI_POWER_CONTROL);
        if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
            sdhci_runtime_pm_bus_off(host);
    }
    else
    {
        /* Most controllers want the register cleared before a new value. */
        if (!(host->quirks & RT_SDHCI_QUIRK_SINGLE_POWER_WRITE))
            rt_sdhci_writeb(host, 0, RT_SDHCI_POWER_CONTROL);

        /* Some controllers need the voltage set before the ON bit. */
        if (host->quirks & RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
            rt_sdhci_writeb(host, pwr, RT_SDHCI_POWER_CONTROL);

        pwr |= RT_SDHCI_POWER_ON;
        rt_sdhci_writeb(host, pwr, RT_SDHCI_POWER_CONTROL);

        if (host->quirks2 & RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
            sdhci_runtime_pm_bus_on(host);

        /* Some controllers need a settle delay after powering up. */
        if (host->quirks & RT_SDHCI_QUIRK_DELAY_AFTER_POWER)
            rt_thread_mdelay(10);
    }
}
  681. void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode,
  682. unsigned short vdd)
  683. {
  684. if (!host->mmc->supply.vmmc)
  685. rt_sdhci_set_power_with_noreg(host, mode, vdd);
  686. else
  687. sdhci_set_power_reg(host, mode, vdd);
  688. }
/*
 * Switch the I/O signalling voltage (3.3V / 1.8V / 1.2V).
 *
 * Returns 0 on success, -EINVAL if the host does not support the
 * requested level, -EIO on regulator failure, -EAGAIN when the VDD_180
 * bit did not settle to the expected state.
 *
 * NOTE(review): the regulator is invoked when mmc->supply.vqmmc is
 * NULL (`!mmc->supply.vqmmc`), which looks inverted relative to the
 * vmmc handling in rt_sdhci_set_power() — confirm against
 * rt_mmc_regulator_set_vqmmc()'s NULL handling.
 */
int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc,
                                         struct rt_mmcsd_io_cfg *ios)
{
    struct rt_sdhci_host *host = mmc_priv(mmc);
    rt_uint16_t ctrl;
    int ret;

    /* Signalling voltage is fixed on pre-3.00 controllers. */
    if (host->version < RT_SDHCI_SPEC_300)
        return 0;

    ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);

    switch (ios->signal_voltage)
    {
    case MMC_SIGNAL_VOLTAGE_330:
        if (!(host->flags & RT_SDHCI_SIGNALING_330))
            return -EINVAL;
        /* Clear 1.8V signalling enable. */
        ctrl &= ~RT_SDHCI_CTRL_VDD_180;
        rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2);
        if (!mmc->supply.vqmmc)
        {
            ret = rt_mmc_regulator_set_vqmmc(mmc, ios);
            if (ret < 0)
            {
                return -EIO;
            }
        }
        /* Wait for the 3.3V regulator to stabilize. */
        rt_thread_mdelay(5);
        /* VDD_180 must now read back as 0. */
        ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
        if (!(ctrl & RT_SDHCI_CTRL_VDD_180))
            return 0;
        return -EAGAIN;
    case MMC_SIGNAL_VOLTAGE_180:
        if (!(host->flags & RT_SDHCI_SIGNALING_180))
            return -EINVAL;
        if (!mmc->supply.vqmmc)
        {
            ret = rt_mmc_regulator_set_vqmmc(mmc, ios);
            if (ret < 0)
            {
                LOG_D("%s: Switching to 1.8V signalling voltage failed\n",
                      mmc_hostname(mmc));
                return -EIO;
            }
        }
        /* Enable 1.8V signalling. */
        ctrl |= RT_SDHCI_CTRL_VDD_180;
        rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2);

        /* Give the controller-specific hook a chance to assist. */
        if (host->ops->voltage_switch)
            host->ops->voltage_switch(host);

        /* VDD_180 must now read back as 1. */
        ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
        if (ctrl & RT_SDHCI_CTRL_VDD_180)
            return 0;

        LOG_D("%s: 1.8V regulator output did not become stable\n",
              mmc_hostname(mmc));

        return -EAGAIN;
    case MMC_SIGNAL_VOLTAGE_120:
        if (!(host->flags & RT_SDHCI_SIGNALING_120))
            return -EINVAL;
        if (!mmc->supply.vqmmc)
        {
            ret = rt_mmc_regulator_set_vqmmc(mmc, ios);
            if (ret < 0)
            {
                LOG_D("%s: Switching to 1.2V signalling voltage failed\n",
                      mmc_hostname(mmc));
                return -EIO;
            }
        }
        return 0;
    default:
        /* No signalling voltage change requested. */
        return 0;
    }
}
  759. static int sdhci_get_cd(struct rt_mmc_host *mmc)
  760. {
  761. struct rt_sdhci_host *host = mmc_priv(mmc);
  762. int gpio_cd = rt_mmc_gpio_get_cd(mmc);
  763. if (host->flags & RT_SDHCI_DEVICE_DEAD)
  764. return 0;
  765. if (!mmc_card_is_removable(mmc))
  766. return 1;
  767. if (gpio_cd >= 0)
  768. return !!gpio_cd;
  769. if (host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION)
  770. return 1;
  771. return !!(rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & RT_SDHCI_CARD_PRESENT);
  772. }
  773. static int sdhci_check_ro(struct rt_sdhci_host *host)
  774. {
  775. int is_readonly;
  776. rt_base_t flags;
  777. flags = rt_spin_lock_irqsave(&host->lock);
  778. if (host->flags & RT_SDHCI_DEVICE_DEAD)
  779. is_readonly = 0;
  780. else if (host->ops->get_ro)
  781. is_readonly = host->ops->get_ro(host);
  782. else if (rt_mmc_can_gpio_ro(host->mmc))
  783. is_readonly = rt_mmc_gpio_get_ro(host->mmc);
  784. else
  785. is_readonly = !(rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE)
  786. & RT_SDHCI_WRITE_PROTECT);
  787. rt_spin_unlock_irqrestore(&host->lock, flags);
  788. return host->quirks & RT_SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? !is_readonly : is_readonly;
  789. }
  790. #define SAMPLE_COUNT 5
  791. static int rt_sdhci_ro_get(struct rt_mmc_host *mmc)
  792. {
  793. struct rt_sdhci_host *host = mmc_priv(mmc);
  794. int i, ro_count;
  795. if (!(host->quirks & RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT))
  796. return sdhci_check_ro(host);
  797. ro_count = 0;
  798. for (i = 0; i < SAMPLE_COUNT; i++)
  799. {
  800. if (sdhci_check_ro(host))
  801. {
  802. if (++ro_count > SAMPLE_COUNT / 2)
  803. return 1;
  804. }
  805. rt_thread_mdelay(30);
  806. }
  807. return 0;
  808. }
  809. static void rt_sdhci_enable_io_irq_nolock(struct rt_sdhci_host *host, int enable)
  810. {
  811. if (!(host->flags & RT_SDHCI_DEVICE_DEAD))
  812. {
  813. if (enable)
  814. host->ier |= RT_SDHCI_INT_CARD_INT;
  815. else
  816. host->ier &= ~RT_SDHCI_INT_CARD_INT;
  817. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  818. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  819. }
  820. }
  821. static void sdhci_ack_sdio_irq(struct rt_mmc_host *mmc)
  822. {
  823. rt_base_t flags;
  824. struct rt_sdhci_host *host = mmc_priv(mmc);
  825. flags = rt_spin_lock_irqsave(&host->lock);
  826. rt_sdhci_enable_io_irq_nolock(host, RT_TRUE);
  827. rt_spin_unlock_irqrestore(&host->lock, flags);
  828. }
  829. static void sdhci_del_timer(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq)
  830. {
  831. if (sdhci_data_line_cmd(mrq->cmd))
  832. rt_timer_stop(&host->data_timer);
  833. else
  834. rt_timer_stop(&host->timer);
  835. }
  836. static unsigned int sdhci_target_timeout(struct rt_sdhci_host *host,
  837. struct rt_mmcsd_cmd *cmd,
  838. struct rt_mmcsd_data *data)
  839. {
  840. unsigned int target_timeout;
  841. if (!data)
  842. {
  843. target_timeout = cmd->busy_timeout * 1000;
  844. }
  845. else
  846. {
  847. target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
  848. if (host->clock && data->timeout_clks)
  849. {
  850. rt_uint32_t val;
  851. val = 1000000ULL * data->timeout_clks;
  852. if (do_div(val, host->clock))
  853. target_timeout++;
  854. target_timeout += val;
  855. }
  856. }
  857. return target_timeout;
  858. }
  859. static rt_uint8_t sdhci_calc_timeout(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd,
  860. rt_bool_t *too_big)
  861. {
  862. rt_uint8_t count;
  863. struct rt_mmcsd_data *data;
  864. unsigned target_timeout, current_timeout;
  865. *too_big = RT_FALSE;
  866. if (host->quirks & RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
  867. return host->max_timeout_count;
  868. if (cmd == NULL)
  869. return host->max_timeout_count;
  870. data = cmd->data;
  871. if (!data && !cmd->busy_timeout)
  872. return host->max_timeout_count;
  873. target_timeout = sdhci_target_timeout(host, cmd, data);
  874. count = 0;
  875. current_timeout = (1 << 13) * 1000 / host->timeout_clk;
  876. while (current_timeout < target_timeout)
  877. {
  878. count++;
  879. current_timeout <<= 1;
  880. if (count > host->max_timeout_count)
  881. {
  882. if (!(host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
  883. LOG_D("Too large timeout 0x%x requested for CMD%d!\n",
  884. count, cmd->cmd_code);
  885. count = host->max_timeout_count;
  886. *too_big = RT_TRUE;
  887. break;
  888. }
  889. }
  890. return count;
  891. }
/*
 * Compute host->data_timeout for the software-timeout path, used when
 * the hardware timeout cannot represent the requested value.
 *
 * The per-block/command target (us) is scaled by 1000 to ns, then for
 * data transfers multiplied by the block count and padded with twice
 * the raw bus transfer time plus a fixed command overhead.
 */
static void sdhci_calc_sw_timeout(struct rt_sdhci_host *host,
                                  struct rt_mmcsd_cmd *cmd)
{
    struct rt_mmcsd_data *data = cmd->data;
    struct rt_mmc_host *mmc = host->mmc;
    struct rt_mmcsd_io_cfg *ios = &mmc->ios;
    unsigned char bus_width = 1 << ios->bus_width; /* data lines on the bus */
    unsigned int blksz;
    unsigned int freq;
    rt_uint64_t target_timeout;
    rt_uint64_t transfer_time;

    /* Target timeout in ns. */
    target_timeout = sdhci_target_timeout(host, cmd, data);
    target_timeout *= 1000L;

    if (data)
    {
        blksz = data->blksize;
        /* Prefer the measured clock when available. */
        freq = mmc->actual_clock ?: host->clock;
        /* ns to clock one block out over the bus ... */
        transfer_time = (rt_uint64_t)blksz * 1000000000L * (8 / bus_width);
        do_div(transfer_time, freq);
        /* ... doubled as a safety margin. */
        transfer_time = transfer_time * 2;
        host->data_timeout = data->blks * target_timeout + transfer_time;
    }
    else
    {
        host->data_timeout = target_timeout;
    }

    /* Fixed allowance for the command phase itself. */
    if (host->data_timeout)
        host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
  921. void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd)
  922. {
  923. rt_bool_t too_big = RT_FALSE;
  924. rt_uint8_t count = sdhci_calc_timeout(host, cmd, &too_big);
  925. if (too_big && host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)
  926. {
  927. sdhci_calc_sw_timeout(host, cmd);
  928. rt_sdhci_data_irq_timeout(host, RT_FALSE);
  929. }
  930. else if (!(host->ier & RT_SDHCI_INT_DATA_TIMEOUT))
  931. {
  932. rt_sdhci_data_irq_timeout(host, RT_FALSE);
  933. }
  934. rt_sdhci_writeb(host, count, RT_SDHCI_TIMEOUT_CONTROL);
  935. }
  936. static void sdhci_set_timeout(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd)
  937. {
  938. if (host->ops->set_timeout)
  939. host->ops->set_timeout(host, cmd);
  940. else
  941. rt_sdhci_timeout_set(host, cmd);
  942. }
  943. static void sdhci_start_timer(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq,
  944. unsigned long timeout)
  945. {
  946. if (sdhci_data_line_cmd(mrq->cmd))
  947. {
  948. rt_tick_t tick = rt_tick_get();
  949. if (timeout < tick)
  950. {
  951. timeout = tick;
  952. }
  953. tick = timeout - tick;
  954. rt_timer_stop(&host->data_timer);
  955. rt_timer_control(&host->data_timer, RT_TIMER_CTRL_SET_TIME, &tick);
  956. rt_timer_start(&host->data_timer);
  957. }
  958. else
  959. {
  960. rt_tick_t tick = rt_tick_get();
  961. if (timeout < tick)
  962. {
  963. timeout = tick;
  964. }
  965. tick = timeout - tick;
  966. rt_timer_stop(&host->timer);
  967. rt_timer_control(&host->timer, RT_TIMER_CTRL_SET_TIME, &tick);
  968. rt_timer_start(&host->timer);
  969. }
  970. }
/*
 * Core request completion: drop every live host reference to @mrq,
 * latch a pending reset if the request failed, record the request as
 * done and stop its watchdog timer.  Caller holds host->lock.
 */
static void __sdhci_finish_mrq(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq)
{
    /* Clear any host pointer still referring to this request. */
    if (host->cmd && host->cmd->mrq == mrq)
        host->cmd = NULL;
    if (host->data_cmd && host->data_cmd->mrq == mrq)
        host->data_cmd = NULL;
    if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
        host->deferred_cmd = NULL;
    if (host->data && host->data->mrq == mrq)
        host->data = NULL;

    /* Failed requests leave the controller needing a reset. */
    if (sdhci_needs_reset(host, mrq))
        host->pending_reset = RT_TRUE;

    sdhci_set_mrq_done(host, mrq);

    sdhci_del_timer(host, mrq);
}
/*
 * Finish @mrq and kick the completion workqueue so the remaining
 * post-processing runs in thread context.
 */
static void sdhci_finish_mrq(struct rt_sdhci_host *host, struct rt_mmcsd_req *mrq)
{
    __sdhci_finish_mrq(host, mrq);

    rt_workqueue_submit_work(host->complete_wq, &host->complete_work, 0);
}
  991. static void sdhci_error_out_mrqs(struct rt_sdhci_host *host, int err)
  992. {
  993. if (host->data_cmd)
  994. {
  995. host->data_cmd->err = err;
  996. sdhci_finish_mrq(host, host->data_cmd->mrq);
  997. }
  998. if (host->cmd)
  999. {
  1000. host->cmd->err = err;
  1001. sdhci_finish_mrq(host, host->cmd->mrq);
  1002. }
  1003. }
  1004. static void sdhci_card_event(struct rt_mmc_host *mmc)
  1005. {
  1006. struct rt_sdhci_host *host = mmc_priv(mmc);
  1007. rt_uint32_t flags;
  1008. int present;
  1009. if (host->ops->card_event)
  1010. host->ops->card_event(host);
  1011. present = mmc->ops->get_cd(mmc);
  1012. flags = rt_spin_lock_irqsave(&host->lock);
  1013. if (sdhci_has_requests(host) && !present)
  1014. {
  1015. rt_kprintf("%s: Card removed during transfer!\n",
  1016. mmc_hostname(mmc));
  1017. rt_kprintf("%s: Resetting controller.\n",
  1018. mmc_hostname(mmc));
  1019. sdhci_do_reset(host, RT_SDHCI_RESET_CMD);
  1020. sdhci_do_reset(host, RT_SDHCI_RESET_DATA);
  1021. sdhci_error_out_mrqs(host, -ENOMEDIUM);
  1022. }
  1023. rt_spin_unlock_irqrestore(&host->lock, flags);
  1024. }
  1025. static int sdhci_card_busy(struct rt_mmc_host *mmc)
  1026. {
  1027. struct rt_sdhci_host *host = mmc_priv(mmc);
  1028. rt_uint32_t present_state;
  1029. present_state = rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE);
  1030. return !(present_state & RT_SDHCI_DATA_0_LVL_MASK);
  1031. }
  1032. static int sdhci_prepare_hs400_tuning(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios)
  1033. {
  1034. struct rt_sdhci_host *host = mmc_priv(mmc);
  1035. rt_uint32_t flags;
  1036. flags = rt_spin_lock_irqsave(&host->lock);
  1037. host->flags |= RT_SDHCI_HS400_TUNING;
  1038. rt_spin_unlock_irqrestore(&host->lock, flags);
  1039. return 0;
  1040. }
/*
 * Program the TRANSFER_MODE register for @cmd.
 *
 * Non-data commands either clear the register outright (quirk) or strip
 * the auto-CMD bits left over from a previous transfer.  Data commands
 * get block-count-enable, multi-block, auto-CMD12/23, direction and DMA
 * bits as applicable.
 */
static void sdhci_set_transfer_mode(struct rt_sdhci_host *host,
                                    struct rt_mmcsd_cmd *cmd)
{
    rt_uint16_t mode = 0;
    struct rt_mmcsd_data *data = cmd->data;

    if (data == NULL)
    {
        if (host->quirks2 & RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD)
        {
            /* Tuning commands must leave the register untouched. */
            if (!mmc_op_tuning(cmd->cmd_code))
                rt_sdhci_writew(host, 0x0, RT_SDHCI_TRANSFER_MODE);
        }
        else
        {
            /* Only clear the stale auto-CMD bits. */
            mode = rt_sdhci_readw(host, RT_SDHCI_TRANSFER_MODE);
            rt_sdhci_writew(host, mode & ~(RT_SDHCI_TRNS_AUTO_CMD12 | RT_SDHCI_TRNS_AUTO_CMD23), RT_SDHCI_TRANSFER_MODE);
        }
        return;
    }

    if (!(host->quirks2 & RT_SDHCI_QUIRK2_SUPPORT_SINGLE))
        mode = RT_SDHCI_TRNS_BLK_CNT_EN;

    if (mmc_op_multi(cmd->cmd_code) || data->blks > 1)
    {
        mode = RT_SDHCI_TRNS_BLK_CNT_EN | RT_SDHCI_TRNS_MULTI;
        /* Choose auto-CMD12 vs auto-CMD23 per host capabilities. */
        sdhci_auto_cmd_select(host, cmd, &mode);
        /* Auto-CMD23 needs the block count in ARGUMENT2. */
        if (sdhci_auto_cmd23(host, cmd->mrq))
            rt_sdhci_writel(host, cmd->mrq->sbc->arg, RT_SDHCI_ARGUMENT2);
    }

    if (data->flags & DATA_DIR_READ)
        mode |= RT_SDHCI_TRNS_READ;
    if (host->flags & RT_SDHCI_REQ_USE_DMA)
        mode |= RT_SDHCI_TRNS_DMA;

    rt_sdhci_writew(host, mode, RT_SDHCI_TRANSFER_MODE);
}
/*
 * Issue @cmd to the controller.
 *
 * Returns RT_FALSE (without touching host state) when the CMD/DATA
 * inhibit bits are still set, so the caller can retry or defer.
 * Otherwise programs timeout, data, argument and transfer mode, arms
 * the watchdog timer and writes the COMMAND register.
 */
static rt_bool_t sdhci_send_command(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd)
{
    int flags;
    rt_uint32_t mask;
    unsigned long timeout;

    cmd->err = 0;

    /* Quirk: CMD12 completion is signalled by Transfer Complete. */
    if ((host->quirks2 & RT_SDHCI_QUIRK2_STOP_WITH_TC) && cmd->cmd_code == MMC_STOP_TRANSMISSION)
        cmd->flags |= MMC_RSP_BUSY;

    mask = RT_SDHCI_CMD_INHIBIT;
    if (sdhci_data_line_cmd(cmd))
        mask |= RT_SDHCI_DATA_INHIBIT;

    /* A stop command may be sent while the data lines are still busy. */
    if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
        mask &= ~RT_SDHCI_DATA_INHIBIT;

    /* Controller busy: let the caller retry/defer. */
    if (rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & mask)
        return RT_FALSE;

    host->cmd = cmd;
    host->data_timeout = 0;
    if (sdhci_data_line_cmd(cmd))
    {
        host->data_cmd = cmd;
        sdhci_set_timeout(host, cmd);
    }

    if (cmd->data)
    {
        sdhci_prepare_data(host, cmd);
    }

    rt_sdhci_writel(host, cmd->arg, RT_SDHCI_ARGUMENT);

    sdhci_set_transfer_mode(host, cmd);

    /* Long (136-bit) responses cannot also signal busy. */
    if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY))
    {
        cmd->flags &= ~MMC_RSP_BUSY;
    }

    /* Encode the response type into the command flags. */
    if (!(cmd->flags & MMC_RSP_PRESENT))
        flags = RT_SDHCI_CMD_RESP_NONE;
    else if (cmd->flags & MMC_RSP_136)
        flags = RT_SDHCI_CMD_RESP_LONG;
    else if (cmd->flags & MMC_RSP_BUSY)
        flags = RT_SDHCI_CMD_RESP_SHORT_BUSY;
    else
        flags = RT_SDHCI_CMD_RESP_SHORT;

    if (cmd->flags & MMC_RSP_CRC)
        flags |= RT_SDHCI_CMD_CRC;
    if (cmd->flags & MMC_RSP_OPCODE)
        flags |= RT_SDHCI_CMD_INDEX;

    if (cmd->data || mmc_op_tuning(cmd->cmd_code))
        flags |= RT_SDHCI_CMD_DATA;

    /*
     * Compute the absolute watchdog deadline in ticks.
     * NOTE(review): host->data_timeout is computed in ns by
     * sdhci_calc_sw_timeout(), yet it is passed through
     * rt_tick_from_millisecond(ns * 1000) here — the scaling looks off
     * by several orders of magnitude; confirm the intended unit.
     */
    timeout = rt_tick_get();
    if (host->data_timeout)
        timeout += rt_tick_from_millisecond(host->data_timeout * 1000);
    else if (!cmd->data && cmd->busy_timeout > 9000)
        timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * RT_TICK_PER_SECOND + RT_TICK_PER_SECOND;
    else
        timeout += 10 * RT_TICK_PER_SECOND;
    sdhci_start_timer(host, cmd->mrq, timeout);

    rt_sdhci_writew(host, RT_SDHCI_MAKE_CMD(cmd->cmd_code, flags), RT_SDHCI_COMMAND);

    return RT_TRUE;
}
/********************************************************* */
/* data transfer completion */
/********************************************************* */
/*
 * Complete the current data transfer.
 *
 * Clears host->data/host->data_cmd, resets the controller on error,
 * computes bytes_xfered, then either issues the stop command (CMD12) by
 * hand, defers it, or finishes the whole request.
 *
 * @sw_data_timeout: RT_TRUE when called from the software-timeout path;
 *                   a stop command that cannot be sent then fails
 *                   immediately instead of being deferred.
 */
static void __sdhci_finish_data(struct rt_sdhci_host *host, rt_bool_t sw_data_timeout)
{
    struct rt_mmcsd_cmd *data_cmd = host->data_cmd;
    struct rt_mmcsd_data *data = host->data;

    host->data = NULL;
    host->data_cmd = NULL;

    /* On error, reset CMD too unless a different command is in flight. */
    if (data->err)
    {
        if (!host->cmd || host->cmd == data_cmd)
            sdhci_reset_for(host, REQUEST_ERROR);
        else
            sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
    }

    /* A failed transfer counts as zero bytes transferred. */
    if (data->err)
    {
        data->bytes_xfered = 0;
    }
    else
    {
        data->bytes_xfered = data->blksize * data->blks;
    }

    /*
     * Send the stop command by hand when one exists and it was not
     * handled automatically (no SBC and no auto-CMD12), or when the
     * transfer failed and the card must be stopped regardless.
     */
    if (data->stop && ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || data->err))
    {
        if (data->mrq->cap_cmd_during_tfr)
        {
            /* The upper layer sends the stop itself. */
            __sdhci_finish_mrq(host, data->mrq);
        }
        else
        {
            /* Free the command path for the stop command. */
            host->cmd = NULL;
            if (!sdhci_send_command(host, data->stop))
            {
                if (sw_data_timeout)
                {
                    /* Nothing will retry a deferred command on this
                     * path — fail the request now. */
                    data->stop->err = -EIO;
                    __sdhci_finish_mrq(host, data->mrq);
                }
                else
                {
                    /* Retry later from IRQ work context. */
                    host->deferred_cmd = data->stop;
                }
            }
        }
    }
    else
    {
        __sdhci_finish_mrq(host, data->mrq);
    }
}
/* Finish the current data transfer (normal, non-software-timeout path). */
static void sdhci_finish_data(struct rt_sdhci_host *host)
{
    __sdhci_finish_data(host, RT_FALSE);
}
  1188. /********************************************************* */
  1189. /* irq */
  1190. /********************************************************* */
/*
 * Handle the data-related interrupt bits in @intmask.
 *
 * Covers tuning-block completion, busy-wait completion for commands
 * with no data, data errors, PIO buffer servicing, SDMA boundary
 * restarts and final transfer completion (including cache maintenance
 * for DMA buffers).  Called with host->lock held from sdhci_irq().
 */
static void sdhci_data_irq(struct rt_sdhci_host *host, rt_uint32_t intmask)
{
    rt_uint32_t command;

    /* CMD19/CMD21 deliver the tuning block via Buffer Read Ready. */
    if (intmask & RT_SDHCI_INT_DATA_AVAIL && !host->data)
    {
        command = RT_SDHCI_GET_CMD(rt_sdhci_readw(host, RT_SDHCI_COMMAND));
        if (command == MMC_SEND_TUNING_BLOCK || command == MMC_SEND_TUNING_BLOCK_HS200)
        {
            host->tuning_done = 1;
            rt_wqueue_wakeup(&host->buf_ready_int, 0);
            return;
        }
    }

    if (!host->data)
    {
        struct rt_mmcsd_cmd *data_cmd = host->data_cmd;

        /*
         * A command with MMC_RSP_BUSY and no data uses the data
         * interrupts to signal the end of the busy period.
         */
        if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY))
        {
            if (intmask & RT_SDHCI_INT_DATA_TIMEOUT)
            {
                /* Busy period timed out. */
                host->data_cmd = NULL;
                data_cmd->err = -ETIMEDOUT;
                __sdhci_finish_mrq(host, data_cmd->mrq);
                return;
            }
            if (intmask & RT_SDHCI_INT_DATA_END)
            {
                host->data_cmd = NULL;
                /*
                 * Transfer Complete before Command Complete: let the
                 * command-complete handler finish the request.
                 */
                if (host->cmd == data_cmd)
                    return;

                __sdhci_finish_mrq(host, data_cmd->mrq);
                return;
            }
        }

        /* Stray data interrupts are expected while a reset is pending. */
        if (host->pending_reset)
            return;

        rt_kprintf("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
                   mmc_hostname(host->mmc), (unsigned)intmask);
        rt_read_reg_debug(host);

        return;
    }

    /* Record any data error before acting on completion bits. */
    if (intmask & RT_SDHCI_INT_DATA_TIMEOUT)
        host->data->err = -ETIMEDOUT;
    else if (intmask & RT_SDHCI_INT_DATA_END_BIT)
        host->data->err = -EILSEQ;
    else if ((intmask & RT_SDHCI_INT_DATA_CRC) && RT_SDHCI_GET_CMD(rt_sdhci_readw(host, RT_SDHCI_COMMAND)) != MMC_BUS_TEST_R)
    {
        /* CRC errors during CMD14 bus testing are expected; ignore those. */
        host->data->err = -EILSEQ;
    }

    if (host->data->err)
    {
        sdhci_finish_data(host);
    }
    else
    {
        /* Service the PIO FIFO when the controller asks for it. */
        if (intmask & (RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL))
            sdhci_transfer_pio(host);

        /*
         * SDMA stops at each boundary; advance the address to the next
         * boundary-aligned position and restart the transfer.
         */
        if (intmask & RT_SDHCI_INT_DMA_END)
        {
            rt_uint32_t dmastart, dmanow;

            dmastart = sdhci_sdma_address(host);
            dmanow = dmastart + host->data->bytes_xfered;

            dmanow = (dmanow & ~((rt_uint32_t)RT_SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + RT_SDHCI_DEFAULT_BOUNDARY_SIZE;
            host->data->bytes_xfered = dmanow - dmastart;
            LOG_D("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
                  &dmastart, host->data->bytes_xfered, &dmanow);
            sdhci_set_sdma_addr(host, dmanow);
        }

        if (intmask & RT_SDHCI_INT_DATA_END)
        {
            struct rt_mmcsd_data *data = host->data;

            /* Keep the CPU cache coherent with the DMA buffer. */
            if (data->buf)
            {
                if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE)
                {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
                } else {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize);
                }
            }
            if (host->cmd == host->data_cmd)
            {
                /*
                 * Data finished before the command completed: defer
                 * until the command-complete handler runs.
                 */
                host->data_early = 1;
            }
            else
            {
                sdhci_finish_data(host);
            }
        }
    }
}
  1282. static void rt_sdhci_read_rsp_136(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd)
  1283. {
  1284. int i, reg;
  1285. for (i = 0; i < 4; i++)
  1286. {
  1287. reg = RT_SDHCI_RESPONSE + (3 - i) * 4;
  1288. cmd->resp[i] = rt_sdhci_readl(host, reg);
  1289. }
  1290. if (host->quirks2 & RT_SDHCI_QUIRK2_RSP_136_HAS_CRC)
  1291. return;
  1292. for (i = 0; i < 4; i++)
  1293. {
  1294. cmd->resp[i] <<= 8;
  1295. if (i != 3)
  1296. cmd->resp[i] |= cmd->resp[i + 1] >> 24;
  1297. }
  1298. }
/*
 * Handle command completion: read back the response, honor busy
 * signalling, then either chain the main command (after CMD23/SBC) or
 * finish early-completed data / the whole request.
 */
static void sdhci_command_end(struct rt_sdhci_host *host)
{
    struct rt_mmcsd_cmd *cmd = host->cmd;

    host->cmd = NULL;

    if (cmd->flags & MMC_RSP_PRESENT)
    {
        if (cmd->flags & MMC_RSP_136)
        {
            rt_sdhci_read_rsp_136(host, cmd);
        }
        else
        {
            cmd->resp[0] = rt_sdhci_readl(host, RT_SDHCI_RESPONSE);
        }
    }

    if (cmd->flags & MMC_RSP_BUSY)
    {
        if (cmd->data)
        {
            LOG_D("Cannot wait for busy signal when also doing a data transfer");
        }
        else if (!(host->quirks & RT_SDHCI_QUIRK_NO_BUSY_IRQ) && cmd == host->data_cmd)
        {
            /* Wait for the Transfer Complete interrupt instead of
             * finishing the request now. */
            return;
        }
    }

    if (cmd == cmd->mrq->sbc)
    {
        /* CMD23 done: launch the actual data command. */
        if (!sdhci_send_command(host, cmd->mrq->cmd))
        {
            /* Inhibit bits still set; retry from thread context. */
            host->deferred_cmd = cmd->mrq->cmd;
        }
    }
    else
    {
        /* Data completed before the command did: finish it now. */
        if (host->data && host->data_early)
            sdhci_finish_data(host);

        if (!cmd->data)
            __sdhci_finish_mrq(host, cmd->mrq);
    }
}
/*
 * Handle the command-related interrupt bits in @intmask.
 *
 * Auto-CMD12 errors are promoted into the corresponding data-error bit
 * via @intmask_p so the data path processes them.  Command CRC errors
 * on data commands are likewise treated as data CRC errors.
 */
static void sdhci_cmd_irq(struct rt_sdhci_host *host, rt_uint32_t intmask, rt_uint32_t *intmask_p)
{
    /* Auto-CMD12 error while a data command is in flight. */
    if (intmask & RT_SDHCI_INT_AUTO_CMD_ERR && host->data_cmd)
    {
        struct rt_mmcsd_req *mrq = host->data_cmd->mrq;
        rt_uint16_t auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS);
        int data_err_bit = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? RT_SDHCI_INT_DATA_TIMEOUT : RT_SDHCI_INT_DATA_CRC;

        /* Promote the auto-CMD12 error to the matching data error. */
        if (!mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD12))
        {
            *intmask_p |= data_err_bit;
            return;
        }
    }

    if (!host->cmd)
    {
        /* Stray command interrupts are expected during a pending reset. */
        if (host->pending_reset)
            return;
        rt_kprintf("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
                   mmc_hostname(host->mmc), (unsigned)intmask);
        rt_read_reg_debug(host);
        return;
    }

    if (intmask & (RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_INDEX))
    {
        if (intmask & RT_SDHCI_INT_TIMEOUT)
            host->cmd->err = -ETIMEDOUT;
        else
            host->cmd->err = -EILSEQ;

        /* Treat data command CRC error the same as data CRC error */
        if (host->cmd->data && (intmask & (RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT)) == RT_SDHCI_INT_CRC)
        {
            host->cmd = NULL;
            *intmask_p |= RT_SDHCI_INT_DATA_CRC;
            return;
        }

        __sdhci_finish_mrq(host, host->cmd->mrq);
        return;
    }

    /* Auto-CMD23 error: fail the SBC and finish the request. */
    if (intmask & RT_SDHCI_INT_AUTO_CMD_ERR)
    {
        struct rt_mmcsd_req *mrq = host->cmd->mrq;
        rt_uint16_t auto_cmd_status = rt_sdhci_readw(host, RT_SDHCI_AUTO_CMD_STATUS);
        int err = (auto_cmd_status & RT_SDHCI_AUTO_CMD_TIMEOUT) ? -ETIMEDOUT : -EILSEQ;

        if (mrq->sbc && (host->flags & RT_SDHCI_AUTO_CMD23))
        {
            mrq->sbc->err = err;
            __sdhci_finish_mrq(host, mrq);
            return;
        }
    }

    if (intmask & RT_SDHCI_INT_RESPONSE)
        sdhci_command_end(host);
}
/*
 * Top-level interrupt handler.
 *
 * Loops (bounded by max_loops) reading and dispatching INT_STATUS bits
 * to the command/data sub-handlers, collects requests that can be
 * completed immediately, and defers the rest (card insert/remove,
 * deferred commands, DMA-mapped completions) to the IRQ workqueue.
 */
static void sdhci_irq(int irq, void *dev_id)
{
#define IRQ_NONE 0
#define IRQ_WAIT 1
#define IRQ_DONE 2
    struct rt_mmcsd_req* mrqs_done[RT_SDHCI_MAX_MRQS] = { 0 };
    struct rt_sdhci_host *host = dev_id;
    rt_uint32_t intmask, mask, unexpected = 0;
    int max_loops = 16;
    int i, result= IRQ_NONE ;

    rt_spin_lock(&host->lock);

    /* Ignore interrupts while runtime-suspended. */
    if (host->runtime_suspended)
    {
        rt_spin_unlock(&host->lock);
        return;
    }

    intmask = rt_sdhci_readl(host, RT_SDHCI_INT_STATUS);
    /* 0xffffffff typically means the controller has gone away. */
    if (!intmask || intmask == 0xffffffff)
    {
        result = IRQ_NONE;
        goto out;
    }

    do {
        LOG_D("IRQ status 0x%08x\n", intmask);

        /* Let the controller-specific hook filter/consume bits first. */
        if (host->ops->irq)
        {
            intmask = host->ops->irq(host, intmask);
            if (!intmask)
                goto cont;
        }

        /* Clear selected interrupts. */
        mask = intmask & (RT_SDHCI_INT_CMD_MASK | RT_SDHCI_INT_DATA_MASK | RT_SDHCI_INT_BUS_POWER);
        rt_sdhci_writel(host, mask, RT_SDHCI_INT_STATUS);

        if (intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE))
        {
            rt_uint32_t present = rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE) & RT_SDHCI_CARD_PRESENT;

            /*
             * Arm only the opposite event next, so a bouncing card
             * detect line cannot storm us with interrupts.
             */
            host->ier &= ~(RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE);
            host->ier |= present ? RT_SDHCI_INT_CARD_REMOVE : RT_SDHCI_INT_CARD_INSERT;
            rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
            rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);

            rt_sdhci_writel(host, intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE), RT_SDHCI_INT_STATUS);

            /* Hand the event to the IRQ work thread. */
            host->thread_isr |= intmask & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE);
            result = IRQ_WAIT;
        }

        if (intmask & RT_SDHCI_INT_CMD_MASK)
            sdhci_cmd_irq(host, intmask & RT_SDHCI_INT_CMD_MASK, &intmask);

        if (intmask & RT_SDHCI_INT_DATA_MASK)
            sdhci_data_irq(host, intmask & RT_SDHCI_INT_DATA_MASK);

        if (intmask & RT_SDHCI_INT_BUS_POWER)
            rt_kprintf("%s: Card is consuming too much power!\n",
                       mmc_hostname(host->mmc));

        /* Whatever is left over was not expected; ack and record it. */
        intmask &= ~(RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CMD_MASK | RT_SDHCI_INT_DATA_MASK | RT_SDHCI_INT_ERROR | RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_RETUNE | RT_SDHCI_INT_CARD_INT);

        if (intmask)
        {
            unexpected |= intmask;
            rt_sdhci_writel(host, intmask, RT_SDHCI_INT_STATUS);
        }
    cont:
        if (result == IRQ_NONE)
            result = IRQ_WAIT;

        /* Re-read: new interrupts may have arrived meanwhile. */
        intmask = rt_sdhci_readl(host, RT_SDHCI_INT_STATUS);
    } while (intmask && --max_loops);

    /* Split completed requests into "finish now" and "defer to thread". */
    for (i = 0; i < RT_SDHCI_MAX_MRQS; i++)
    {
        struct rt_mmcsd_req *mrq = host->mrqs_done[i];

        if (!mrq)
            continue;

        if (sdhci_defer_done(host, mrq))
        {
            result = IRQ_WAIT;
        }
        else
        {
            mrqs_done[i] = mrq;
            host->mrqs_done[i] = NULL;
        }
    }
out:
    /* A deferred command also needs the IRQ work thread. */
    if (host->deferred_cmd)
        result = IRQ_WAIT;

    rt_spin_unlock(&host->lock);

    /* Complete requests outside the lock. */
    for (i = 0; i < RT_SDHCI_MAX_MRQS; i++)
    {
        if (!mrqs_done[i])
            continue;

        if (host->ops->request_done)
            host->ops->request_done(host, mrqs_done[i]);
        else
            rt_mmc_request_done(host->mmc, mrqs_done[i]);
    }

    if (unexpected)
    {
        rt_kprintf("%s: Unexpected interrupt 0x%08x.\n",
                   mmc_hostname(host->mmc), unexpected);
        rt_read_reg_debug(host);
    }
    if (result == IRQ_WAIT)
    {
        rt_workqueue_submit_work(host->irq_wq, &host->irq_work, 0);
    }
}
/*
 * Try to issue @cmd, retrying for roughly 10 ms while the controller
 * still holds its command/data inhibit bits.
 *
 * Must be entered with host->lock held (IRQ state in @flags).  The lock
 * is dropped around each 1 ms sleep so the IRQ path can make progress;
 * card presence and host->deferred_cmd are therefore re-checked after
 * every re-acquisition.
 *
 * Returns RT_TRUE if the command was issued (or was already handled as
 * the deferred command by the IRQ path), RT_FALSE on error — in which
 * case cmd->err has been set here or by sdhci_present_error().
 */
static rt_bool_t sdhci_send_command_retry(struct rt_sdhci_host *host,
                                          struct rt_mmcsd_cmd *cmd,
                                          unsigned long flags)
{
    struct rt_mmcsd_cmd *deferred_cmd = host->deferred_cmd;
    int timeout = 10; /* Approx. 10 ms */
    rt_bool_t present;

    while (!sdhci_send_command(host, cmd))
    {
        if (!timeout--)
        {
            /* Inhibit bits never released: give up and fail the command. */
            rt_kprintf("%s: Controller never released inhibit bit(s).\n",
                       mmc_hostname(host->mmc));
            rt_read_reg_debug(host);
            cmd->err = -EIO;
            return RT_FALSE;
        }

        /* Sleep with the lock dropped so interrupts can be serviced. */
        rt_spin_unlock_irqrestore(&host->lock, flags);

        rt_thread_mdelay(1);

        present = host->mmc->ops->get_cd(host->mmc);

        flags = rt_spin_lock_irqsave(&host->lock);

        /*
         * While unlocked, the IRQ thread may have consumed the deferred
         * command we were asked to send — nothing left to do then.
         */
        if (cmd == deferred_cmd && cmd != host->deferred_cmd)
            return RT_TRUE;

        /* Card may have been removed while we slept. */
        if (sdhci_present_error(host, cmd, present))
            return RT_FALSE;
    }

    if (cmd == host->deferred_cmd)
        host->deferred_cmd = NULL;

    return RT_TRUE;
}
  1524. static rt_bool_t rt_sdhci_start_request_done(struct rt_sdhci_host *host)
  1525. {
  1526. rt_base_t flags;
  1527. struct rt_mmcsd_req *mrq;
  1528. int i;
  1529. flags = rt_spin_lock_irqsave(&host->lock);
  1530. for (i = 0; i < RT_SDHCI_MAX_MRQS; i++)
  1531. {
  1532. mrq = host->mrqs_done[i];
  1533. if (mrq)
  1534. break;
  1535. }
  1536. if (!mrq)
  1537. {
  1538. rt_spin_unlock_irqrestore(&host->lock, flags);
  1539. return RT_TRUE;
  1540. }
  1541. if (sdhci_needs_reset(host, mrq))
  1542. {
  1543. if (host->cmd || host->data_cmd)
  1544. {
  1545. rt_spin_unlock_irqrestore(&host->lock, flags);
  1546. return RT_TRUE;
  1547. }
  1548. /* Some controllers need this kick or reset won't work here */
  1549. if (host->quirks & RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET)
  1550. /* This is to force an update */
  1551. host->ops->set_clock(host, host->clock);
  1552. sdhci_reset_for(host, REQUEST_ERROR);
  1553. host->pending_reset = RT_FALSE;
  1554. }
  1555. if (host->flags & RT_SDHCI_REQ_USE_DMA)
  1556. {
  1557. struct rt_mmcsd_data *data = mrq->data;
  1558. if (data && data->host_cookie == COOKIE_MAPPED)
  1559. {
  1560. if (host->bounce_buffer)
  1561. {
  1562. /*
  1563. * On reads, copy the bounced data into the
  1564. * sglist
  1565. */
  1566. if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE)
  1567. {
  1568. unsigned int length = data->bytes_xfered;
  1569. if (length > host->bounce_buffer_size)
  1570. {
  1571. LOG_E("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
  1572. mmc_hostname(host->mmc),
  1573. host->bounce_buffer_size,
  1574. data->bytes_xfered);
  1575. /* Cap it down and continue */
  1576. length = host->bounce_buffer_size;
  1577. }
  1578. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
  1579. } else {
  1580. /* No copying, just switch ownership */
  1581. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize);
  1582. }
  1583. }
  1584. data->host_cookie = COOKIE_UNMAPPED;
  1585. }
  1586. else {
  1587. if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE)
  1588. {
  1589. rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, data->buf, data->blks * data->blksize);
  1590. } else {
  1591. /* No copying, just switch ownership */
  1592. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, data->buf, data->blks * data->blksize);
  1593. }
  1594. }
  1595. }
  1596. host->mrqs_done[i] = NULL;
  1597. rt_spin_unlock_irqrestore(&host->lock, flags);
  1598. if (host->ops->request_done)
  1599. host->ops->request_done(host, mrq);
  1600. else
  1601. rt_mmc_request_done(host->mmc, mrq);
  1602. return RT_FALSE;
  1603. }
/*
 * Workqueue bottom half of the SDHCI interrupt: drains completed
 * requests, re-issues any command the hard-IRQ path deferred, and
 * forwards card insert/remove events to the MMC core.
 */
static void sdhci_thread_irq(struct rt_work *work, void *work_data)
{
    struct rt_sdhci_host* host = work_data;
    struct rt_mmcsd_cmd *cmd;
    rt_base_t flags;
    rt_uint32_t isr;

    /* First complete every request that has already finished. */
    while (!rt_sdhci_start_request_done(host))
        ;

    flags = rt_spin_lock_irqsave(&host->lock);

    /* Snapshot and clear the bits recorded by the hard-IRQ handler. */
    isr = host->thread_isr;
    host->thread_isr = 0;

    /* Retry the command the IRQ handler could not issue directly. */
    cmd = host->deferred_cmd;
    if (cmd && !sdhci_send_command_retry(host, cmd, flags))
        sdhci_finish_mrq(host, cmd->mrq);

    rt_spin_unlock_irqrestore(&host->lock, flags);

    /* Card-detect notification runs outside the lock. */
    if (isr & (RT_SDHCI_INT_CARD_INSERT | RT_SDHCI_INT_CARD_REMOVE))
    {
        struct rt_mmc_host *mmc = host->mmc;

        mmc->ops->card_event(mmc);
    }
}
  1624. void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable)
  1625. {
  1626. struct rt_sdhci_host *host = mmc_priv(mmc);
  1627. rt_uint32_t flags;
  1628. flags = rt_spin_lock_irqsave(&host->lock);
  1629. rt_sdhci_enable_io_irq_nolock(host, enable);
  1630. rt_spin_unlock_irqrestore(&host->lock, flags);
  1631. }
  1632. /********************************************************* */
  1633. /* request */
  1634. /********************************************************* */
  1635. void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq)
  1636. {
  1637. struct rt_sdhci_host *host = mmc_priv(mmc);
  1638. struct rt_mmcsd_cmd *cmd;
  1639. rt_base_t flags;
  1640. rt_bool_t present;
  1641. /* Firstly check card presence */
  1642. present = mmc->ops->get_cd(mmc);
  1643. flags = rt_spin_lock_irqsave(&host->lock);
  1644. if (sdhci_present_error(host, mrq->cmd, present))
  1645. goto out_finish;
  1646. cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
  1647. if (!sdhci_send_command_retry(host, cmd, flags))
  1648. goto out_finish;
  1649. rt_spin_unlock_irqrestore(&host->lock, flags);
  1650. return;
  1651. out_finish:
  1652. sdhci_finish_mrq(host, mrq);
  1653. rt_spin_unlock_irqrestore(&host->lock, flags);
  1654. }
  1655. static void sdhci_complete_work(struct rt_work *work, void *work_data)
  1656. {
  1657. struct rt_sdhci_host *host = work_data;
  1658. while (!rt_sdhci_start_request_done(host));
  1659. }
  1660. /********************************************************* */
  1661. /* timer */
  1662. /********************************************************* */
  1663. static void sdhci_timeout_timer(void *parameter)
  1664. {
  1665. struct rt_sdhci_host *host = parameter;
  1666. rt_base_t flags;
  1667. flags = rt_spin_lock_irqsave(&host->lock);
  1668. if (host->cmd && !sdhci_data_line_cmd(host->cmd))
  1669. {
  1670. rt_kprintf("%s: Timeout waiting for hardware cmd interrupt.\n",
  1671. mmc_hostname(host->mmc));
  1672. rt_read_reg_debug(host);
  1673. host->cmd->err = -ETIMEDOUT;
  1674. sdhci_finish_mrq(host, host->cmd->mrq);
  1675. }
  1676. rt_spin_unlock_irqrestore(&host->lock, flags);
  1677. }
  1678. static void sdhci_timeout_data_timer(void *parameter)
  1679. {
  1680. struct rt_sdhci_host *host = parameter;
  1681. rt_base_t flags;
  1682. flags = rt_spin_lock_irqsave(&host->lock);
  1683. if (host->data || host->data_cmd || (host->cmd && sdhci_data_line_cmd(host->cmd)))
  1684. {
  1685. rt_kprintf("%s: Timeout waiting for hardware interrupt.\n",
  1686. mmc_hostname(host->mmc));
  1687. rt_read_reg_debug(host);
  1688. if (host->data)
  1689. {
  1690. host->data->err = -ETIMEDOUT;
  1691. __sdhci_finish_data(host, RT_TRUE);
  1692. rt_workqueue_submit_work(host->complete_wq, &host->complete_work, 0);
  1693. }
  1694. else if (host->data_cmd)
  1695. {
  1696. host->data_cmd->err = -ETIMEDOUT;
  1697. sdhci_finish_mrq(host, host->data_cmd->mrq);
  1698. }
  1699. else
  1700. {
  1701. host->cmd->err = -ETIMEDOUT;
  1702. sdhci_finish_mrq(host, host->cmd->mrq);
  1703. }
  1704. }
  1705. rt_spin_unlock_irqrestore(&host->lock, flags);
  1706. }
  1707. /********************************************************* */
  1708. /* tuning */
  1709. /********************************************************* */
/*
 * mmc_host_ops .execute_tuning hook.
 *
 * Decides, based on the current bus timing, whether tuning is needed and
 * which re-tuning period to program, then runs the standard tuning loop
 * (or the platform's override).  Always clears RT_SDHCI_HS400_TUNING on
 * exit.  Returns 0 on success or a negative error code.
 */
int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode)
{
    struct rt_sdhci_host *host = mmc_priv(mmc);
    int err = 0;
    unsigned int tuning_count = 0;
    rt_bool_t hs400_tuning;

    hs400_tuning = host->flags & RT_SDHCI_HS400_TUNING;

    if (host->tuning_mode == RT_SDHCI_TUNING_MODE_1)
        tuning_count = host->tuning_count;

    switch (host->timing)
    {
    /* HS400 tuning is done in HS200 mode */
    case MMC_TIMING_MMC_HS400:
        err = -EINVAL;
        goto out;

    case MMC_TIMING_MMC_HS200:
        /*
         * tuning_count is forced to 0 when this is tuning *for* HS400 —
         * presumably to disable periodic re-tuning there; confirm.
         */
        if (hs400_tuning)
            tuning_count = 0;
        break;

    case MMC_TIMING_UHS_SDR104:
    case MMC_TIMING_UHS_DDR50:
        break;

    case MMC_TIMING_UHS_SDR50:
        /* SDR50 only tunes when the capability bit demanded it. */
        if (host->flags & RT_SDHCI_SDR50_NEEDS_TUNING)
            break;
        fallthrough;

    default:
        goto out;
    }

    /* The platform may replace the whole tuning procedure. */
    if (host->ops->platform_execute_tuning)
    {
        err = host->ops->platform_execute_tuning(host, opcode);
        goto out;
    }

    mmc->retune_period = tuning_count;

    /* Default delay: 1 ms between tuning commands, but only for CMD19. */
    if (host->tuning_delay < 0)
        host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

    rt_sdhci_start_tuning(host);

    host->tuning_err = __sdhci_execute_tuning(host, opcode);

    rt_sdhci_end_tuning(host);
out:
    host->flags &= ~RT_SDHCI_HS400_TUNING;

    return err;
}
/*
 * Standard SDHCI tuning loop: send tuning blocks until the controller
 * clears EXEC_TUNING in HOST_CONTROL2, up to tuning_loop_count tries.
 *
 * Returns 0 on success (TUNED_CLK set), -ETIMEDOUT if a tuning command
 * never completed, -EAGAIN when the loop ends without a tuned clock —
 * the caller then falls back to the fixed sampling clock.
 */
int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode)
{
    int i;

    for (i = 0; i < host->tuning_loop_count; i++)
    {
        rt_uint16_t ctrl;

        rt_sdhci_send_tuning(host, opcode);

        /*
         * NOTE(review): host->tuning_done is cleared at the end of
         * rt_sdhci_send_tuning(); it is presumably set from the tuning
         * interrupt path before we get here — confirm the ordering.
         */
        if (!host->tuning_done)
        {
            rt_sdhci_abort_tuning(host, opcode);
            return -ETIMEDOUT;
        }

        /* Some controllers need a pause between tuning commands. */
        if (host->tuning_delay > 0)
            rt_thread_mdelay(host->tuning_delay);

        ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
        if (!(ctrl & RT_SDHCI_CTRL_EXEC_TUNING))
        {
            if (ctrl & RT_SDHCI_CTRL_TUNED_CLK)
                return 0; /* Success! */
            break;
        }
    }

    LOG_D("%s: Tuning failed, falling back to fixed sampling clock\n",
          mmc_hostname(host->mmc));
    rt_sdhci_reset_tuning(host);

    return -EAGAIN;
}
  1781. void rt_sdhci_start_tuning(struct rt_sdhci_host *host)
  1782. {
  1783. rt_uint16_t ctrl;
  1784. ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  1785. ctrl |= RT_SDHCI_CTRL_EXEC_TUNING;
  1786. if (host->quirks2 & RT_SDHCI_QUIRK2_TUNING_WORK_AROUND)
  1787. ctrl |= RT_SDHCI_CTRL_TUNED_CLK;
  1788. rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2);
  1789. rt_sdhci_writel(host, RT_SDHCI_INT_DATA_AVAIL, RT_SDHCI_INT_ENABLE);
  1790. rt_sdhci_writel(host, RT_SDHCI_INT_DATA_AVAIL, RT_SDHCI_SIGNAL_ENABLE);
  1791. }
  1792. void rt_sdhci_end_tuning(struct rt_sdhci_host *host)
  1793. {
  1794. rt_sdhci_writel(host, host->ier, RT_SDHCI_INT_ENABLE);
  1795. rt_sdhci_writel(host, host->ier, RT_SDHCI_SIGNAL_ENABLE);
  1796. }
  1797. void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode)
  1798. {
  1799. rt_sdhci_reset_tuning(host);
  1800. sdhci_reset_for(host, TUNING_ABORT);
  1801. rt_sdhci_end_tuning(host);
  1802. }
/*
 * Send a single tuning-block command (CMD19/CMD21) using an on-stack
 * cmd/mrq pair.  The block size is 64 bytes, except HS200 tuning on an
 * 8-bit bus which uses 128 bytes.  host->tuning_done is cleared before
 * returning; the interrupt path is expected to signal completion.
 */
void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode)
{
    struct rt_mmc_host *mmc = host->mmc;
    struct rt_mmcsd_cmd cmd = {};
    struct rt_mmcsd_req mrq = {};
    unsigned long flags;
    rt_uint32_t b = host->sdma_boundary;

    flags = rt_spin_lock_irqsave(&host->lock);

    cmd.cmd_code = opcode;
    cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
    cmd.mrq = &mrq;

    mrq.cmd = &cmd;
    /* 128-byte tuning block for HS200 over an 8-bit bus, 64 otherwise. */
    if (cmd.cmd_code == MMC_SEND_TUNING_BLOCK_HS200 && mmc->ios.bus_width == MMC_BUS_WIDTH_8)
        rt_sdhci_writew(host, RT_SDHCI_MAKE_BLKSZ(b, 128), RT_SDHCI_BLOCK_SIZE);
    else
        rt_sdhci_writew(host, RT_SDHCI_MAKE_BLKSZ(b, 64), RT_SDHCI_BLOCK_SIZE);

    /* Program a plain read transfer for the tuning block. */
    rt_sdhci_writew(host, RT_SDHCI_TRNS_READ, RT_SDHCI_TRANSFER_MODE);

    if (!sdhci_send_command_retry(host, &cmd, flags))
    {
        rt_spin_unlock_irqrestore(&host->lock, flags);
        host->tuning_done = 0;
        return;
    }

    /*
     * cmd and mrq live on this stack frame, so detach them from the host
     * before returning; no completion is reported for tuning commands.
     */
    host->cmd = NULL;

    sdhci_del_timer(host, &mrq);

    host->tuning_done = 0;

    rt_spin_unlock_irqrestore(&host->lock, flags);
}
  1831. void rt_sdhci_reset_tuning(struct rt_sdhci_host *host)
  1832. {
  1833. rt_uint16_t ctrl;
  1834. ctrl = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
  1835. ctrl &= ~RT_SDHCI_CTRL_TUNED_CLK;
  1836. ctrl &= ~RT_SDHCI_CTRL_EXEC_TUNING;
  1837. rt_sdhci_writew(host, ctrl, RT_SDHCI_HOST_CONTROL2);
  1838. }
  1839. /********************************************************* */
  1840. /* error */
  1841. /********************************************************* */
/*
 * Default mmc_host_ops for SDHCI controllers.  rt_sdhci_alloc_host()
 * copies this template into host->mmc_host_ops, so controller drivers
 * can override individual callbacks per host.
 */
static const struct mmc_host_ops rt_sdhci_ops = {
    .request = rt_sdhci_start_request,
    .set_ios = rt_sdhci_ios_set,
    .get_cd = sdhci_get_cd,
    .get_ro = rt_sdhci_ro_get,
    .enable_sdio_irq = rt_sdhci_enable_io_irq,
    .ack_sdio_irq = sdhci_ack_sdio_irq,
    .start_signal_voltage_switch = rt_sdhci_start_signal_voltage_switch,
    .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
    .execute_tuning = rt_sdhci_execute_tuning,
    .card_event = sdhci_card_event,
    .card_busy = sdhci_card_busy,
};
  1855. void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead)
  1856. {
  1857. struct rt_mmc_host *mmc = host->mmc;
  1858. unsigned long flags;
  1859. if (dead)
  1860. {
  1861. flags = rt_spin_lock_irqsave(&host->lock);
  1862. host->flags |= RT_SDHCI_DEVICE_DEAD;
  1863. if (sdhci_has_requests(host))
  1864. {
  1865. rt_kprintf("%s: Controller removed during "
  1866. " transfer!\n",
  1867. mmc_hostname(mmc));
  1868. sdhci_error_out_mrqs(host, -ENOMEDIUM);
  1869. }
  1870. rt_spin_unlock_irqrestore(&host->lock, flags);
  1871. }
  1872. sdhci_set_card_detection(host, RT_FALSE);
  1873. rt_mmc_remove_host(mmc);
  1874. if (!dead)
  1875. sdhci_reset_for_all(host);
  1876. rt_sdhci_writel(host, 0, RT_SDHCI_INT_ENABLE);
  1877. rt_sdhci_writel(host, 0, RT_SDHCI_SIGNAL_ENABLE);
  1878. rt_timer_delete(&host->timer);
  1879. rt_timer_delete(&host->data_timer);
  1880. rt_workqueue_destroy(host->complete_wq);
  1881. }
/*
 * Compute the CLOCK_CONTROL register value (divider bits and, on SDHCI
 * 3.00+, optionally programmable-clock mode) that gets as close as
 * possible to @clock without exceeding it.
 *
 * *actual_clock receives the frequency actually produced.  The returned
 * value has no enable bits set; see rt_sdhci_clk_enable().
 */
rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock,
                             unsigned int *actual_clock)
{
    int div = 0; /* Initialized for compiler warning */
    int real_div = div, clk_mul = 1;
    rt_uint16_t clk = 0;
    rt_bool_t switch_base_clk = RT_FALSE;

    if (host->version >= RT_SDHCI_SPEC_300)
    {
        if (host->preset_enabled)
        {
            rt_uint16_t pre_val;

            /* Preset mode: take the divider from the preset register. */
            clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL);
            pre_val = sdhci_get_preset_value(host);
            div = FIELD_GET(RT_SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
            if (host->clk_mul && (pre_val & RT_SDHCI_PRESET_CLKGEN_SEL))
            {
                clk = RT_SDHCI_PROG_CLOCK_MODE;
                real_div = div + 1;
                clk_mul = host->clk_mul;
            }
            else
            {
                real_div = max_t(int, 1, div << 1);
            }
            goto clock_set;
        }

        /* Programmable clock mode, if the controller supports it. */
        if (host->clk_mul)
        {
            for (div = 1; div <= 1024; div++)
            {
                if ((host->max_clk * host->clk_mul / div)
                    <= clock)
                    break;
            }
            if ((host->max_clk * host->clk_mul / div) <= clock)
            {
                clk = RT_SDHCI_PROG_CLOCK_MODE;
                real_div = div;
                clk_mul = host->clk_mul;
                /* Register encodes the divisor as value + 1. */
                div--;
            }
            else
            {
                /* Target is below reach: fall back to the base clock. */
                switch_base_clk = RT_TRUE;
            }
        }

        if (!host->clk_mul || switch_base_clk)
        {
            /* SDHCI 3.00 base-clock divisors are multiples of 2. */
            if (host->max_clk <= clock)
                div = 1;
            else
            {
                for (div = 2; div < RT_SDHCI_MAX_DIV_SPEC_300;
                     div += 2)
                {
                    if ((host->max_clk / div) <= clock)
                        break;
                }
            }
            real_div = div;
            div >>= 1;
            if ((host->quirks2 & RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                && !div && host->max_clk <= 25000000)
                div = 1;
        }
    }
    else
    {
        /* SDHCI 2.00 divisors are powers of 2. */
        for (div = 1; div < RT_SDHCI_MAX_DIV_SPEC_200; div *= 2)
        {
            if ((host->max_clk / div) <= clock)
                break;
        }
        real_div = div;
        div >>= 1;
    }

clock_set:
    if (real_div)
        *actual_clock = (host->max_clk * clk_mul) / real_div;
    /* Split the divisor into the low 8 and high 2 register bits. */
    clk |= (div & RT_SDHCI_DIV_MASK) << RT_SDHCI_DIVIDER_SHIFT;
    clk |= ((div & RT_SDHCI_DIV_HI_MASK) >> RT_SDHCI_DIV_MASK_LEN)
           << RT_SDHCI_DIVIDER_HI_SHIFT;

    return clk;
}
  1967. void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk)
  1968. {
  1969. long timeout;
  1970. clk |= RT_SDHCI_CLOCK_INT_EN;
  1971. rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL);
  1972. timeout = rt_tick_from_millisecond(150);
  1973. while (1)
  1974. {
  1975. timeout = timeout - rt_tick_get();
  1976. clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL);
  1977. if (clk & RT_SDHCI_CLOCK_INT_STABLE)
  1978. break;
  1979. if (timeout < 0)
  1980. {
  1981. rt_kprintf("%s: Internal clock never stabilised.\n",
  1982. mmc_hostname(host->mmc));
  1983. rt_read_reg_debug(host);
  1984. return;
  1985. }
  1986. rt_hw_us_delay(10);
  1987. }
  1988. if (host->version >= RT_SDHCI_SPEC_410 && host->v4_mode)
  1989. {
  1990. clk |= RT_SDHCI_CLOCK_PLL_EN;
  1991. clk &= ~RT_SDHCI_CLOCK_INT_STABLE;
  1992. rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL);
  1993. timeout = rt_tick_from_millisecond(150);
  1994. while (1)
  1995. {
  1996. timeout = timeout - rt_tick_get();
  1997. clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL);
  1998. if (clk & RT_SDHCI_CLOCK_INT_STABLE)
  1999. break;
  2000. if (timeout < 0)
  2001. {
  2002. rt_kprintf("%s: PLL clock never stabilised.\n",
  2003. mmc_hostname(host->mmc));
  2004. rt_read_reg_debug(host);
  2005. return;
  2006. }
  2007. rt_hw_us_delay(10);
  2008. }
  2009. }
  2010. clk |= RT_SDHCI_CLOCK_CARD_EN;
  2011. rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL);
  2012. }
  2013. void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock)
  2014. {
  2015. rt_uint16_t clk;
  2016. host->mmc->actual_clock = 0;
  2017. rt_sdhci_writew(host, 0, RT_SDHCI_CLOCK_CONTROL);
  2018. if (clock == 0)
  2019. return;
  2020. clk = rt_sdhci_clk_set(host, clock, &host->mmc->actual_clock);
  2021. rt_sdhci_clk_enable(host, clk);
  2022. }
/*
 * Read — or accept overridden values for — the controller version and
 * the two capability registers, applying device-tree caps masks when
 * RT_USING_OFW is enabled.  Runs at most once per host (guarded by
 * host->read_caps); also performs the initial full reset and enables
 * v4 mode when requested.
 *
 * @ver/@caps/@caps1: optional overrides; NULL means read the hardware.
 */
void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver,
                        const rt_uint32_t *caps, const rt_uint32_t *caps1)
{
    rt_uint16_t v;
    rt_uint64_t dt_caps_mask = 0;
    rt_uint64_t dt_caps = 0;

    /* Only ever probe the capabilities once. */
    if (host->read_caps)
        return;

    host->read_caps = RT_TRUE;

    /* Global debug overrides for the quirk masks. */
    if (debug_quirks)
        host->quirks = debug_quirks;

    if (debug_quirks2)
        host->quirks2 = debug_quirks2;

    sdhci_reset_for_all(host);

    if (host->v4_mode)
        sdhci_do_enable_v4_mode(host);

#ifdef RT_USING_OFW
    /* Device-tree supplied capability mask/override. */
    rt_ofw_prop_read_u64(mmc_dev(host->mmc)->ofw_node,
                         "sdhci-caps-mask", &dt_caps_mask);
    rt_ofw_prop_read_u64(mmc_dev(host->mmc)->ofw_node,
                         "sdhci-caps", &dt_caps);
#endif

    v = ver ? *ver : rt_sdhci_readw(host, RT_SDHCI_HOST_VERSION);
    host->version = (v & RT_SDHCI_SPEC_VER_MASK) >> RT_SDHCI_SPEC_VER_SHIFT;

    if (caps)
    {
        host->caps = *caps;
    }
    else
    {
        host->caps = rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES);
        host->caps &= ~lower_32_bits(dt_caps_mask);
        host->caps |= lower_32_bits(dt_caps);
    }

    /* CAPABILITIES_1 only exists from SDHCI 3.00 on. */
    if (host->version < RT_SDHCI_SPEC_300)
        return;

    if (caps1)
    {
        host->caps1 = *caps1;
    }
    else
    {
        host->caps1 = rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES_1);
        host->caps1 &= ~upper_32_bits(dt_caps_mask);
        host->caps1 |= upper_32_bits(dt_caps);
    }
}
  2070. struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev,
  2071. size_t priv_size)
  2072. {
  2073. struct rt_mmc_host *mmc;
  2074. struct rt_sdhci_host *host;
  2075. mmc = rt_mmc_alloc_host(sizeof(struct rt_sdhci_host) + priv_size, dev);
  2076. if (!mmc)
  2077. return NULL;
  2078. host = mmc_priv(mmc);
  2079. host->mmc = mmc;
  2080. host->mmc_host_ops = rt_sdhci_ops;
  2081. mmc->ops = &host->mmc_host_ops;
  2082. host->flags = RT_SDHCI_SIGNALING_330;
  2083. host->cqe_ier = RT_SDHCI_CQE_INT_MASK;
  2084. host->cqe_err_ier = RT_SDHCI_CQE_INT_ERR_MASK;
  2085. host->tuning_delay = -1;
  2086. host->tuning_loop_count = MAX_TUNING_LOOP;
  2087. host->sdma_boundary = RT_SDHCI_DEFAULT_BOUNDARY_ARG;
  2088. host->max_timeout_count = 0xE;
  2089. return host;
  2090. }
/*
 * Probe the controller's capabilities and translate them into the mmc
 * host's caps, clock limits, timeouts, OCR/voltage masks and transfer
 * geometry.  Must be called once after rt_sdhci_alloc_host().
 * Returns 0 on success or a negative error code.
 */
int rt_sdhci_setup_host(struct rt_sdhci_host *host)
{
    struct rt_mmc_host *mmc;
    size_t max_current_caps;
    unsigned int ocr_avail;
    unsigned int override_timeout_clk;
    size_t max_clk;
    int ret = 0;
    bool enable_vqmmc = RT_FALSE;

    RT_ASSERT(host != NULL);

    mmc = host->mmc;

    if (!mmc->supply.vqmmc)
    {
        /*
         * NOTE(review): ret is still 0 here, so this early return is
         * dead code — the regulator-enable call it originally guarded
         * appears to have been removed in this port; confirm.
         */
        if (ret)
            return ret;
        enable_vqmmc = RT_TRUE;
    }

    LOG_D("Version: 0x%08x | Present: 0x%08x\n",
          rt_sdhci_readw(host, RT_SDHCI_HOST_VERSION),
          rt_sdhci_readl(host, RT_SDHCI_PRESENT_STATE));
    LOG_D("Caps: 0x%08x | Caps_1: 0x%08x\n",
          rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES),
          rt_sdhci_readl(host, RT_SDHCI_CAPABILITIES_1));

    rt_sdhci_read_caps(host,RT_NULL,RT_NULL,RT_NULL);

    /* Preserve any timeout clock the platform driver pre-set. */
    override_timeout_clk = host->timeout_clk;

    if (host->version > RT_SDHCI_SPEC_420)
    {
        rt_kprintf("%s: Unknown controller version (%d). You may experience problems.\n",
                   mmc_hostname(mmc), host->version);
    }

    /* --- DMA capability selection --- */
    if (host->quirks & RT_SDHCI_QUIRK_FORCE_DMA)
        host->flags |= RT_SDHCI_USE_SDMA;
    else if (!(host->caps & RT_SDHCI_CAN_DO_SDMA))
        LOG_D("Controller doesn't have SDMA capability\n");
    else
        host->flags |= RT_SDHCI_USE_SDMA;

    if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_DMA) && (host->flags & RT_SDHCI_USE_SDMA))
    {
        LOG_D("Disabling DMA as it is marked broken\n");
        host->flags &= ~RT_SDHCI_USE_SDMA;
    }

    if (sdhci_can_64bit_dma(host))
        host->flags |= RT_SDHCI_USE_64_BIT_DMA;

    if (host->flags & RT_SDHCI_USE_SDMA)
    {
        if (host->ops->set_dma_mask)
            ret = host->ops->set_dma_mask(host);

        if (!ret && host->ops->enable_dma)
            ret = host->ops->enable_dma(host);

        if (ret)
        {
            rt_kprintf("%s: No suitable DMA available - falling back to PIO\n",
                       mmc_hostname(mmc));
            host->flags &= ~RT_SDHCI_USE_SDMA;

            ret = 0;
        }
    }

    /* 64-bit SDMA addressing requires v4 mode. */
    if ((host->flags & RT_SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
        host->flags &= ~RT_SDHCI_USE_SDMA;

    if (!(host->flags & RT_SDHCI_USE_SDMA))
    {
        host->dma_mask = DMA_BIT_MASK(64);
    }

    /* --- Base clock --- */
    if (host->version >= RT_SDHCI_SPEC_300)
        host->max_clk = FIELD_GET(RT_SDHCI_CLOCK_V3_BASE_MASK, host->caps);
    else
        host->max_clk = FIELD_GET(RT_SDHCI_CLOCK_BASE_MASK, host->caps);

    host->max_clk *= 1000000;
    if (host->max_clk == 0 || host->quirks & RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN)
    {
        if (!host->ops->get_max_clock)
        {
            rt_kprintf("%s: Hardware doesn't specify base clock frequency. %p \n",
                       mmc_hostname(mmc), host->ops->get_max_clock);
            ret = -ENODEV;
            goto undma;
        }
        host->max_clk = host->ops->get_max_clock(host);
    }

    /* Programmable-clock multiplier; caps encode (mul - 1). */
    host->clk_mul = FIELD_GET(RT_SDHCI_CLOCK_MUL_MASK, host->caps1);
    if (host->clk_mul)
        host->clk_mul += 1;

    max_clk = host->max_clk;

    if (host->ops->get_min_clock)
        mmc->f_min = host->ops->get_min_clock(host);
    else if (host->version >= RT_SDHCI_SPEC_300)
    {
        if (host->clk_mul)
            max_clk = host->max_clk * host->clk_mul;
        mmc->f_min = host->max_clk / RT_SDHCI_MAX_DIV_SPEC_300;
    }
    else
        mmc->f_min = host->max_clk / RT_SDHCI_MAX_DIV_SPEC_200;

    if (!mmc->f_max || mmc->f_max > max_clk)
        mmc->f_max = max_clk;

    /* --- Data timeout clock --- */
    if (!(host->quirks & RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK))
    {
        host->timeout_clk = FIELD_GET(RT_SDHCI_TIMEOUT_CLK_MASK, host->caps);

        if (host->caps & RT_SDHCI_TIMEOUT_CLK_UNIT)
            host->timeout_clk *= 1000;

        if (host->timeout_clk == 0)
        {
            if (!host->ops->get_timeout_clock)
            {
                rt_kprintf("%s: Hardware doesn't specify timeout clock frequency.\n",
                           mmc_hostname(mmc));
                ret = -ENODEV;
                goto undma;
            }

            host->timeout_clk =
                DIV_ROUND_UP(host->ops->get_timeout_clock(host),
                             1000);
        }

        if (override_timeout_clk)
            host->timeout_clk = override_timeout_clk;

        mmc->max_busy_timeout = host->ops->get_max_timeout_count ? host->ops->get_max_timeout_count(host) : 1 << 27;
        mmc->max_busy_timeout /= host->timeout_clk;
    }

    if (host->quirks2 & RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && !host->ops->get_max_timeout_count)
        mmc->max_busy_timeout = 0;

    mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
    mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

    if (host->quirks & RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
        host->flags |= RT_SDHCI_AUTO_CMD12;

    /* Auto-CMD23 needs SPEC 3.00+ and (no SDMA, or v4 mode). */
    if ((host->version >= RT_SDHCI_SPEC_300) && (!(host->flags & RT_SDHCI_USE_SDMA) || host->v4_mode) && !(host->quirks2 & RT_SDHCI_QUIRK2_ACMD23_BROKEN))
    {
        host->flags |= RT_SDHCI_AUTO_CMD23;
        LOG_D("Auto-CMD23 available\n");
    }
    else
    {
        LOG_D("Auto-CMD23 unavailable\n");
    }

    if (!(host->quirks & RT_SDHCI_QUIRK_FORCE_1_BIT_DATA))
        mmc->caps |= MMC_CAP_4_BIT_DATA;

    if (host->quirks2 & RT_SDHCI_QUIRK2_HOST_NO_CMD23)
        mmc->caps &= ~MMC_CAP_CMD23;

    if (host->caps & RT_SDHCI_CAN_DO_HISPD)
        mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

    if ((host->quirks & RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION) && mmc_card_is_removable(mmc) && rt_mmc_gpio_get_cd(mmc) < 0)
        mmc->caps |= MMC_CAP_NEEDS_POLL;

    /* --- vqmmc (signalling voltage) regulator constraints --- */
    if (mmc->supply.vqmmc)
    {
        if (enable_vqmmc)
        {
            host->sdhci_core_to_disable_vqmmc = !ret;
        }

        /* No 1.8 V support → no UHS modes. */
        if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
                                            1950000))
            host->caps1 &= ~(RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50);

        /* No 3.3 V support → drop 3.3 V signalling. */
        if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
                                            3600000))
            host->flags &= ~RT_SDHCI_SIGNALING_330;

        if (ret)
        {
            rt_kprintf("%s: Failed to enable vqmmc regulator: %d\n",
                       mmc_hostname(mmc), ret);
            mmc->supply.vqmmc = (void *)-EINVAL;
        }
    }

    if (host->quirks2 & RT_SDHCI_QUIRK2_NO_1_8_V)
    {
        host->caps1 &= ~(RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50);
        mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
        mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
    }

    /* --- UHS / HS200 / HS400 mode caps from CAPABILITIES_1 --- */
    if (host->caps1 & (RT_SDHCI_SUPPORT_SDR104 | RT_SDHCI_SUPPORT_SDR50 | RT_SDHCI_SUPPORT_DDR50))
        mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

    if (host->caps1 & RT_SDHCI_SUPPORT_SDR104)
    {
        /* SDR104 implies SDR50 support. */
        mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
        if (!(host->quirks2 & RT_SDHCI_QUIRK2_BROKEN_HS200))
            mmc->caps2 |= MMC_CAP2_HS200;
    }
    else if (host->caps1 & RT_SDHCI_SUPPORT_SDR50)
    {
        mmc->caps |= MMC_CAP_UHS_SDR50;
    }

    if (host->quirks2 & RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && (host->caps1 & RT_SDHCI_SUPPORT_HS400))
        mmc->caps2 |= MMC_CAP2_HS400;

    if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && (!mmc->supply.vqmmc || !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 1300000)))
        mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

    if ((host->caps1 & RT_SDHCI_SUPPORT_DDR50) && !(host->quirks2 & RT_SDHCI_QUIRK2_BROKEN_DDR50))
        mmc->caps |= MMC_CAP_UHS_DDR50;

    if (host->caps1 & RT_SDHCI_USE_SDR50_TUNING)
        host->flags |= RT_SDHCI_SDR50_NEEDS_TUNING;

    if (host->caps1 & RT_SDHCI_DRIVER_TYPE_A)
        mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
    if (host->caps1 & RT_SDHCI_DRIVER_TYPE_C)
        mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
    if (host->caps1 & RT_SDHCI_DRIVER_TYPE_D)
        mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

    /* Re-tuning timer count is encoded as a power-of-two exponent. */
    host->tuning_count = FIELD_GET(RT_SDHCI_RETUNING_TIMER_COUNT_MASK,
                                   host->caps1);
    if (host->tuning_count)
        host->tuning_count = 1 << (host->tuning_count - 1);
    /* Re-tuning mode supported by the Host Controller */
    host->tuning_mode = FIELD_GET(RT_SDHCI_RETUNING_MODE_MASK, host->caps1);

    /* --- OCR mask / max current --- */
    ocr_avail = 0;

    max_current_caps = rt_sdhci_readl(host, RT_SDHCI_MAX_CURRENT);

    if (!max_current_caps && mmc->supply.vmmc)
    {
        /* Derive the current caps from the vmmc regulator limit. */
        int curr = regulator_get_current_limit(mmc->supply.vmmc);
        if (curr > 0)
        {
            curr = curr / 1000; /* convert to mA */
            curr = curr / RT_SDHCI_MAX_CURRENT_MULTIPLIER;

            curr = min_t(rt_uint32_t, curr, RT_SDHCI_MAX_CURRENT_LIMIT);
            max_current_caps =
                FIELD_PREP(RT_SDHCI_MAX_CURRENT_330_MASK, curr) | FIELD_PREP(RT_SDHCI_MAX_CURRENT_300_MASK, curr) | FIELD_PREP(RT_SDHCI_MAX_CURRENT_180_MASK, curr);
        }
    }

    if (host->caps & RT_SDHCI_CAN_VDD_330)
    {
        ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

        mmc->max_current_330 = FIELD_GET(RT_SDHCI_MAX_CURRENT_330_MASK,
                                         max_current_caps)
                               * RT_SDHCI_MAX_CURRENT_MULTIPLIER;
    }
    if (host->caps & RT_SDHCI_CAN_VDD_300)
    {
        ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

        mmc->max_current_300 = FIELD_GET(RT_SDHCI_MAX_CURRENT_300_MASK,
                                         max_current_caps)
                               * RT_SDHCI_MAX_CURRENT_MULTIPLIER;
    }
    if (host->caps & RT_SDHCI_CAN_VDD_180)
    {
        ocr_avail |= MMC_VDD_165_195;

        mmc->max_current_180 = FIELD_GET(RT_SDHCI_MAX_CURRENT_180_MASK,
                                         max_current_caps)
                               * RT_SDHCI_MAX_CURRENT_MULTIPLIER;
    }

    /* Platform/driver overrides take precedence over the caps. */
    if (host->ocr_mask)
        ocr_avail = host->ocr_mask;

    if (mmc->ocr_avail)
        ocr_avail = mmc->ocr_avail;

    mmc->ocr_avail = ocr_avail;
    mmc->ocr_avail_sdio = ocr_avail;
    if (host->ocr_avail_sdio)
        mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
    mmc->ocr_avail_sd = ocr_avail;
    if (host->ocr_avail_sd)
        mmc->ocr_avail_sd &= host->ocr_avail_sd;
    else /* SD cards are not powered at 1.65-1.95 V */
        mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
    mmc->ocr_avail_mmc = ocr_avail;
    if (host->ocr_avail_mmc)
        mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

    if (mmc->ocr_avail == 0)
    {
        rt_kprintf("%s: Hardware doesn't report any support voltages.\n",
                   mmc_hostname(mmc));
        ret = -ENODEV;
        goto unreg;
    }

    if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
        host->flags |= RT_SDHCI_SIGNALING_180;

    if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
        host->flags |= RT_SDHCI_SIGNALING_120;

    rt_spin_lock_init(&host->lock);

    /* --- Transfer geometry --- */
    mmc->max_req_size = 524288;

    if (host->flags & RT_SDHCI_USE_SDMA)
    {
        mmc->max_segs = 1;
    }
    else
    { /* PIO */
        mmc->max_segs = RT_SDHCI_MAX_SEGS;
    }

    mmc->max_seg_size = mmc->max_req_size;

    /* Max block size is encoded as 512 << n, n in 0..2. */
    if (host->quirks & RT_SDHCI_QUIRK_FORCE_BLK_SZ_2048)
    {
        mmc->max_blk_size = 2;
    }
    else
    {
        mmc->max_blk_size = (host->caps & RT_SDHCI_MAX_BLOCK_MASK) >> RT_SDHCI_MAX_BLOCK_SHIFT;
        if (mmc->max_blk_size >= 3)
        {
            rt_kprintf("%s: Invalid maximum block size, assuming 512 bytes\n",
                       mmc_hostname(mmc));
            mmc->max_blk_size = 0;
        }
    }

    mmc->max_blk_size = 512 << mmc->max_blk_size;

    /*
     * Maximum block count.
     */
    mmc->max_blk_count = (host->quirks & RT_SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

    return 0;

/* Both error labels currently just return; kept for future cleanup. */
unreg:
undma:
    return ret;
}
  2386. static void sdhci_init(struct rt_sdhci_host *host, int soft)
  2387. {
  2388. struct rt_mmc_host *mmc = host->mmc;
  2389. rt_base_t flags;
  2390. if (soft)
  2391. {
  2392. sdhci_do_reset(host, RT_SDHCI_RESET_CMD | RT_SDHCI_RESET_DATA);
  2393. }
  2394. else
  2395. {
  2396. sdhci_do_reset(host, RT_SDHCI_RESET_ALL);
  2397. }
  2398. if (host->v4_mode)
  2399. {
  2400. sdhci_do_enable_v4_mode(host);
  2401. }
  2402. flags = rt_spin_lock_irqsave(&host->lock);
  2403. sdhci_set_default_irqs(host);
  2404. rt_spin_unlock_irqrestore(&host->lock, flags);
  2405. host->cqe_on = RT_FALSE;
  2406. if (soft)
  2407. {
  2408. /* force clock reconfiguration */
  2409. host->clock = 0;
  2410. host->reinit_uhs = RT_TRUE;
  2411. mmc->ops->set_ios(mmc, &mmc->ios);
  2412. }
  2413. }
  2414. static void sdhci_reinit(struct rt_sdhci_host *host)
  2415. {
  2416. rt_uint32_t cd = host->ier & (RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CARD_INSERT);
  2417. sdhci_init(host, 0);
  2418. sdhci_enable_card_detection(host);
  2419. if (cd != (host->ier & (RT_SDHCI_INT_CARD_REMOVE | RT_SDHCI_INT_CARD_INSERT)))
  2420. rt_mmc_detect_change(host->mmc, rt_tick_from_millisecond(200));
  2421. }
  2422. int rt_sdhci_init_host(struct rt_sdhci_host *host)
  2423. {
  2424. struct rt_mmc_host *mmc = host->mmc;
  2425. int ret;
  2426. if ((mmc->caps2 & MMC_CAP2_CQE) && (host->quirks & RT_SDHCI_QUIRK_BROKEN_CQE))
  2427. {
  2428. mmc->caps2 &= ~MMC_CAP2_CQE;
  2429. }
  2430. host->complete_wq = rt_workqueue_create("sdhci", 4096, 20);
  2431. if (!host->complete_wq)
  2432. return -ENOMEM;
  2433. rt_work_init(&host->complete_work, sdhci_complete_work, host);
  2434. rt_timer_init(&host->timer, "sdhci_timer", sdhci_timeout_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER);
  2435. rt_timer_init(&host->data_timer, "sdhci_data_timer", sdhci_timeout_data_timer, host, 0, RT_TIMER_FLAG_SOFT_TIMER);
  2436. rt_wqueue_init(&host->buf_ready_int);
  2437. sdhci_init(host, 0);
  2438. host->irq_wq = rt_workqueue_create("sdhci_irq", 8192, 1);
  2439. rt_work_init(&host->irq_work, sdhci_thread_irq, host);
  2440. rt_hw_interrupt_install(host->irq, sdhci_irq, host, mmc_hostname(mmc));
  2441. rt_pic_irq_unmask(host->irq);
  2442. ret = rt_mmc_add_host(mmc);
  2443. if (ret)
  2444. goto unirq;
  2445. LOG_D("%s: RT_SDHCI controller on %s [%s] using %s\n",
  2446. mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->parent.name,
  2447. (host->flags & RT_SDHCI_USE_SDMA) ? "DMA" : "PIO");
  2448. sdhci_enable_card_detection(host);
  2449. return 0;
  2450. unirq:
  2451. sdhci_reset_for_all(host);
  2452. rt_sdhci_writel(host, 0, RT_SDHCI_INT_ENABLE);
  2453. rt_sdhci_writel(host, 0, RT_SDHCI_SIGNAL_ENABLE);
  2454. return ret;
  2455. }
  2456. int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host)
  2457. {
  2458. int ret;
  2459. ret = rt_sdhci_setup_host(host);
  2460. if (ret)
  2461. return ret;
  2462. ret = rt_sdhci_init_host(host);
  2463. if (ret)
  2464. goto cleanup;
  2465. return 0;
  2466. cleanup:
  2467. rt_sdhci_cleanup_host(host);
  2468. return ret;
  2469. }
/*
 * mmc_host ->set_ios callback: apply clock, power, bus width, timing and
 * driver-strength settings to the controller.
 *
 * NOTE(review): the register-write ordering below (card clock gated before
 * HOST_CONTROL / HOST_CONTROL2 / UHS changes, then re-enabled last) follows
 * the SDHCI programming sequence — keep it intact.
 */
void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
    struct rt_sdhci_host *host = mmc_priv(mmc);
    rt_bool_t reinit_uhs = host->reinit_uhs;
    rt_bool_t turning_on_clk = RT_FALSE;
    rt_uint8_t ctrl;

    host->reinit_uhs = RT_FALSE;

    if (ios->power_mode == MMC_POWER_UNDEFINED)
        return;

    /* Dead controller: only honour a power-off request via the regulator. */
    if (host->flags & RT_SDHCI_DEVICE_DEAD)
    {
        if (mmc->supply.vmmc && ios->power_mode == MMC_POWER_OFF)
            mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
        return;
    }

    /* Powering off: silence interrupt signalling and fully re-init. */
    if (ios->power_mode == MMC_POWER_OFF)
    {
        rt_sdhci_writel(host, 0, RT_SDHCI_SIGNAL_ENABLE);
        sdhci_reinit(host);
    }

    /* SDHCI v3+: disable preset values during power-up unless broken. */
    if (host->version >= RT_SDHCI_SPEC_300 && (ios->power_mode == MMC_POWER_UP) && !(host->quirks2 & RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
        sdhci_preset_value_enable(host, RT_FALSE);

    if (!ios->clock || ios->clock != host->clock)
    {
        /* Track the stopped->running transition for the skip check below. */
        turning_on_clk = ios->clock && !host->clock;
        host->ops->set_clock(host, ios->clock);
        host->clock = ios->clock;

        if (host->quirks & RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && host->clock)
        {
            /* Timeout clock tracks SDCLK on this quirk; rescale the limit. */
            host->timeout_clk = mmc->actual_clock ? mmc->actual_clock / 1000 : host->clock / 1000;
            mmc->max_busy_timeout =
                host->ops->get_max_timeout_count ? host->ops->get_max_timeout_count(host) : 1 << 27;
            mmc->max_busy_timeout /= host->timeout_clk;
        }
    }

    if (host->ops->set_power)
        host->ops->set_power(host, ios->power_mode, ios->vdd);
    else
        rt_sdhci_set_power(host, ios->power_mode, ios->vdd);

    host->ops->set_bus_width(host, ios->bus_width);

    /* Nothing timing-related changed: skip the UHS reprogramming below. */
    if (!reinit_uhs && turning_on_clk && host->timing == ios->timing && host->version >= RT_SDHCI_SPEC_300 && !sdhci_presetable_values_change(host, ios))
        return;

    ctrl = rt_sdhci_readb(host, RT_SDHCI_HOST_CONTROL);

    if (!(host->quirks & RT_SDHCI_QUIRK_NO_HISPD_BIT))
    {
        /* High-speed enable bit for any timing faster than legacy/default. */
        if (ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_MMC_HS400 || ios->timing == MMC_TIMING_MMC_HS200 || ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_SDR50 || ios->timing == MMC_TIMING_UHS_SDR104 || ios->timing == MMC_TIMING_UHS_DDR50 || ios->timing == MMC_TIMING_UHS_SDR25)
            ctrl |= RT_SDHCI_CTRL_HISPD;
        else
            ctrl &= ~RT_SDHCI_CTRL_HISPD;
    }

    if (host->version >= RT_SDHCI_SPEC_300)
    {
        rt_uint16_t clk, ctrl_2;

        /* Gate the card clock before touching timing-related registers. */
        clk = rt_sdhci_readw(host, RT_SDHCI_CLOCK_CONTROL);
        if (clk & RT_SDHCI_CLOCK_CARD_EN)
        {
            clk &= ~RT_SDHCI_CLOCK_CARD_EN;
            rt_sdhci_writew(host, clk, RT_SDHCI_CLOCK_CONTROL);
        }

        rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL);

        if (!host->preset_enabled)
        {
            /* Driver strength is only programmable while presets are off. */
            ctrl_2 = rt_sdhci_readw(host, RT_SDHCI_HOST_CONTROL2);
            ctrl_2 &= ~RT_SDHCI_CTRL_DRV_TYPE_MASK;
            if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
                ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_A;
            else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
                ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_B;
            else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
                ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_C;
            else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
                ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_D;
            else
            {
                LOG_D("%s: invalid driver type, default to driver type B\n",
                      mmc_hostname(mmc));
                ctrl_2 |= RT_SDHCI_CTRL_DRV_TYPE_B;
            }
            rt_sdhci_writew(host, ctrl_2, RT_SDHCI_HOST_CONTROL2);
            host->drv_type = ios->drv_type;
        }

        host->ops->set_uhs_signaling(host, ios->timing);
        host->timing = ios->timing;

        if (sdhci_preset_needed(host, ios->timing))
        {
            rt_uint16_t preset;

            /* Presets take over: read back the driver strength they impose. */
            sdhci_preset_value_enable(host, RT_TRUE);
            preset = sdhci_get_preset_value(host);
            ios->drv_type = FIELD_GET(RT_SDHCI_PRESET_DRV_MASK,
                                      preset);
            host->drv_type = ios->drv_type;
        }

        /* Re-enable the card clock with the (possibly new) configuration. */
        host->ops->set_clock(host, host->clock);
    }
    else
        rt_sdhci_writeb(host, ctrl, RT_SDHCI_HOST_CONTROL);
}
  2567. void rt_sdhci_free_host(struct rt_sdhci_host *host)
  2568. {
  2569. rt_sdhci_cleanup_host(host);
  2570. rt_free(host);
  2571. }