drv_sdhci.c

/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-10-10     Tanek        first version
 * 2021-07-07     linzhenxing  add sd card drivers in mmu
 * 2021-07-14     linzhenxing  add emmc
 */

#include <rtthread.h>
#include <rthw.h>
#include <rtdevice.h>
#include "board.h"
#include "drv_sdhci.h"
#include "riscv_io.h"
#include <string.h>
#include <ioremap.h>
#include <cache.h>
#include <mm_aspace.h>

#ifdef RT_USING_SDIO

#define DBG_TAG "drv_sdhci"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_WARNING
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>

#if defined(BSP_USING_SDIO0) || defined(BSP_USING_SDIO1)

#define SDHCI_SDMA_ENABLE
#define CACHE_LINESIZE (64)
#define BIT(x) (1U << (x))

#define DWC_MSHC_PTR_VENDOR1 0x500
#define MSHC_CTRL_R (DWC_MSHC_PTR_VENDOR1 + 0x08)
#define EMMC_CTRL_R (DWC_MSHC_PTR_VENDOR1 + 0x2c)
#define SDHCI_VENDER_AT_CTRL_REG (DWC_MSHC_PTR_VENDOR1 + 0x40)
#define SDHCI_VENDER_AT_STAT_REG (DWC_MSHC_PTR_VENDOR1 + 0x44)
#define SDHCI_TUNE_CLK_STOP_EN_MASK BIT(16)
#define SDHCI_TUNE_SWIN_TH_VAL_LSB (24)
#define SDHCI_TUNE_SWIN_TH_VAL_MASK (0xFF)

#define CARD_IS_EMMC 0
#define EMMC_RST_N 2
#define EMMC_RST_N_OE 3

#define DWC_MSHC_PTR_PHY_REGS 0x300
#define DWC_MSHC_PHY_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x0)
#define PAD_SN_LSB 20
#define PAD_SN_MASK 0xF
#define PAD_SN_DEFAULT ((0x8 & PAD_SN_MASK) << PAD_SN_LSB)
#define PAD_SP_LSB 16
#define PAD_SP_MASK 0xF
#define PAD_SP_DEFAULT ((0x9 & PAD_SP_MASK) << PAD_SP_LSB)
#define PHY_PWRGOOD BIT(1)
#define PHY_RSTN BIT(0)
#define DWC_MSHC_CMDPAD_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x4)
#define DWC_MSHC_DATPAD_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x6)
#define DWC_MSHC_CLKPAD_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x8)
#define DWC_MSHC_STBPAD_CNFG (DWC_MSHC_PTR_PHY_REGS + 0xA)
#define DWC_MSHC_RSTNPAD_CNFG (DWC_MSHC_PTR_PHY_REGS + 0xC)
#define TXSLEW_N_LSB 9
#define TXSLEW_N_MASK 0xF
#define TXSLEW_P_LSB 5
#define TXSLEW_P_MASK 0xF
#define WEAKPULL_EN_LSB 3
#define WEAKPULL_EN_MASK 0x3
#define RXSEL_LSB 0
#define RXSEL_MASK 0x3
#define DWC_MSHC_COMMDL_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x1C)
#define DWC_MSHC_SDCLKDL_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x1D)
#define DWC_MSHC_SDCLKDL_DC (DWC_MSHC_PTR_PHY_REGS + 0x1E)
#define DWC_MSHC_SMPLDL_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x20)
#define DWC_MSHC_ATDL_CNFG (DWC_MSHC_PTR_PHY_REGS + 0x21)

#define DWC_MSHC_PHY_PAD_SD_CLK \
    ((1 << TXSLEW_N_LSB) | (3 << TXSLEW_P_LSB) | (0 << WEAKPULL_EN_LSB) | (2 << RXSEL_LSB))
#define DWC_MSHC_PHY_PAD_SD_DAT \
    ((1 << TXSLEW_N_LSB) | (3 << TXSLEW_P_LSB) | (1 << WEAKPULL_EN_LSB) | (2 << RXSEL_LSB))
#define DWC_MSHC_PHY_PAD_SD_STB \
    ((1 << TXSLEW_N_LSB) | (3 << TXSLEW_P_LSB) | (2 << WEAKPULL_EN_LSB) | (2 << RXSEL_LSB))
#define DWC_MSHC_PHY_PAD_EMMC_CLK \
    ((2 << TXSLEW_N_LSB) | (2 << TXSLEW_P_LSB) | (0 << WEAKPULL_EN_LSB) | (1 << RXSEL_LSB))
#define DWC_MSHC_PHY_PAD_EMMC_DAT \
    ((2 << TXSLEW_N_LSB) | (2 << TXSLEW_P_LSB) | (1 << WEAKPULL_EN_LSB) | (1 << RXSEL_LSB))
#define DWC_MSHC_PHY_PAD_EMMC_STB \
    ((2 << TXSLEW_N_LSB) | (2 << TXSLEW_P_LSB) | (2 << WEAKPULL_EN_LSB) | (1 << RXSEL_LSB))

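/*
 * The DWC_MSHC_PHY_PAD_* values pack one 16-bit pad configuration word using
 * the field positions defined above: TXSLEW_CTRL_N at bit 9, TXSLEW_CTRL_P at
 * bit 5, WEAKPULL_EN at bit 3 and RXSEL at bit 0. As a worked example,
 * DWC_MSHC_PHY_PAD_SD_DAT = (1 << 9) | (3 << 5) | (1 << 3) | (2 << 0) = 0x26A,
 * i.e. weak pull enabled and receiver type 2 for the SD-card data pads. The
 * exact meaning of the RXSEL/WEAKPULL codes follows the Synopsys DWC MSHC PHY
 * databook; the values here are taken as-is from the vendor BSP.
 */
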
static struct sdhci_host* sdhci_host0;
static struct sdhci_host* sdhci_host1;

static inline void sdhci_writel(struct sdhci_host* host, uint32_t val, int reg)
{
    writel(val, (void*)host->mapbase + reg);
}

static inline void sdhci_writew(struct sdhci_host* host, uint16_t val, int reg)
{
    writew((uint16_t)val, (void*)host->mapbase + reg);
}

static inline void sdhci_writeb(struct sdhci_host* host, uint8_t val, int reg)
{
    writeb((uint8_t)val, (void*)host->mapbase + reg);
}

static inline uint32_t sdhci_readl(struct sdhci_host* host, int reg)
{
    return (uint32_t)readl((void*)host->mapbase + reg);
}

static inline uint16_t sdhci_readw(struct sdhci_host* host, int reg)
{
    return (uint16_t)readw((void*)host->mapbase + reg);
}

static inline uint8_t sdhci_readb(struct sdhci_host* host, int reg)
{
    return (uint8_t)readb((void*)host->mapbase + reg);
}

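/*
 * The accessor helpers above route every register access through
 * host->mapbase, which holds the virtual address returned by rt_ioremap() in
 * kd_sdhci_init(); the driver never dereferences the physical SDEMMC base
 * address directly.
 */
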
static void emmc_reg_display(struct sdhci_host* host)
{
    rt_kprintf("SDMASA_R:%x\n", sdhci_readl(host, SDHCI_DMA_ADDRESS));
    rt_kprintf("BLOCKSIZE_R:%x\n", sdhci_readw(host, SDHCI_BLOCK_SIZE));
    rt_kprintf("BLOCKCOUNT_R:%x\n", sdhci_readw(host, SDHCI_BLOCK_COUNT));
    rt_kprintf("ARGUMENT_R:%x\n", sdhci_readl(host, SDHCI_ARGUMENT));
    rt_kprintf("XFER_MODE_R:%x\n", sdhci_readw(host, SDHCI_TRANSFER_MODE));
    rt_kprintf("CMD_R:%x\n", sdhci_readw(host, SDHCI_COMMAND));
    rt_kprintf("RESP0_R:%x\n", sdhci_readl(host, SDHCI_RESPONSE));
    rt_kprintf("RESP1_R:%x\n", sdhci_readl(host, SDHCI_RESPONSE + 4));
    rt_kprintf("RESP2_R:%x\n", sdhci_readl(host, SDHCI_RESPONSE + 8));
    rt_kprintf("RESP3_R:%x\n", sdhci_readl(host, SDHCI_RESPONSE + 12));
    rt_kprintf("BUF_DATA_R:%x\n", sdhci_readl(host, SDHCI_BUFFER));
    rt_kprintf("PSTATE_REG_R:%x\n", sdhci_readl(host, SDHCI_PRESENT_STATE));
    rt_kprintf("HOST_CTL_R:%x\n", sdhci_readb(host, SDHCI_HOST_CONTROL));
    rt_kprintf("PWR_CTRL_R:%x\n", sdhci_readb(host, SDHCI_POWER_CONTROL));
    rt_kprintf("BGAP_CTRL_R:%x\n", sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
    rt_kprintf("WUP_CTRL_R:%x\n", sdhci_readb(host, SDHCI_WAKE_UP_CONTROL));
    rt_kprintf("CLK_CTRL_R:%x\n", sdhci_readw(host, SDHCI_CLOCK_CONTROL));
    rt_kprintf("TOUT_CTRL_R:%x\n", sdhci_readb(host, SDHCI_TIMEOUT_CONTROL));
    rt_kprintf("SW_RST_R:%x\n", sdhci_readb(host, SDHCI_SOFTWARE_RESET));
    rt_kprintf("NORMAL_INT_STAT_R:%x\n", sdhci_readw(host, SDHCI_INT_STATUS));
    rt_kprintf("ERROR_INT_STAT_R:%x\n", sdhci_readw(host, SDHCI_INT_STATUS + 2));
    rt_kprintf("NORMAL_INT_STAT_EN_R:%x\n", sdhci_readw(host, SDHCI_INT_ENABLE));
    rt_kprintf("ERROR_INT_STAT_EN_R:%x\n", sdhci_readw(host, SDHCI_INT_ENABLE + 2));
    rt_kprintf("NORMAL_INT_SIGNAL_EN_R:%x\n", sdhci_readw(host, SDHCI_SIGNAL_ENABLE));
    rt_kprintf("ERROR_INT_SIGNAL_EN_R:%x\n", sdhci_readw(host, SDHCI_SIGNAL_ENABLE + 2));
    rt_kprintf("AUTO_CMD_STAT_R:%x\n", sdhci_readw(host, SDHCI_AUTO_CMD_STATUS));
    rt_kprintf("HOST_CTRL2_R:%x\n", sdhci_readw(host, SDHCI_HOST_CONTROL2));
    rt_kprintf("CAPABILITIES1_R:%x\n", sdhci_readl(host, SDHCI_CAPABILITIES));
    rt_kprintf("CAPABILITIES2_R:%x\n", sdhci_readl(host, SDHCI_CAPABILITIES_1));
    rt_kprintf("FORCE_AUTO_CMD_STAT_R:%x\n", sdhci_readw(host, SDHCI_MAX_CURRENT));
    rt_kprintf("FORCE_ERROR_INT_STAT_R:%x\n", sdhci_readw(host, SDHCI_SET_ACMD12_ERROR));
    rt_kprintf("ADMA_ERR_STAT_R:%x\n", sdhci_readl(host, SDHCI_ADMA_ERROR));
    rt_kprintf("ADMA_SA_LOW_R:%x\n", sdhci_readl(host, SDHCI_ADMA_ADDRESS));
    rt_kprintf("ADMA_SA_HIGH_R:%x\n", sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI));
}

static inline void delay_1k(unsigned int uicnt)
{
    int i, j;

    for (i = 0; i < uicnt; i++)
        for (j = 0; j < 1000; j++)
            asm("nop");
}

static void dwcmshc_phy_1_8v_init(struct sdhci_host* host)
{
    sdhci_writew(host, DWC_MSHC_PHY_PAD_EMMC_DAT, DWC_MSHC_CMDPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_EMMC_DAT, DWC_MSHC_DATPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_EMMC_CLK, DWC_MSHC_CLKPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_EMMC_STB, DWC_MSHC_STBPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_EMMC_DAT, DWC_MSHC_RSTNPAD_CNFG);
}

static void dwcmshc_phy_3_3v_init(struct sdhci_host* host)
{
    sdhci_writew(host, DWC_MSHC_PHY_PAD_SD_DAT, DWC_MSHC_CMDPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_SD_DAT, DWC_MSHC_DATPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_SD_CLK, DWC_MSHC_CLKPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_SD_STB, DWC_MSHC_STBPAD_CNFG);
    sdhci_writew(host, DWC_MSHC_PHY_PAD_SD_DAT, DWC_MSHC_RSTNPAD_CNFG);
}

static void dwcmshc_phy_delay_config(struct sdhci_host* host)
{
    sdhci_writeb(host, 1, DWC_MSHC_COMMDL_CNFG);
    if (host->tx_delay_line > 256)
    {
        LOG_E("host%d: tx_delay_line err\n", host->index);
    }
    else if (host->tx_delay_line > 128)
    {
        sdhci_writeb(host, 0x1, DWC_MSHC_SDCLKDL_CNFG);
        sdhci_writeb(host, host->tx_delay_line - 128, DWC_MSHC_SDCLKDL_DC);
    }
    else
    {
        sdhci_writeb(host, 0x0, DWC_MSHC_SDCLKDL_CNFG);
        sdhci_writeb(host, host->tx_delay_line, DWC_MSHC_SDCLKDL_DC);
    }
    sdhci_writeb(host, host->rx_delay_line, DWC_MSHC_SMPLDL_CNFG);
    sdhci_writeb(host, 0xc, DWC_MSHC_ATDL_CNFG);
    sdhci_writel(host, (sdhci_readl(host, SDHCI_VENDER_AT_CTRL_REG) | BIT(16) | BIT(17) | BIT(19) | BIT(20)), SDHCI_VENDER_AT_CTRL_REG);
    sdhci_writel(host, 0x0, SDHCI_VENDER_AT_STAT_REG);
}

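/*
 * dwcmshc_phy_delay_config() above programs the PHY delay lines: SDCLKDL_CNFG
 * and SDCLKDL_DC set the TX clock delay (values above 128 are written with the
 * extended-delay bit set), SMPLDL_CNFG sets the RX sampling delay and
 * ATDL_CNFG the auto-tuning delay. The bits ORed into SDHCI_VENDER_AT_CTRL_REG
 * (16, 17, 19 and 20) presumably enable clock stop during tuning and the
 * pre/post-change delay cycles; they are carried over unchanged from the
 * vendor BSP rather than derived from the SDHCI specification.
 */
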
static int dwcmshc_phy_init(struct sdhci_host* host)
{
    uint32_t reg;
    uint32_t timeout = 15000;

    /* reset phy */
    sdhci_writew(host, 0, DWC_MSHC_PHY_CNFG);
    /* disable the clock */
    sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
    if (host->io_fixed_1v8)
    {
        uint32_t data = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        data |= SDHCI_CTRL_VDD_180;
        sdhci_writew(host, data, SDHCI_HOST_CONTROL2);
        dwcmshc_phy_1_8v_init(host);
    }
    else
    {
        dwcmshc_phy_3_3v_init(host);
    }
    dwcmshc_phy_delay_config(host);

    /* Wait max 150 ms for the PHY power-good indication */
    while (1)
    {
        reg = sdhci_readl(host, DWC_MSHC_PHY_CNFG);
        if (reg & PHY_PWRGOOD)
            break;
        if (!timeout)
        {
            return -1;
        }
        timeout--;
        delay_1k(1);
    }

    reg = PAD_SN_DEFAULT | PAD_SP_DEFAULT;
    sdhci_writel(host, reg, DWC_MSHC_PHY_CNFG);
    /* de-assert PHY reset */
    reg |= PHY_RSTN;
    sdhci_writel(host, reg, DWC_MSHC_PHY_CNFG);

    return 0;
}

static void sdhci_reset(struct sdhci_host* host, uint8_t mask)
{
    unsigned long timeout;

    /* Wait max 100 ms */
    timeout = 100;
    sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
    while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
    {
        if (timeout == 0)
        {
            LOG_E("%s: Reset 0x%x never completed.\n", __func__, (int)mask);
            return;
        }
        timeout--;
        delay_1k(1);
    }

    if (mask == SDHCI_RESET_ALL)
    {
        if (host->index == 0)
        {
            uint16_t emmc_ctl = sdhci_readw(host, EMMC_CTRL_R);

            if (host->is_emmc_card)
                emmc_ctl |= (1 << CARD_IS_EMMC);
            else
                emmc_ctl &= ~(1 << CARD_IS_EMMC);
            sdhci_writeb(host, emmc_ctl, EMMC_CTRL_R);
        }
        if (host->have_phy)
            dwcmshc_phy_init(host);
        else
            sdhci_writeb(host, host->mshc_ctrl_r, MSHC_CTRL_R);
    }
}

static uint32_t sdhci_get_present_status_flag(struct sdhci_host* sdhci_host)
{
    return sdhci_readl(sdhci_host, SDHCI_PRESENT_STATE);
}

static uint32_t sdhci_get_int_status_flag(struct sdhci_host* sdhci_host)
{
    return sdhci_readl(sdhci_host, SDHCI_INT_STATUS);
}

static void sdhci_clear_int_status_flag(struct sdhci_host* sdhci_host, uint32_t mask)
{
    sdhci_writel(sdhci_host, mask, SDHCI_INT_STATUS);
}

static void sdhci_error_recovery(struct sdhci_host* sdhci_host)
{
    uint32_t status;

    /* get host present status */
    status = sdhci_get_present_status_flag(sdhci_host);
    /* check command inhibit status flag */
    if ((status & SDHCI_CMD_INHIBIT) != 0U)
    {
        /* reset command line */
        sdhci_reset(sdhci_host, SDHCI_RESET_CMD);
    }
    /* check data inhibit status flag */
    if ((status & SDHCI_DATA_INHIBIT) != 0U)
    {
        /* reset data line */
        sdhci_reset(sdhci_host, SDHCI_RESET_DATA);
    }
}

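/*
 * For long (R2, i.e. CID/CSD) responses the controller strips the CRC byte, so
 * the remaining 120 bits sit in the four RESPONSE registers shifted down by
 * one byte relative to the card's 128-bit register.
 * sdhci_receive_command_response() therefore shifts each response word left by
 * 8 and pulls the missing low byte out of the top byte of the previous
 * response register, the same reconstruction the Linux sdhci driver performs.
 */
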
static rt_err_t sdhci_receive_command_response(struct sdhci_host* sdhci_host, struct sdhci_command* command)
{
    if (command->responseType == card_response_type_r2)
    {
        /* CRC is stripped so we need to do some shifting. */
        for (int i = 0; i < 4; i++)
        {
            command->response[3 - i] = sdhci_readl(sdhci_host, SDHCI_RESPONSE + (3 - i) * 4) << 8;
            if (i != 3)
                command->response[3 - i] |= sdhci_readb(sdhci_host, SDHCI_RESPONSE + (3 - i) * 4 - 1);
        }
    }
    else
    {
        command->response[0] = sdhci_readl(sdhci_host, SDHCI_RESPONSE);
    }

    /* check response error flag */
    if ((command->responseErrorFlags != 0U) &&
        ((command->responseType == card_response_type_r1) || (command->responseType == card_response_type_r1b) ||
         (command->responseType == card_response_type_r6) || (command->responseType == card_response_type_r5)))
    {
        if (((command->responseErrorFlags) & (command->response[0U])) != 0U)
            return -1; /* kStatus_USDHC_SendCommandFailed; */
    }

    return 0;
}

static void sdhci_send_command(struct sdhci_host* sdhci_host, struct sdhci_command* command, rt_bool_t enDMA)
{
    RT_ASSERT(RT_NULL != command);
    uint32_t cmd_r, xfer_mode;
    struct sdhci_data* sdhci_data = sdhci_host->sdhci_data;

    cmd_r = SDHCI_MAKE_CMD(command->index, command->flags);
    if (sdhci_data != RT_NULL)
    {
#ifdef SDHCI_SDMA_ENABLE
        rt_ubase_t start_addr, dma_addr;

        if (sdhci_data->rxData)
            start_addr = (rt_ubase_t)((uint8_t*)sdhci_data->rxData);
        else
            start_addr = (rt_ubase_t)((uint8_t*)sdhci_data->txData);
        rt_hw_cpu_dcache_clean((void*)start_addr, sdhci_data->blockSize * sdhci_data->blockCount);
        command->flags2 |= sdhci_enable_dma_flag;
        dma_addr = (rt_ubase_t)rt_kmem_v2p((void*)start_addr);
        sdhci_writel(sdhci_host, dma_addr, SDHCI_DMA_ADDRESS);
#endif
        sdhci_writew(sdhci_host, SDHCI_MAKE_BLKSZ(7, sdhci_data->blockSize), SDHCI_BLOCK_SIZE);
        sdhci_writew(sdhci_host, sdhci_data->blockCount, SDHCI_BLOCK_COUNT);
    }
    xfer_mode = command->flags2 & 0x1ff;
    sdhci_writew(sdhci_host, xfer_mode, SDHCI_TRANSFER_MODE);
    sdhci_writel(sdhci_host, command->argument, SDHCI_ARGUMENT);
    sdhci_writew(sdhci_host, cmd_r, SDHCI_COMMAND);
}

static rt_err_t sdhci_wait_command_done(struct sdhci_host* sdhci_host, struct sdhci_command* command, rt_bool_t executeTuning)
{
    RT_ASSERT(RT_NULL != command);
    rt_uint32_t event;

    /* a tuning command does not need to wait for command completion */
    if (executeTuning)
        return 0;

    /* Wait until the command completes or the host reports an error. */
    rt_event_recv(&sdhci_host->event, SDHCI_INT_ERROR | SDHCI_INT_RESPONSE,
                  RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_WAITING_FOREVER, &event);
    if (event & SDHCI_INT_ERROR)
    {
        LOG_D("%s: Error detected in status(0x%X)!\n", __func__, sdhci_host->error_code);
        return -1;
    }

    return sdhci_receive_command_response(sdhci_host, command);
}

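/*
 * Data-phase handling. With SDHCI_SDMA_ENABLE the controller moves the data by
 * SDMA and sdhci_transfer_data_blocking() only waits for the events posted by
 * sdhci_irq(). Whenever the transfer crosses the SDMA buffer boundary (512 KB,
 * boundary code 7 in the SDHCI_MAKE_BLKSZ() call above) the controller pauses
 * and raises SDHCI_INT_DMA_END; re-writing SDHCI_DMA_ADDRESS with its current
 * value resumes the transfer, as the SD Host Controller specification
 * requires. The non-SDMA fallback drains or fills the BUFFER register by
 * polled PIO instead.
 */
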
static rt_err_t sdhci_transfer_data_blocking(struct sdhci_host* sdhci_host, struct sdhci_data* data, rt_bool_t enDMA)
{
#ifdef SDHCI_SDMA_ENABLE
    rt_err_t err;
    rt_uint32_t event;

    while (1)
    {
        err = rt_event_recv(&sdhci_host->event, SDHCI_INT_ERROR | SDHCI_INT_DATA_END | SDHCI_INT_DMA_END,
                            RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, 1000, &event);
        if (err == -RT_ETIMEOUT)
        {
            rt_kprintf("%s: Transfer data timeout\n", __func__);
            return -1;
        }
        if (event & SDHCI_INT_ERROR)
        {
            LOG_D("%s: Error detected in status(0x%X)!\n", __func__, sdhci_host->error_code);
            emmc_reg_display(sdhci_host);
            return -1;
        }
        if (event & SDHCI_INT_DMA_END)
        {
            sdhci_writel(sdhci_host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
            sdhci_writel(sdhci_host, sdhci_readl(sdhci_host, SDHCI_DMA_ADDRESS), SDHCI_DMA_ADDRESS);
        }
        if (event & SDHCI_INT_DATA_END)
        {
            if (data && data->rxData)
                rt_hw_cpu_dcache_invalidate((void*)data->rxData, data->blockSize * data->blockCount);
            return 0;
        }
    }
#else
    uint32_t stat, rdy, mask, timeout, block;

    block = 0;
    timeout = 1000000;
    rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
    mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
    while (1)
    {
        stat = sdhci_get_int_status_flag(sdhci_host);
        if (stat & SDHCI_INT_ERROR)
        {
            LOG_D("%s: Error detected in status(0x%X)!\n", __func__, stat);
            emmc_reg_display(sdhci_host);
            return -1;
        }
        if (stat & rdy)
        {
            if (!(sdhci_readl(sdhci_host, SDHCI_PRESENT_STATE) & mask))
            {
                continue;
            }
            sdhci_clear_int_status_flag(sdhci_host, rdy);
            /* the rx/tx buffers are treated as 32-bit word arrays, so advance by words per block */
            if (data->rxData)
            {
                for (int i = 0; i < data->blockSize / 4; i++)
                    data->rxData[i + block * (data->blockSize / 4)] = sdhci_readl(sdhci_host, SDHCI_BUFFER);
            }
            else
            {
                for (int i = 0; i < data->blockSize / 4; i++)
                    sdhci_writel(sdhci_host, data->txData[i + block * (data->blockSize / 4)], SDHCI_BUFFER);
            }
            block++;
            if (block >= data->blockCount)
                return 0;
        }
        if (timeout == 0)
        {
            rt_kprintf("%s: Transfer data timeout\n", __func__);
            return -1;
        }
        timeout--;
        delay_1k(1);
    }
#endif
}

static rt_err_t sdhci_set_transfer_config(struct sdhci_host* sdhci_host, struct sdhci_command* sdhci_command, struct sdhci_data* sdhci_data)
{
    RT_ASSERT(sdhci_command);

    /* Define the flag corresponding to each response type. */
    switch (sdhci_command->responseType)
    {
    case card_response_type_none:
        break;
    case card_response_type_r1: /* Response 1 */
    case card_response_type_r5: /* Response 5 */
    case card_response_type_r6: /* Response 6 */
    case card_response_type_r7: /* Response 7 */
        sdhci_command->flags |= (sdhci_cmd_resp_short | sdhci_enable_cmd_crc_flag | sdhci_enable_cmd_index_chk_flag);
        break;
    case card_response_type_r1b: /* Response 1 with busy */
    case card_response_type_r5b: /* Response 5 with busy */
        sdhci_command->flags |= (sdhci_cmd_resp_short_busy | sdhci_enable_cmd_crc_flag | sdhci_enable_cmd_index_chk_flag);
        break;
    case card_response_type_r2: /* Response 2 */
        sdhci_command->flags |= (sdhci_cmd_resp_long | sdhci_enable_cmd_crc_flag);
        break;
    case card_response_type_r3: /* Response 3 */
    case card_response_type_r4: /* Response 4 */
        sdhci_command->flags |= (sdhci_cmd_resp_short);
        break;
    default:
        break;
    }

    if (sdhci_command->type == card_command_type_abort)
    {
        sdhci_command->flags |= sdhci_enable_command_type_abort;
    }
    else if (sdhci_command->type == card_command_type_resume)
    {
        sdhci_command->flags |= sdhci_enable_command_type_resume;
    }
    else if (sdhci_command->type == card_command_type_suspend)
    {
        sdhci_command->flags |= sdhci_enable_command_type_suspend;
    }
    else if (sdhci_command->type == card_command_type_normal)
    {
        sdhci_command->flags |= sdhci_enable_command_type_normal;
    }

    if (sdhci_data)
    {
        sdhci_command->flags |= sdhci_enable_cmd_data_present_flag;
        sdhci_command->flags2 |= sdhci_enable_block_count_flag;
        if (sdhci_data->rxData)
        {
            sdhci_command->flags2 |= sdhci_data_read_flag;
        }
        if (sdhci_data->blockCount > 1U)
        {
            sdhci_command->flags2 |= (sdhci_multiple_block_flag);
            /* auto command 12 */
            if (sdhci_data->enableAutoCommand12)
            {
                /* Enable Auto command 12. */
                sdhci_command->flags2 |= sdhci_enable_auto_command12_flag;
            }
            /* auto command 23 */
            if (sdhci_data->enableAutoCommand23)
            {
                sdhci_command->flags2 |= sdhci_enable_auto_command23_flag;
            }
        }
    }

    return 0;
}

static rt_err_t sdhci_transfer_blocking(struct sdhci_host* sdhci_host)
{
    RT_ASSERT(sdhci_host);
    struct sdhci_command* sdhci_command = sdhci_host->sdhci_command;
    struct sdhci_data* sdhci_data = sdhci_host->sdhci_data;
    rt_bool_t enDMA = false;
    int ret = RT_EOK;

    /* Wait until the command/data lines are no longer busy. */
    while (sdhci_get_present_status_flag(sdhci_host) & sdhci_command_inhibit_flag)
    {
    }
    while (sdhci_data && (sdhci_get_present_status_flag(sdhci_host) & sdhci_data_inhibit_flag))
    {
    }

    sdhci_writel(sdhci_host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
    ret = sdhci_set_transfer_config(sdhci_host, sdhci_command, sdhci_data);
    if (ret != 0)
    {
        return ret;
    }
    sdhci_writel(sdhci_host, sdhci_readl(sdhci_host, SDHCI_SIGNAL_ENABLE) |
                 SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK, SDHCI_SIGNAL_ENABLE);
    rt_event_control(&sdhci_host->event, RT_IPC_CMD_RESET, 0);
    sdhci_send_command(sdhci_host, sdhci_command, enDMA);
    /* wait command done */
    ret = sdhci_wait_command_done(sdhci_host, sdhci_command, ((sdhci_data == RT_NULL) ? false : sdhci_data->executeTuning));
    /* transfer data */
    if ((sdhci_data != RT_NULL) && (ret == 0))
    {
        ret = sdhci_transfer_data_blocking(sdhci_host, sdhci_data, enDMA);
    }
    sdhci_writel(sdhci_host, sdhci_readl(sdhci_host, SDHCI_SIGNAL_ENABLE) &
                 ~(SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK), SDHCI_SIGNAL_ENABLE);
    sdhci_writel(sdhci_host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
    sdhci_reset(sdhci_host, SDHCI_RESET_CMD);
    sdhci_reset(sdhci_host, SDHCI_RESET_DATA);

    return ret;
}

static void sdhci_init(struct sdhci_host* host)
{
    sdhci_reset(host, SDHCI_RESET_ALL);
    sdhci_writeb(host, SDHCI_CTRL_HISPD, SDHCI_HOST_CONTROL);
    sdhci_writeb(host, 0x7, SDHCI_TIMEOUT_CONTROL);
    sdhci_writeb(host, SDHCI_POWER_ON | SDHCI_POWER_330, SDHCI_POWER_CONTROL);
    sdhci_writew(host, SDHCI_CLOCK_INT_EN, SDHCI_CLOCK_CONTROL);
    while ((sdhci_readw(host, SDHCI_CLOCK_CONTROL) & SDHCI_CLOCK_INT_STABLE) == 0)
        ;
    sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK, SDHCI_INT_ENABLE);
    sdhci_writel(host, SDHCI_INT_CARD_INT, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_irq(int vector, void* param)
{
    struct sdhci_host* host = param;
    uint32_t status = sdhci_get_int_status_flag(host);

    if (status & (SDHCI_INT_ERROR | SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | SDHCI_INT_RESPONSE))
    {
        host->error_code = (status >> 16) & 0xffff;
        rt_event_send(&host->event, status & (SDHCI_INT_ERROR | SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | SDHCI_INT_RESPONSE));
    }
    if (status & SDHCI_INT_CARD_INT)
        sdio_irq_wakeup(host->host);
    sdhci_clear_int_status_flag(host, status);
}

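/*
 * kd_mmc_request() implements the rt_mmcsd_host_ops request hook. Because the
 * SDMA engine needs cache-line aligned, cache-maintained memory, any request
 * buffer that is misaligned (or whose length is not a multiple of
 * CACHE_LINESIZE) is swapped for a temporary bounce buffer allocated with
 * rt_malloc_align(); read data is copied back to the caller's buffer and the
 * bounce buffer is released once sdhci_transfer_blocking() returns.
 */
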
static void kd_mmc_request(struct rt_mmcsd_host* host, struct rt_mmcsd_req* req)
{
    struct sdhci_host* mmcsd;
    struct rt_mmcsd_cmd* cmd;
    struct rt_mmcsd_data* data;
    rt_err_t error;
    struct sdhci_data sdhci_data = { 0 };
    struct sdhci_command sdhci_command = { 0 };

    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(req != RT_NULL);
    mmcsd = (struct sdhci_host*)host->private_data;
    RT_ASSERT(mmcsd != RT_NULL);
    cmd = req->cmd;
    RT_ASSERT(cmd != RT_NULL);
    LOG_D("\tcmd->cmd_code: %02d, cmd->arg: %08x, cmd->flags: %08x --> ", cmd->cmd_code, cmd->arg, cmd->flags);
    data = cmd->data;

    sdhci_command.index = cmd->cmd_code;
    sdhci_command.argument = cmd->arg;
    if (cmd->cmd_code == STOP_TRANSMISSION)
        sdhci_command.type = card_command_type_abort;
    else
        sdhci_command.type = card_command_type_normal;
    switch (cmd->flags & RESP_MASK)
    {
    case RESP_NONE:
        sdhci_command.responseType = card_response_type_none;
        break;
    case RESP_R1:
        sdhci_command.responseType = card_response_type_r1;
        break;
    case RESP_R1B:
        sdhci_command.responseType = card_response_type_r1b;
        break;
    case RESP_R2:
        sdhci_command.responseType = card_response_type_r2;
        break;
    case RESP_R3:
        sdhci_command.responseType = card_response_type_r3;
        break;
    case RESP_R4:
        sdhci_command.responseType = card_response_type_r4;
        break;
    case RESP_R6:
        sdhci_command.responseType = card_response_type_r6;
        break;
    case RESP_R7:
        sdhci_command.responseType = card_response_type_r7;
        break;
    case RESP_R5:
        sdhci_command.responseType = card_response_type_r5;
        break;
    default:
        RT_ASSERT(RT_NULL);
    }
    sdhci_command.flags = 0;
    sdhci_command.flags2 = 0;
    sdhci_command.responseErrorFlags = 0;
    mmcsd->sdhci_command = &sdhci_command;

    if (data)
    {
        if (req->stop != RT_NULL)
            sdhci_data.enableAutoCommand12 = true;
        else
            sdhci_data.enableAutoCommand12 = false;
        sdhci_data.enableAutoCommand23 = false;
        sdhci_data.blockSize = data->blksize;
        sdhci_data.blockCount = data->blks;
        if (data->flags == DATA_DIR_WRITE)
        {
            sdhci_data.txData = data->buf;
            sdhci_data.rxData = RT_NULL;
        }
        else
        {
            sdhci_data.rxData = data->buf;
            sdhci_data.txData = RT_NULL;
        }
#ifdef SDHCI_SDMA_ENABLE
        uint32_t sz = sdhci_data.blockSize * sdhci_data.blockCount;
        uint32_t pad = 0;

        if (sz & (CACHE_LINESIZE - 1))
            pad = (sz + (CACHE_LINESIZE - 1)) & ~(CACHE_LINESIZE - 1);
        if (sdhci_data.rxData && (((uint64_t)(sdhci_data.rxData) & (CACHE_LINESIZE - 1)) || pad))
        {
            sdhci_data.rxData = rt_malloc_align(pad ? pad : sz, CACHE_LINESIZE);
        }
        else if (((uint64_t)(sdhci_data.txData) & (CACHE_LINESIZE - 1)) || pad)
        {
            sdhci_data.txData = rt_malloc_align(pad ? pad : sz, CACHE_LINESIZE);
            rt_memcpy((void *)sdhci_data.txData, data->buf, sz);
        }
#endif
        mmcsd->sdhci_data = &sdhci_data;
    }
    else
    {
        mmcsd->sdhci_data = RT_NULL;
    }

    error = sdhci_transfer_blocking(mmcsd);

#ifdef SDHCI_SDMA_ENABLE
    if (data && sdhci_data.rxData && sdhci_data.rxData != data->buf)
    {
        rt_memcpy(data->buf, sdhci_data.rxData, sdhci_data.blockSize * sdhci_data.blockCount);
        rt_free_align(sdhci_data.rxData);
    }
    else if (data && sdhci_data.txData && sdhci_data.txData != data->buf)
    {
        rt_free_align((void *)sdhci_data.txData);
    }
#endif

    if (error == -1)
    {
        LOG_D(" ***USDHC_TransferBlocking error: %d*** --> \n", error);
        cmd->err = -RT_ERROR;
    }

    if ((cmd->flags & RESP_MASK) == RESP_R2)
    {
        cmd->resp[3] = sdhci_command.response[0];
        cmd->resp[2] = sdhci_command.response[1];
        cmd->resp[1] = sdhci_command.response[2];
        cmd->resp[0] = sdhci_command.response[3];
        LOG_D(" resp 0x%08X 0x%08X 0x%08X 0x%08X\n",
              cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
    }
    else
    {
        cmd->resp[0] = sdhci_command.response[0];
        LOG_D(" resp 0x%08X\n", cmd->resp[0]);
    }

    mmcsd_req_complete(host);
}

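/*
 * kd_mmc_clock_freq_change() programs the SDHCI version 3.00 10-bit divided
 * clock: the divisor is searched in steps of two, halved, and split into the
 * low eight bits (SDHCI_DIVIDER_SHIFT) and the upper two bits
 * (SDHCI_DIVIDER_HI_SHIFT) of CLK_CTRL_R. For example, with max_clk = 200 MHz
 * and a requested 25 MHz the search stops at div = 8, the register field
 * becomes 4, and the card clock is 200 MHz / (2 * 4) = 25 MHz.
 */
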
static void kd_mmc_clock_freq_change(struct sdhci_host* host, uint32_t clock)
{
    uint32_t div, val;

    val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
    val &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_PROG_CLOCK_MODE);
    sdhci_writew(host, val, SDHCI_CLOCK_CONTROL);
    if (clock == 0)
        return;

    if (host->max_clk <= clock)
    {
        div = 1;
    }
    else
    {
        for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2)
        {
            if ((host->max_clk / div) <= clock)
                break;
        }
    }
    div >>= 1;

    val &= ~((SDHCI_DIV_MASK << SDHCI_DIVIDER_SHIFT) | SDHCI_DIV_HI_MASK);
    val |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
    val |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
           << SDHCI_DIVIDER_HI_SHIFT;
    val |= SDHCI_CLOCK_CARD_EN | SDHCI_PROG_CLOCK_MODE;
    sdhci_writew(host, val, SDHCI_CLOCK_CONTROL);
    while ((sdhci_readw(host, SDHCI_CLOCK_CONTROL) & SDHCI_CLOCK_INT_STABLE) == 0)
        ;
}

static void kd_set_iocfg(struct rt_mmcsd_host* host, struct rt_mmcsd_io_cfg* io_cfg)
{
    struct sdhci_host* mmcsd;
    unsigned int sdhci_clk;
    unsigned int bus_width;
    uint8_t ctrl;

    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    RT_ASSERT(io_cfg != RT_NULL);

    mmcsd = (struct sdhci_host*)host->private_data;
    sdhci_clk = io_cfg->clock;
    bus_width = io_cfg->bus_width;
    LOG_D("%s: sdhci_clk=%d, bus_width:%d\n", __func__, sdhci_clk, bus_width);

    kd_mmc_clock_freq_change(mmcsd, sdhci_clk);

    ctrl = sdhci_readb(mmcsd, SDHCI_HOST_CONTROL);
    ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_CTRL_8BITBUS);
    if (bus_width == 3)      /* MMCSD_BUS_WIDTH_8 */
        ctrl |= SDHCI_CTRL_8BITBUS;
    else if (bus_width == 2) /* MMCSD_BUS_WIDTH_4 */
        ctrl |= SDHCI_CTRL_4BITBUS;
    sdhci_writeb(mmcsd, ctrl, SDHCI_HOST_CONTROL);
}

static void kd_enable_sdio_irq(struct rt_mmcsd_host* mmcsd_host, rt_int32_t en)
{
    struct sdhci_host* host = (struct sdhci_host*)mmcsd_host->private_data;
    uint32_t val;

    val = sdhci_readw(host, SDHCI_INT_ENABLE);
    if (en)
        val |= SDHCI_INT_CARD_INT;
    else
        val &= ~SDHCI_INT_CARD_INT;
    sdhci_writew(host, val, SDHCI_INT_ENABLE);
}

static const struct rt_mmcsd_host_ops ops = {
    kd_mmc_request,
    kd_set_iocfg,
    RT_NULL,
    kd_enable_sdio_irq,
    RT_NULL,
};

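/*
 * kd_sdhci0_reset() toggles the eMMC hardware reset line through the vendor
 * EMMC_CTRL_R register: EMMC_RST_N_OE enables the pad as an output and
 * EMMC_RST_N sets the level driven on the card's RST_n pin.
 */
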
void kd_sdhci0_reset(int value)
{
    struct sdhci_host* host = sdhci_host0;
    uint16_t emmc_ctl = sdhci_readw(host, EMMC_CTRL_R);

    emmc_ctl |= (1 << EMMC_RST_N_OE);
    if (value)
        emmc_ctl |= (1 << EMMC_RST_N);
    else
        emmc_ctl &= ~(1 << EMMC_RST_N);
    sdhci_writeb(host, emmc_ctl, EMMC_CTRL_R);
}

void kd_sdhci_change(void)
{
#ifdef BSP_USING_SDIO0
    mmcsd_change(sdhci_host0->host);
#endif
#ifdef BSP_USING_SDIO1
    mmcsd_change(sdhci_host1->host);
#endif
}

rt_int32_t kd_sdhci_init(void)
{
    uint32_t val;
    void* hi_sys_virt_addr = rt_ioremap((void*)0x91585000, 0x10);

#ifdef BSP_USING_SDIO0
    val = readl(hi_sys_virt_addr + 0);
    val |= 1 << 6 | 1 << 4;
    writel(val, hi_sys_virt_addr + 0);

    sdhci_host0 = rt_malloc(sizeof(struct sdhci_host));
    if (!sdhci_host0)
        return -1;
    rt_memset(sdhci_host0, 0, sizeof(struct sdhci_host));
    sdhci_host0->mapbase = (void*)rt_ioremap((void*)SDEMMC0_BASE, 0x1000);
    sdhci_host0->index = 0;
    sdhci_host0->have_phy = 1;
    sdhci_host0->mshc_ctrl_r = 0;
    sdhci_host0->rx_delay_line = 0x0d;
    sdhci_host0->tx_delay_line = 0xc0;
#ifdef BSP_SDIO0_EMMC
    sdhci_host0->is_emmc_card = 1;
#else
    sdhci_host0->is_emmc_card = 0;
#endif
#ifdef BSP_SDIO0_1V8
    sdhci_host0->io_fixed_1v8 = 1;
#else
    sdhci_host0->io_fixed_1v8 = 0;
#endif
    sdhci_host0->sdhci_data = RT_NULL;
    sdhci_host0->sdhci_command = RT_NULL;
    sdhci_host0->max_clk = 200000000;
    sdhci_init(sdhci_host0);
    rt_event_init(&sdhci_host0->event, "sd0_event", RT_IPC_FLAG_PRIO);
    rt_hw_interrupt_install(IRQN_SD0, sdhci_irq, sdhci_host0, "sd0");
    rt_hw_interrupt_umask(IRQN_SD0);

    struct rt_mmcsd_host* mmcsd_host0 = mmcsd_alloc_host();
    if (!mmcsd_host0)
    {
        rt_free(sdhci_host0);
        return -1;
    }
    mmcsd_host0->ops = &ops;
    mmcsd_host0->freq_min = 400000;
    mmcsd_host0->freq_max = 50000000;
#ifdef BSP_SDIO0_EMMC
    strncpy(mmcsd_host0->name, "emmc", sizeof(mmcsd_host0->name) - 1);
    mmcsd_host0->flags = MMCSD_BUSWIDTH_8 | MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
#else
    strncpy(mmcsd_host0->name, "sd0", sizeof(mmcsd_host0->name) - 1);
    mmcsd_host0->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
#endif
    mmcsd_host0->valid_ocr = sdhci_host0->io_fixed_1v8 ? VDD_165_195 : VDD_32_33 | VDD_33_34;
#ifdef BSP_USING_CYW43XX
    mmcsd_host0->valid_ocr = VDD_32_33 | VDD_33_34;
#endif
    mmcsd_host0->max_seg_size = 512 * 512;
    mmcsd_host0->max_dma_segs = 1;
    mmcsd_host0->max_blk_size = 512;
    mmcsd_host0->max_blk_count = 4096;
    mmcsd_host0->private_data = sdhci_host0;
    sdhci_host0->host = mmcsd_host0;
#endif

#ifdef BSP_USING_SDIO1
    val = readl(hi_sys_virt_addr + 8);
    val |= 1 << 2 | 1 << 0;
    writel(val, hi_sys_virt_addr + 8);

    sdhci_host1 = rt_malloc(sizeof(struct sdhci_host));
    if (!sdhci_host1)
        return -2;
    rt_memset(sdhci_host1, 0, sizeof(struct sdhci_host));
    sdhci_host1->mapbase = (void*)rt_ioremap((void*)SDEMMC1_BASE, 0x1000);
    sdhci_host1->index = 1;
    sdhci_host1->have_phy = 0;
    sdhci_host1->mshc_ctrl_r = 0;
    sdhci_host1->rx_delay_line = 0;
    sdhci_host1->tx_delay_line = 0;
    sdhci_host1->sdhci_data = RT_NULL;
    sdhci_host1->sdhci_command = RT_NULL;
    sdhci_host1->max_clk = 100000000;
    sdhci_init(sdhci_host1);
    rt_event_init(&sdhci_host1->event, "sd1_event", RT_IPC_FLAG_PRIO);
    rt_hw_interrupt_install(IRQN_SD1, sdhci_irq, sdhci_host1, "sd1");
    rt_hw_interrupt_umask(IRQN_SD1);

    struct rt_mmcsd_host* mmcsd_host1 = mmcsd_alloc_host();
    if (!mmcsd_host1)
    {
        rt_free(sdhci_host1);
        return -2;
    }
    strncpy(mmcsd_host1->name, "sd1", sizeof(mmcsd_host1->name) - 1);
    mmcsd_host1->ops = &ops;
    mmcsd_host1->freq_min = 400000;
    mmcsd_host1->freq_max = 50000000;
    mmcsd_host1->valid_ocr = VDD_32_33 | VDD_33_34;
    mmcsd_host1->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
    mmcsd_host1->max_seg_size = 512 * 512;
    mmcsd_host1->max_dma_segs = 1;
    mmcsd_host1->max_blk_size = 512;
    mmcsd_host1->max_blk_count = 4096;
    mmcsd_host1->private_data = sdhci_host1;
    sdhci_host1->host = mmcsd_host1;
#endif

    kd_sdhci_change();
    rt_iounmap(hi_sys_virt_addr);

    return 0;
}
INIT_DEVICE_EXPORT(kd_sdhci_init);

#endif /* defined(BSP_USING_SDIO0) || defined(BSP_USING_SDIO1) */
#endif /* RT_USING_SDIO */