drv_sdio.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971
  1. /*
  2. * Copyright (c) 2022-2024 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-02-23 HPMicro First version
  9. * 2022-07-19 HPMicro Fixed the multi-block read/write issue
  10. * 2023-07-27 HPMicro Fixed clock setting issue
  11. * 2023-08-02 HPMicro Added speed mode setting
  12. * 2024-01-03 HPMicro Added multiple instance support
  13. * 2024-05-23 HPMicro Fixed unaligned transfer issue in the SDIO case
  14. * 2024-05-25 HPMicro Added HS200 & HS400 support, optimize the cache-management policy for read
  15. * 2024-05-26 HPMicro Added UHS-I support, added DDR50 and High Speed DDR mode support
  16. * 2024-06-19 HPMicro Added timeout check for SDXC transfer
  17. */
  18. #include <rtthread.h>
  19. #ifdef BSP_USING_SDXC
  20. #include <rthw.h>
  21. #include <rtdevice.h>
  22. #include <rtdbg.h>
  23. #include "board.h"
  24. #include "hpm_sdxc_drv.h"
  25. #include "hpm_l1c_drv.h"
/* L1 data-cache line size in bytes; used to align DMA buffers for cache maintenance */
#define CACHE_LINESIZE HPM_L1C_CACHELINE_SIZE
/* Size (in 32-bit words) of the ADMA2 descriptor table used per transfer */
#define SDXC_ADMA_TABLE_WORDS (2U)
/* Minimum address alignment required by the ADMA2 engine */
#define SDXC_AMDA2_ADDR_ALIGN (4U)
/* Data timeout code -- NOTE(review): not referenced in this file's visible code; confirm intended use */
#define SDXC_DATA_TIMEOUT (0xFU)
#define SDXC_CACHELINE_ALIGN_DOWN(x) HPM_L1C_CACHELINE_ALIGN_DOWN(x)
#define SDXC_CACHELINE_ALIGN_UP(x) HPM_L1C_CACHELINE_ALIGN_UP(x)
/* Evaluates to true when n is an exact multiple of the cache line size */
#define SDXC_IS_CACHELINE_ALIGNED(n) ((uint32_t)(n) % (uint32_t)(CACHE_LINESIZE) == 0U)
/**
 * Note: Allocate cache-line aligned buffer in the SD/eMMC read/write case may require larger heap size
 * if the read/write length is a big number (for example: 64KB), the RT-Thread RTOS may
 * be unable to allocate enough size of buffer if the heap size is small.
 *
 * Keep this option disabled by default, please enable it if the default setting cannot meet
 * real requirement of application.
 */
#define HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF 0
/* Per-instance driver state for one SDXC host controller. */
struct hpm_mmcsd
{
    struct rt_mmcsd_host *host;     /* RT-Thread host handle, set in rt_hw_sdio_init() */
    struct rt_mmcsd_req *req;
    struct rt_mmcsd_cmd *cmd;
    struct rt_timer *timer;
    char name[RT_NAME_MAX];         /* device name, e.g. "sd0" */
    rt_uint32_t *buf;
    SDXC_Type *sdxc_base;           /* controller register base */
    int32_t irq_num;                /* controller interrupt number */
    uint32_t *sdxc_adma2_table;     /* ADMA2 descriptor table (placed in non-cacheable RAM) */
    bool support_8bit;              /* capability flags derived from BSP options */
    bool support_4bit;
    bool support_1v8;
    bool support_3v3;
    uint8_t power_mode;             /* cached io_cfg state; used to skip redundant re-configuration */
    uint8_t bus_width;
    uint8_t timing;
    uint8_t bus_mode;
    uint32_t freq;                  /* last configured clock frequency in Hz */
    uint16_t vdd;
    const char *vsel_pin_name;      /* optional voltage-select pin name (NULL when not defined by BSP) */
    const char *pwr_pin_name;       /* optional power-control pin name (NULL when not defined by BSP) */
};
/**
 * @brief SDIO CMD53 argument
 *
 * Overlays the raw 32-bit command argument with the CMD53 bit layout so
 * individual fields can be rewritten (used to round the transfer count up
 * to a word multiple for the ADMA2 engine).
 */
typedef union
{
    uint32_t value;                 /* raw 32-bit CMD53 argument */
    struct
    {
        uint32_t count :9;          /* byte count (or block count in block mode) */
        uint32_t reg_addr :17;      /* register address within the function */
        uint32_t op_code :1;        /* presumably 0 = fixed address, 1 = incrementing -- confirm against SDIO spec */
        uint32_t block_mode :1;     /* 1 = block-mode transfer */
        uint32_t func_num :3;       /* SDIO function number */
        uint32_t rw_flag :1;        /* 1 = write, 0 = read */
    };
} sdio_cmd53_arg_t;
  82. static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req);
  83. static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg);
  84. static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en);
  85. static void hpm_sdmmc_host_recovery(SDXC_Type *base);
  86. static hpm_stat_t hpm_sdmmc_transfer(SDXC_Type *base, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
  87. static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode);
  88. static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host);
  89. static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd);
  90. static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd);
  91. static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd);
  92. static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd);
  93. static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output);
  94. static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value);
  95. static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output)
  96. {
  97. rt_base_t pin = rt_pin_get(pin_name);
  98. if (pin < 0) {
  99. return;
  100. }
  101. rt_uint8_t mode = (is_output) ? PIN_MODE_OUTPUT : PIN_MODE_INPUT_PULLUP;
  102. if (is_output)
  103. {
  104. rt_pin_mode(pin, mode);
  105. }
  106. }
  107. static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value)
  108. {
  109. rt_base_t pin = rt_pin_get(pin_name);
  110. if (pin < 0)
  111. {
  112. return;
  113. }
  114. rt_pin_write(pin, value);
  115. }
  116. static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd)
  117. {
  118. hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 1);
  119. }
  120. static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd)
  121. {
  122. hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 0);
  123. }
  124. static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd)
  125. {
  126. hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 0);
  127. }
  128. static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd)
  129. {
  130. hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 1);
  131. }
  132. static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host)
  133. {
  134. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  135. SDXC_Type *base = mmcsd->sdxc_base;
  136. /* 1. Stop providing clock to the card */
  137. sdxc_enable_inverse_clock(mmcsd->sdxc_base, false);
  138. sdxc_enable_sd_clock(mmcsd->sdxc_base, false);
  139. /* 2. Wait until DAT[3:0] are 4'b0000 */
  140. uint32_t data3_0_level;
  141. uint32_t delay_cnt = 1000000UL;
  142. do
  143. {
  144. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  145. --delay_cnt;
  146. } while ((data3_0_level != 0U) && (delay_cnt > 0U));
  147. if (delay_cnt < 1)
  148. {
  149. return -RT_ETIMEOUT;
  150. }
  151. /* 3. Switch to 1.8V */
  152. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  153. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
  154. /* 4. spec:host delay 5ms, host: give more delay time here */
  155. rt_thread_mdelay(10);
  156. /* 5. Provide SD clock the card again */
  157. sdxc_enable_inverse_clock(mmcsd->sdxc_base, true);
  158. sdxc_enable_sd_clock(mmcsd->sdxc_base, true);
  159. /* 6. spec: wait 1ms, host: give more delay time here */
  160. rt_thread_mdelay(5);
  161. /* 7. Check DAT[3:0], make sure the value is 4'b0000 */
  162. delay_cnt = 1000000UL;
  163. data3_0_level;
  164. do
  165. {
  166. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  167. --delay_cnt;
  168. } while ((data3_0_level == 0U) && (delay_cnt > 0U));
  169. if (delay_cnt < 1)
  170. {
  171. return -RT_ETIMEOUT;
  172. }
  173. return RT_EOK;
  174. }
/* Host operation table registered with the RT-Thread MMC/SD stack. */
static const struct rt_mmcsd_host_ops hpm_mmcsd_host_ops =
{
    .request = hpm_sdmmc_request,
    .set_iocfg = hpm_sdmmc_set_iocfg,
    .get_card_status = NULL,
    /* NOTE(review): hpm_sdmmc_enable_sdio_irq is defined below but not wired
     * up here -- confirm whether SDIO IRQ reporting is intentionally disabled. */
    .enable_sdio_irq = NULL,
    .execute_tuning = hpm_sdmmc_execute_tuning,
    .switch_uhs_voltage = hpm_sdmmc_switch_uhs_voltage,
};
#if defined(BSP_USING_SDXC0)
/* Place the ADMA2 table to non-cacheable region */
ATTR_PLACE_AT_NONCACHEABLE static uint32_t s_sdxc0_adma2_table[SDXC_ADMA_TABLE_WORDS];
/* SDXC0 instance descriptor; capabilities are resolved from BSP Kconfig options. */
static struct hpm_mmcsd s_hpm_sdxc0 =
{
    .name = "sd0",
    .sdxc_base = HPM_SDXC0,
    .sdxc_adma2_table = s_sdxc0_adma2_table,
    .irq_num = IRQn_SDXC0,
#if defined(BSP_SDXC0_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_1BIT)
    /* 1-bit only: no wide-bus capability flags set */
#else
    /* default to 4-bit when the BSP does not select a width */
    .support_4bit = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC0_VSEL_PIN,
#endif
#if defined(BSP_SDXC0_PWR_PIN)
    .pwr_pin_name = BSP_SDXC0_PWR_PIN,
#endif
};
#endif
#if defined(BSP_USING_SDXC1)
/* Place the ADMA2 table to non-cacheable region */
ATTR_PLACE_AT_NONCACHEABLE static uint32_t s_sdxc1_adma2_table[SDXC_ADMA_TABLE_WORDS];
/* SDXC1 instance descriptor; capabilities are resolved from BSP Kconfig options. */
static struct hpm_mmcsd s_hpm_sdxc1 =
{
    .name = "sd1",
    .sdxc_base = HPM_SDXC1,
    .sdxc_adma2_table = s_sdxc1_adma2_table,
    .irq_num = IRQn_SDXC1,
#if defined(BSP_SDXC1_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_1BIT)
    /* 1-bit only: no wide-bus capability flags set */
#else
    /* default to 4-bit when the BSP does not select a width */
    .support_4bit = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC1_VSEL_PIN,
#endif
#if defined(BSP_SDXC1_PWR_PIN)
    .pwr_pin_name = BSP_SDXC1_PWR_PIN,
#endif
};
#endif
/* All SDXC instances enabled by the BSP configuration; iterated in rt_hw_sdio_init(). */
static struct hpm_mmcsd *hpm_sdxcs[] =
{
#if defined(BSP_USING_SDXC0)
    &s_hpm_sdxc0,
#endif
#if defined(BSP_USING_SDXC1)
    &s_hpm_sdxc1,
#endif
};
  266. static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
  267. {
  268. RT_ASSERT(host != RT_NULL); RT_ASSERT(host->private_data != RT_NULL);
  269. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  270. SDXC_Type *base = mmcsd->sdxc_base;
  271. /* Prepare the Auto tuning environment */
  272. sdxc_stop_clock_during_phase_code_change(base, true);
  273. sdxc_set_post_change_delay(base, 3U);
  274. sdxc_select_cardclk_delay_source(base, false);
  275. sdxc_enable_power(base, true);
  276. hpm_stat_t err = sdxc_perform_auto_tuning(base, opcode);
  277. return (err != status_success) ? -RT_EPERM : RT_EOK;
  278. }
  279. static hpm_stat_t hpm_sdmmc_transfer(SDXC_Type *base, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
  280. {
  281. hpm_stat_t status;
  282. sdxc_command_t *cmd = xfer->command;
  283. sdxc_data_t *data = xfer->data;
  284. status = sdxc_transfer_nonblocking(base, dma_config, xfer);
  285. if (status != status_success)
  286. {
  287. return -RT_ERROR;
  288. }
  289. /* Wait until idle */
  290. volatile uint32_t interrupt_status = sdxc_get_interrupt_status(base);
  291. volatile rt_base_t start_tick = rt_tick_get();
  292. while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK))
  293. {
  294. interrupt_status = sdxc_get_interrupt_status(base);
  295. status = sdxc_parse_interrupt_status(base);
  296. HPM_BREAK_IF(status != status_success);
  297. rt_base_t current_tick = rt_tick_get();
  298. if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
  299. {
  300. return -RT_ETIMEOUT;
  301. }
  302. }
  303. sdxc_clear_interrupt_status(base, SDXC_INT_STAT_CMD_COMPLETE_MASK);
  304. if (status == status_success)
  305. {
  306. status = sdxc_receive_cmd_response(base, cmd);
  307. }
  308. if ((status == status_success) && (data != RT_NULL))
  309. {
  310. interrupt_status = sdxc_get_interrupt_status(base);
  311. start_tick = rt_tick_get();
  312. while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_STS_ERROR))
  313. {
  314. interrupt_status = sdxc_get_interrupt_status(base);
  315. status = sdxc_parse_interrupt_status(base);
  316. HPM_BREAK_IF(status != status_success);
  317. rt_base_t current_tick = rt_tick_get();
  318. if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
  319. {
  320. return -RT_ETIMEOUT;
  321. }
  322. }
  323. }
  324. return status;
  325. }
  326. /**
  327. * !@brief SDMMC request implementation based on HPMicro SDXC Host
  328. */
  329. static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
  330. {
  331. RT_ASSERT(host != RT_NULL);
  332. RT_ASSERT(host->private_data != RT_NULL);
  333. RT_ASSERT(req != RT_NULL);
  334. RT_ASSERT(req->cmd != RT_NULL);
  335. sdxc_adma_config_t adma_config = { 0 };
  336. sdxc_xfer_t xfer = { 0 };
  337. sdxc_command_t sdxc_cmd = { 0 };
  338. sdxc_data_t sdxc_data = { 0 };
  339. uint32_t *raw_alloc_buf = RT_NULL;
  340. uint32_t *aligned_buf = RT_NULL;
  341. hpm_stat_t err = status_invalid_argument;
  342. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  343. struct rt_mmcsd_cmd *cmd = req->cmd;
  344. struct rt_mmcsd_data *data = cmd->data;
  345. /* configure command */
  346. sdxc_cmd.cmd_index = cmd->cmd_code;
  347. sdxc_cmd.cmd_argument = cmd->arg;
  348. sdxc_cmd.cmd_type = (cmd->cmd_code == STOP_TRANSMISSION) ? sdxc_cmd_type_abort_cmd : sdxc_cmd_type_normal_cmd;
  349. switch (cmd->flags & RESP_MASK)
  350. {
  351. case RESP_NONE:
  352. sdxc_cmd.resp_type = sdxc_dev_resp_none;
  353. break;
  354. case RESP_R1:
  355. sdxc_cmd.resp_type = sdxc_dev_resp_r1;
  356. break;
  357. case RESP_R1B:
  358. sdxc_cmd.resp_type = sdxc_dev_resp_r1b;
  359. break;
  360. case RESP_R2:
  361. sdxc_cmd.resp_type = sdxc_dev_resp_r2;
  362. break;
  363. case RESP_R3:
  364. sdxc_cmd.resp_type = sdxc_dev_resp_r3;
  365. break;
  366. case RESP_R4:
  367. sdxc_cmd.resp_type = sdxc_dev_resp_r4;
  368. break;
  369. case RESP_R6:
  370. sdxc_cmd.resp_type = sdxc_dev_resp_r6;
  371. break;
  372. case RESP_R7:
  373. sdxc_cmd.resp_type = sdxc_dev_resp_r7;
  374. break;
  375. case RESP_R5:
  376. sdxc_cmd.resp_type = sdxc_dev_resp_r5;
  377. break;
  378. default:
  379. RT_ASSERT(NULL);
  380. break;
  381. }
  382. sdxc_cmd.cmd_flags = 0UL;
  383. xfer.command = &sdxc_cmd;
  384. xfer.data = NULL;
  385. if (data != NULL)
  386. {
  387. sdxc_data.enable_auto_cmd12 = false;
  388. sdxc_data.enable_auto_cmd23 = false;
  389. sdxc_data.enable_ignore_error = false;
  390. sdxc_data.data_type = sdxc_xfer_data_normal;
  391. sdxc_data.block_size = data->blksize;
  392. sdxc_data.block_cnt = data->blks;
  393. /* configure adma2 */
  394. adma_config.dma_type = sdxc_dmasel_adma2;
  395. adma_config.adma_table = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE,
  396. (uint32_t) mmcsd->sdxc_adma2_table);
  397. adma_config.adma_table_words = SDXC_ADMA_TABLE_WORDS;
  398. size_t xfer_buf_addr = (uint32_t)data->buf;
  399. uint32_t xfer_len = data->blks * data->blksize;
  400. if ((req->data->flags & DATA_DIR_WRITE) != 0U)
  401. {
  402. uint32_t write_size = xfer_len;
  403. size_t aligned_start;
  404. uint32_t aligned_size;
  405. #if defined(HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF) && (HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF == 1)
  406. if (!SDXC_IS_CACHELINE_ALIGNED(xfer_buf_addr) || !SDXC_IS_CACHELINE_ALIGNED(write_size))
  407. #else
  408. if ((xfer_buf_addr % 4 != 0) && (write_size % 4 != 0))
  409. #endif
  410. {
  411. write_size = SDXC_CACHELINE_ALIGN_UP(xfer_len);
  412. raw_alloc_buf = (uint32_t *) rt_malloc(write_size + CACHE_LINESIZE - RT_ALIGN_SIZE);
  413. RT_ASSERT(raw_alloc_buf != RT_NULL);
  414. aligned_buf = (uint32_t *) SDXC_CACHELINE_ALIGN_UP(raw_alloc_buf);
  415. RT_ASSERT(aligned_buf != RT_NULL);
  416. memcpy(aligned_buf, data->buf, xfer_len);
  417. memset(aligned_buf + write_size, 0, write_size - xfer_len);
  418. sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) aligned_buf);
  419. aligned_start = (uint32_t)sdxc_data.tx_data;
  420. aligned_size = write_size;
  421. }
  422. else
  423. {
  424. sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  425. aligned_start = SDXC_CACHELINE_ALIGN_DOWN(sdxc_data.tx_data);
  426. size_t aligned_end = SDXC_CACHELINE_ALIGN_UP((uint32_t)sdxc_data.tx_data + write_size);
  427. aligned_size = aligned_end - aligned_start;
  428. }
  429. rt_base_t level = rt_hw_interrupt_disable();
  430. l1c_dc_flush(aligned_start, aligned_size);
  431. rt_hw_interrupt_enable(level);
  432. sdxc_data.rx_data = NULL;
  433. }
  434. else
  435. {
  436. uint32_t read_size = xfer_len;
  437. #if defined(HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF) && (HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF == 1)
  438. if (!SDXC_IS_CACHELINE_ALIGNED(xfer_buf_addr) || !SDXC_IS_CACHELINE_ALIGNED(read_size))
  439. #else
  440. if ((xfer_buf_addr % 4 != 0) || (read_size % 4 != 0))
  441. #endif
  442. {
  443. uint32_t aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
  444. raw_alloc_buf = (uint32_t *) rt_malloc(aligned_read_size + CACHE_LINESIZE - RT_ALIGN_SIZE);
  445. RT_ASSERT(raw_alloc_buf != RT_NULL);
  446. aligned_buf = (uint32_t *) SDXC_CACHELINE_ALIGN_UP(raw_alloc_buf);
  447. sdxc_data.rx_data = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) aligned_buf);
  448. }
  449. else
  450. {
  451. sdxc_data.rx_data = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  452. size_t aligned_start = SDXC_CACHELINE_ALIGN_DOWN(sdxc_data.rx_data);
  453. size_t aligned_end = SDXC_CACHELINE_ALIGN_UP((uint32_t)sdxc_data.rx_data + read_size);
  454. uint32_t aligned_size = aligned_end - aligned_start;
  455. rt_base_t level = rt_hw_interrupt_disable();
  456. l1c_dc_flush(aligned_start, aligned_size);
  457. rt_hw_interrupt_enable(level);
  458. }
  459. sdxc_data.tx_data = RT_NULL;
  460. }
  461. xfer.data = &sdxc_data;
  462. /* Align the write/read size since the ADMA2 engine in the SDXC cannot transfer unaligned size of data */
  463. if ((cmd->cmd_code == SD_IO_RW_EXTENDED) && (xfer_len % 4 != 0))
  464. {
  465. sdio_cmd53_arg_t cmd53_arg;
  466. cmd53_arg.value = sdxc_cmd.cmd_argument;
  467. cmd53_arg.count = HPM_ALIGN_UP(xfer_len, 4);
  468. sdxc_cmd.cmd_argument = cmd53_arg.value;
  469. sdxc_data.block_size = HPM_ALIGN_UP(xfer_len, 4);
  470. }
  471. }
  472. if ((req->data->blks > 1) && ((cmd->cmd_code == READ_MULTIPLE_BLOCK) || ((cmd->cmd_code == WRITE_MULTIPLE_BLOCK))))
  473. {
  474. xfer.data->enable_auto_cmd12 = true;
  475. }
  476. err = hpm_sdmmc_transfer(mmcsd->sdxc_base, &adma_config, &xfer);
  477. LOG_I("cmd=%d, arg=%x\n", cmd->cmd_code, cmd->arg);
  478. if (err != status_success)
  479. {
  480. hpm_sdmmc_host_recovery(mmcsd->sdxc_base);
  481. if (err != status_sdxc_cmd_timeout_error) /* Ignore command timeout error by default */
  482. {
  483. LOG_E(" ***hpm_sdmmc_transfer error: %d, cmd:%d, arg:0x%x*** -->\n", err, cmd->cmd_code, cmd->arg);
  484. }
  485. cmd->err = -RT_ERROR;
  486. }
  487. else
  488. {
  489. LOG_I(" ***hpm_sdmmc_transfer passed: %d*** -->\n", err);
  490. if (sdxc_cmd.resp_type == sdxc_dev_resp_r2)
  491. {
  492. LOG_I("resp:0x%08x 0x%08x 0x%08x 0x%08x\n", sdxc_cmd.response[0],
  493. sdxc_cmd.response[1], sdxc_cmd.response[2], sdxc_cmd.response[3]);
  494. }
  495. else
  496. {
  497. LOG_I("resp:0x%08x\n", sdxc_cmd.response[0]);
  498. }
  499. }
  500. if ((sdxc_data.rx_data != NULL) && (cmd->err == RT_EOK))
  501. {
  502. uint32_t read_size = data->blks * data->blksize;
  503. if (aligned_buf != RT_NULL)
  504. {
  505. uint32_t aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
  506. rt_base_t level = rt_hw_interrupt_disable();
  507. l1c_dc_invalidate((uint32_t) aligned_buf, aligned_read_size);
  508. rt_hw_interrupt_enable(level);
  509. memcpy(data->buf, aligned_buf, read_size);
  510. }
  511. else
  512. {
  513. size_t aligned_start = SDXC_CACHELINE_ALIGN_DOWN(sdxc_data.rx_data);
  514. size_t aligned_end = SDXC_CACHELINE_ALIGN_UP((uint32_t)sdxc_data.rx_data + read_size);
  515. uint32_t aligned_size = aligned_end - aligned_start;
  516. rt_base_t level = rt_hw_interrupt_disable();
  517. l1c_dc_invalidate(aligned_start, aligned_size);
  518. rt_hw_interrupt_enable(level);
  519. }
  520. }
  521. if (raw_alloc_buf != RT_NULL)
  522. {
  523. rt_free(raw_alloc_buf);
  524. raw_alloc_buf = RT_NULL;
  525. aligned_buf = RT_NULL;
  526. }
  527. if ((cmd->flags & RESP_MASK) == RESP_R2)
  528. {
  529. cmd->resp[3] = sdxc_cmd.response[0];
  530. cmd->resp[2] = sdxc_cmd.response[1];
  531. cmd->resp[1] = sdxc_cmd.response[2];
  532. cmd->resp[0] = sdxc_cmd.response[3];
  533. }
  534. else
  535. {
  536. cmd->resp[0] = sdxc_cmd.response[0];
  537. }
  538. mmcsd_req_complete(host);
  539. }
  540. static void hpm_sdmmc_set_cardclk_delay_chain(struct hpm_mmcsd *mmcsd)
  541. {
  542. SDXC_Type *base = mmcsd->sdxc_base;
  543. bool need_inverse = sdxc_is_inverse_clock_enabled(base);
  544. sdxc_enable_inverse_clock(base, false);
  545. sdxc_enable_sd_clock(base, false);
  546. uint32_t num_delaycells = sdxc_get_default_cardclk_delay_chain(base, mmcsd->freq);
  547. sdxc_set_cardclk_delay_chain(base, num_delaycells);
  548. sdxc_enable_inverse_clock(base, need_inverse);
  549. sdxc_enable_sd_clock(base, true);
  550. }
  551. ATTR_WEAK void init_sdxc_ds_pin(SDXC_Type *base)
  552. {
  553. LOG_W("Ignore this warning if the DS pin is not supported\n");
  554. }
  555. /**
  556. * !@brief Set IO Configuration for HPMicro IO and SDXC Host
  557. */
  558. static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
  559. {
  560. RT_ASSERT(host != RT_NULL);
  561. RT_ASSERT(host->private_data != RT_NULL);
  562. RT_ASSERT(io_cfg != RT_NULL);
  563. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  564. /* Power control */
  565. uint32_t vdd = io_cfg->vdd;
  566. if (io_cfg->power_mode != mmcsd->power_mode)
  567. {
  568. switch(io_cfg->power_mode)
  569. {
  570. case MMCSD_POWER_OFF:
  571. hpm_sdmmc_power_off_via_pin(mmcsd);
  572. break;
  573. case MMCSD_POWER_ON:
  574. hpm_sdmmc_power_on_via_pin(mmcsd);
  575. break;
  576. case MMCSD_POWER_UP:
  577. hpm_sdmmc_power_off_via_pin(mmcsd);
  578. rt_thread_mdelay(10);
  579. hpm_sdmmc_power_on_via_pin(mmcsd);
  580. /* After power up, wait 1ms, then wait 74 card clock */
  581. rt_thread_mdelay(1);
  582. sdxc_wait_card_active(mmcsd->sdxc_base);
  583. break;
  584. default:
  585. /* Do nothing */
  586. break;
  587. }
  588. mmcsd->power_mode = io_cfg->power_mode;
  589. }
  590. /* Voltage switch */
  591. if (mmcsd->vdd != vdd)
  592. {
  593. if (vdd == 7)
  594. {
  595. /* Switch to 1.8V */
  596. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  597. }
  598. else
  599. {
  600. /* Switch to 3V */
  601. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  602. }
  603. mmcsd->vdd = vdd;
  604. }
  605. /* Set bus width */
  606. if (mmcsd->bus_width != io_cfg->bus_width)
  607. {
  608. switch (io_cfg->bus_width)
  609. {
  610. case MMCSD_BUS_WIDTH_4:
  611. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_4bit);
  612. break;
  613. case MMCSD_BUS_WIDTH_8:
  614. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_8bit);
  615. break;
  616. default:
  617. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_1bit);
  618. break;
  619. }
  620. mmcsd->bus_width = io_cfg->bus_width;
  621. }
  622. /* Set timing mode */
  623. bool need_config_ds = false;
  624. if (mmcsd->timing != io_cfg->timing)
  625. {
  626. switch (io_cfg->timing)
  627. {
  628. case MMCSD_TIMING_LEGACY:
  629. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_normal);
  630. break;
  631. case MMCSD_TIMING_SD_HS:
  632. case MMCSD_TIMING_MMC_HS:
  633. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_high);
  634. break;
  635. case MMCSD_TIMING_UHS_SDR12:
  636. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr12);
  637. break;
  638. case MMCSD_TIMING_UHS_SDR25:
  639. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr25);
  640. break;
  641. case MMCSD_TIMING_UHS_SDR50:
  642. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr50);
  643. break;
  644. case MMCSD_TIMING_UHS_SDR104:
  645. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr104);
  646. break;
  647. case MMCSD_TIMING_UHS_DDR50:
  648. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_ddr50);
  649. /* Must switch to 1.8V signaling for UHS_DDR50 */
  650. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
  651. break;
  652. case MMCSD_TIMING_MMC_DDR52:
  653. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  654. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_high_speed_ddr);
  655. break;
  656. case MMCSD_TIMING_MMC_HS200:
  657. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  658. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs200);
  659. break;
  660. case MMCSD_TIMING_MMC_HS400:
  661. case MMCSD_TIMING_MMC_HS400_ENH_DS:
  662. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  663. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs400);
  664. if (io_cfg->timing == MMCSD_TIMING_MMC_HS400_ENH_DS)
  665. {
  666. sdxc_enable_enhanced_strobe(mmcsd->sdxc_base, true);
  667. uint32_t num_delaycells = sdxc_get_default_strobe_delay(mmcsd->sdxc_base);
  668. sdxc_set_data_strobe_delay(mmcsd->sdxc_base, num_delaycells);
  669. }
  670. need_config_ds = true;
  671. break;
  672. }
  673. mmcsd->timing = io_cfg->timing;
  674. }
  675. /* Initialize SDXC Pins */
  676. bool open_drain = io_cfg->bus_mode == MMCSD_BUSMODE_OPENDRAIN;
  677. bool is_1v8 = (io_cfg->vdd == 7) || (mmcsd->host->valid_ocr == VDD_165_195);
  678. uint32_t width = (io_cfg->bus_width == MMCSD_BUS_WIDTH_8) ? 8 : ((io_cfg->bus_width == MMCSD_BUS_WIDTH_4) ? 4 : 1);
  679. init_sdxc_cmd_pin(mmcsd->sdxc_base, open_drain, is_1v8);
  680. init_sdxc_clk_data_pins(mmcsd->sdxc_base, width, is_1v8);
  681. rt_thread_mdelay(1);
  682. if (need_config_ds)
  683. {
  684. init_sdxc_ds_pin(mmcsd->sdxc_base);
  685. rt_thread_mdelay(1);
  686. }
  687. /* Initialize SDXC clock */
  688. uint32_t sdxc_clock = io_cfg->clock;
  689. if (sdxc_clock != 0U)
  690. {
  691. if (mmcsd->freq != sdxc_clock)
  692. {
  693. bool need_reverse = true;
  694. bool need_card_delay_clk = false;
  695. if ((mmcsd->timing == MMCSD_TIMING_UHS_DDR50) ||
  696. (mmcsd->timing == MMCSD_TIMING_MMC_DDR52) ||
  697. (mmcsd->timing == MMCSD_TIMING_MMC_HS400) ||
  698. (mmcsd->timing == MMCSD_TIMING_MMC_HS400_ENH_DS))
  699. {
  700. need_reverse = false;
  701. need_card_delay_clk = true;
  702. }
  703. /* Ensure request frequency from mmcsd stack level doesn't exceed maximum supported frequency by host */
  704. uint32_t clock_freq = MIN(mmcsd->host->freq_max, sdxc_clock);
  705. clock_freq = board_sd_configure_clock(mmcsd->sdxc_base, clock_freq, need_reverse);
  706. LOG_I("mmcsd clock: %dHz\n", clock_freq);
  707. mmcsd->freq = sdxc_clock;
  708. if (need_card_delay_clk)
  709. {
  710. hpm_sdmmc_set_cardclk_delay_chain(mmcsd);
  711. }
  712. }
  713. }
  714. }
  715. static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
  716. {
  717. RT_ASSERT(host != RT_NULL);
  718. RT_ASSERT(host->private_data != RT_NULL);
  719. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  720. if (en != 0)
  721. {
  722. intc_m_enable_irq_with_priority(mmcsd->irq_num, 1);
  723. }
  724. else
  725. {
  726. intc_m_disable_irq(mmcsd->irq_num);
  727. }
  728. }
  729. static void hpm_sdmmc_host_recovery(SDXC_Type *base)
  730. {
  731. uint32_t pstate = sdxc_get_present_status(base);
  732. bool need_reset_cmd_line = false;
  733. bool need_reset_data_line = false;
  734. if ((pstate & SDXC_PSTATE_CMD_INHIBIT_MASK) != 0U)
  735. {
  736. /* Reset command line */
  737. need_reset_cmd_line = true;
  738. }
  739. if ((pstate & SDXC_PSTATE_DAT_INHIBIT_MASK) != 0U)
  740. {
  741. /* Reset data line */
  742. need_reset_data_line = true;
  743. }
  744. uint32_t int_stat = sdxc_get_interrupt_status(base);
  745. if ((int_stat & 0xF0000UL) != 0U)
  746. {
  747. need_reset_cmd_line = true;
  748. }
  749. if ((int_stat & 0x700000) != 0U)
  750. {
  751. need_reset_data_line = true;
  752. }
  753. if (need_reset_cmd_line)
  754. {
  755. sdxc_reset(base, sdxc_reset_cmd_line, 0xFFFFUL);
  756. }
  757. if (need_reset_data_line)
  758. {
  759. sdxc_reset(base, sdxc_reset_data_line, 0xFFFFUL);
  760. }
  761. if (need_reset_cmd_line || need_reset_data_line)
  762. {
  763. sdxc_clear_interrupt_status(base, ~0UL);
  764. }
  765. rt_thread_mdelay(10);
  766. }
  767. int rt_hw_sdio_init(void)
  768. {
  769. rt_err_t err = RT_EOK;
  770. struct rt_mmcsd_host *host = NULL;
  771. struct hpm_mmcsd *mmcsd = NULL;
  772. for (uint32_t i = 0; i < ARRAY_SIZE(hpm_sdxcs); i++) {
  773. host = mmcsd_alloc_host();
  774. if (host == NULL)
  775. {
  776. err = -RT_ERROR;
  777. break;
  778. }
  779. mmcsd = hpm_sdxcs[i];
  780. host->ops = &hpm_mmcsd_host_ops;
  781. host->freq_min = 375000;
  782. host->freq_max = 50000000;
  783. host->valid_ocr = 0;
  784. /* Determine supported Voltage range */
  785. if (mmcsd->support_3v3)
  786. {
  787. host->valid_ocr |= VDD_30_31 | VDD_31_32 | VDD_32_33 | VDD_33_34;
  788. }
  789. if (mmcsd->support_1v8)
  790. {
  791. host->valid_ocr |= VDD_165_195;
  792. }
  793. /* Determine Host supported features */
  794. host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
  795. if (mmcsd->support_4bit)
  796. {
  797. host->flags |= MMCSD_BUSWIDTH_4;
  798. }
  799. if (mmcsd->support_8bit) {
  800. host->flags |= MMCSD_BUSWIDTH_8;
  801. }
  802. if (mmcsd->support_1v8)
  803. {
  804. host->freq_max = 166000000;
  805. host->flags |= MMCSD_SUP_HS200_1V8;
  806. host->flags |= MMCSD_SUP_SDR50 | MMCSD_SUP_SDR104;
  807. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  808. {
  809. host->flags |= MMCSD_SUP_DDR50;
  810. }
  811. if (mmcsd->support_8bit)
  812. {
  813. host->flags |= MMCSD_SUP_HS400_1V8 | MMCSD_SUP_ENH_DS;
  814. }
  815. }
  816. /* For eMMC device, add High Speed DDR mode support as long as it is supported by the host controller */
  817. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  818. {
  819. host->flags |= MMCSD_SUP_HIGHSPEED_DDR;
  820. }
  821. rt_strncpy(host->name, mmcsd->name, RT_NAME_MAX);
  822. host->max_seg_size = 0x80000;
  823. host->max_dma_segs = 1;
  824. host->max_blk_size = 512;
  825. host->max_blk_count = 1024;
  826. mmcsd->host = host;
  827. /* Perform necessary initialization */
  828. board_sd_configure_clock(mmcsd->sdxc_base, 375000, true);
  829. sdxc_config_t sdxc_config = { 0 };
  830. sdxc_config.data_timeout = 1000;
  831. sdxc_init(mmcsd->sdxc_base, &sdxc_config);
  832. host->private_data = mmcsd;
  833. /* Initialize PWR pin and VSEL pin */
  834. if (mmcsd->pwr_pin_name != RT_NULL)
  835. {
  836. hpm_sdmmc_pin_init(mmcsd->pwr_pin_name, true);
  837. rt_thread_mdelay(1);
  838. if (host->valid_ocr == VDD_165_195)
  839. {
  840. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  841. }
  842. else
  843. {
  844. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  845. }
  846. }
  847. if (mmcsd->vsel_pin_name != RT_NULL)
  848. {
  849. hpm_sdmmc_pin_init(mmcsd->vsel_pin_name, true);
  850. rt_thread_mdelay(1);
  851. }
  852. mmcsd_change(host);
  853. };
  854. if (err != RT_EOK)
  855. {
  856. if (host != NULL)
  857. {
  858. mmcsd_free_host(host);
  859. host = NULL;
  860. }
  861. }
  862. return err;
  863. }
  864. INIT_DEVICE_EXPORT(rt_hw_sdio_init);
  865. #endif