drv_sdio.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978
  1. /*
  2. * Copyright (c) 2022-2025 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-02-23 HPMicro First version
  9. * 2022-07-19 HPMicro Fixed the multi-block read/write issue
  10. * 2023-07-27 HPMicro Fixed clock setting issue
  11. * 2023-08-02 HPMicro Added speed mode setting
  12. * 2024-01-03 HPMicro Added multiple instance support
  13. * 2024-05-23 HPMicro Fixed unaligned transfer issue in the SDIO case
  14. * 2024-05-25 HPMicro Added HS200 & HS400 support, optimize the cache-management policy for read
  15. * 2024-05-26 HPMicro Added UHS-I support, added DDR50 and High Speed DDR mode support
  16. * 2024-06-19 HPMicro Added timeout check for SDXC transfer
  17. */
  18. #include <rtthread.h>
  19. #ifdef BSP_USING_SDXC
  20. #include <rthw.h>
  21. #include <rtdevice.h>
  22. #include <rtdbg.h>
  23. #include "board.h"
  24. #include "hpm_sdxc_drv.h"
  25. #include "hpm_l1c_drv.h"
  26. #define CACHE_LINESIZE HPM_L1C_CACHELINE_SIZE
  27. #define SDXC_ADMA_TABLE_WORDS (2U)
  28. #define SDXC_AMDA2_ADDR_ALIGN (4U)
  29. #define SDXC_DATA_TIMEOUT (0xFU)
  30. #define SDXC_CACHELINE_ALIGN_DOWN(x) HPM_L1C_CACHELINE_ALIGN_DOWN(x)
  31. #define SDXC_CACHELINE_ALIGN_UP(x) HPM_L1C_CACHELINE_ALIGN_UP(x)
  32. #define SDXC_IS_CACHELINE_ALIGNED(n) ((uint32_t)(n) % (uint32_t)(CACHE_LINESIZE) == 0U)
  33. /**
  34. * Note: Allocate cache-line aligned buffer in the SD/eMMC read/write case may require larger heap size
  35. * if the read/write length is a big number (for example: 64KB), the RT-Thread RTOS may
  36. * be unable to allocate enough size of buffer if the heap size is small.
  37. *
  38. * Keep this option disabled by default, please enable it if the default setting cannot meet
  39. * real requirement of application.
  40. */
  41. #ifndef HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF
  42. #define HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF 0
  43. #endif
/* Driver-private state for one SDXC host controller instance */
struct hpm_mmcsd
{
    struct rt_mmcsd_host *host;     /* RT-Thread host registered for this instance */
    struct rt_mmcsd_req *req;       /* NOTE(review): not referenced in this file - confirm usage */
    struct rt_mmcsd_cmd *cmd;       /* NOTE(review): not referenced in this file - confirm usage */
    struct rt_timer *timer;         /* NOTE(review): not referenced in this file - confirm usage */
    char name[RT_NAME_MAX];         /* host name, e.g. "sd0" */
    rt_uint32_t *buf;               /* NOTE(review): not referenced in this file - confirm usage */
    SDXC_Type *sdxc_base;           /* SDXC controller register base */
    int32_t irq_num;                /* SDXC interrupt number */
    uint32_t *sdxc_adma2_table;     /* ADMA2 descriptor table (placed in non-cacheable RAM) */
    bool support_8bit;              /* 8-bit data bus available (eMMC) */
    bool support_4bit;              /* 4-bit data bus available */
    bool support_1v8;               /* 1.8V signaling supported */
    bool support_3v3;               /* 3.3V signaling supported */
    uint8_t power_mode;             /* cached MMCSD_POWER_* state from last set_iocfg */
    uint8_t bus_width;              /* cached MMCSD_BUS_WIDTH_* state */
    uint8_t timing;                 /* cached MMCSD_TIMING_* state */
    uint8_t bus_mode;               /* NOTE(review): not referenced in this file - confirm usage */
    uint32_t freq;                  /* cached SD clock frequency (Hz) */
    uint16_t vdd;                   /* cached io_cfg->vdd value */
    const char *vsel_pin_name;      /* optional voltage-select pin name (may be RT_NULL) */
    const char *pwr_pin_name;       /* optional power-control pin name (may be RT_NULL) */
};
/**
 * @brief SDIO CMD53 (IO_RW_EXTENDED) argument layout
 *
 * Bit-field order (LSB first) follows the SDIO specification:
 * count[8:0], register address[25:9], OP code (bit 26), block mode (bit 27),
 * function number (bits 30:28), R/W flag (bit 31).
 */
typedef union
{
    uint32_t value;                 /* raw 32-bit command argument */
    struct
    {
        uint32_t count :9;          /* byte count (byte mode) or block count (block mode) */
        uint32_t reg_addr :17;      /* register address within the I/O function */
        uint32_t op_code :1;        /* 0: fixed address, 1: incrementing address */
        uint32_t block_mode :1;     /* 0: byte mode, 1: block mode */
        uint32_t func_num :3;       /* I/O function number */
        uint32_t rw_flag :1;        /* 0: read, 1: write */
    };
} sdio_cmd53_arg_t;
/* Forward declarations: rt_mmcsd_host_ops callbacks and file-local helpers */
static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req);
static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg);
static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en);
static void hpm_sdmmc_host_recovery(SDXC_Type *base);
static hpm_stat_t hpm_sdmmc_transfer(SDXC_Type *base, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode);
static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host);
static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output);
static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value);
  97. static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output)
  98. {
  99. rt_base_t pin = rt_pin_get(pin_name);
  100. if (pin < 0) {
  101. return;
  102. }
  103. rt_uint8_t mode = (is_output) ? PIN_MODE_OUTPUT : PIN_MODE_INPUT_PULLUP;
  104. if (is_output)
  105. {
  106. rt_pin_mode(pin, mode);
  107. }
  108. }
  109. static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value)
  110. {
  111. rt_base_t pin = rt_pin_get(pin_name);
  112. if (pin < 0)
  113. {
  114. return;
  115. }
  116. rt_pin_write(pin, value);
  117. }
/* Assert the card power-control pin (active level 1 per board wiring) */
static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 1);
}
/* De-assert the card power-control pin */
static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 0);
}
/* Drive the voltage-select pin to the 3.3V level (0 per board wiring) */
static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 0);
}
/* Drive the voltage-select pin to the 1.8V level (1 per board wiring) */
static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 1);
}
  134. static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host)
  135. {
  136. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  137. SDXC_Type *base = mmcsd->sdxc_base;
  138. /* 1. Stop providing clock to the card */
  139. sdxc_enable_inverse_clock(mmcsd->sdxc_base, false);
  140. sdxc_enable_sd_clock(mmcsd->sdxc_base, false);
  141. /* 2. Wait until DAT[3:0] are 4'b0000 */
  142. uint32_t data3_0_level;
  143. uint32_t delay_cnt = 1000000UL;
  144. do
  145. {
  146. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  147. --delay_cnt;
  148. } while ((data3_0_level != 0U) && (delay_cnt > 0U));
  149. if (delay_cnt < 1)
  150. {
  151. return -RT_ETIMEOUT;
  152. }
  153. /* 3. Switch to 1.8V */
  154. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  155. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
  156. /* 4. spec:host delay 5ms, host: give more delay time here */
  157. rt_thread_mdelay(10);
  158. /* 5. Provide SD clock the card again */
  159. sdxc_enable_inverse_clock(mmcsd->sdxc_base, true);
  160. sdxc_enable_sd_clock(mmcsd->sdxc_base, true);
  161. /* 6. spec: wait 1ms, host: give more delay time here */
  162. rt_thread_mdelay(5);
  163. /* 7. Check DAT[3:0], make sure the value is 4'b0000 */
  164. delay_cnt = 1000000UL;
  165. data3_0_level;
  166. do
  167. {
  168. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  169. --delay_cnt;
  170. } while ((data3_0_level == 0U) && (delay_cnt > 0U));
  171. if (delay_cnt < 1)
  172. {
  173. return -RT_ETIMEOUT;
  174. }
  175. return RT_EOK;
  176. }
/* Host operations registered with the RT-Thread MMC/SD core */
static const struct rt_mmcsd_host_ops hpm_mmcsd_host_ops =
{
    .request = hpm_sdmmc_request,
    .set_iocfg = hpm_sdmmc_set_iocfg,
    .get_card_status = NULL,            /* not supported */
    /* NOTE(review): hpm_sdmmc_enable_sdio_irq() exists in this file but is
     * deliberately(?) not hooked up here - confirm whether SDIO IRQ support
     * should be wired in. */
    .enable_sdio_irq = NULL,
    .execute_tuning = hpm_sdmmc_execute_tuning,
    .switch_uhs_voltage = hpm_sdmmc_switch_uhs_voltage,
};
#if defined(BSP_USING_SDXC0)
/* Place the ADMA2 table in a non-cacheable region so the DMA engine and CPU
 * always see a coherent descriptor list. */
ATTR_PLACE_AT_NONCACHEABLE static uint32_t s_sdxc0_adma2_table[SDXC_ADMA_TABLE_WORDS];
/* SDXC0 instance descriptor; bus width and voltage capabilities come from
 * the board Kconfig (BSP_SDXC0_*). */
static struct hpm_mmcsd s_hpm_sdxc0 =
{
    .name = "sd0",
    .sdxc_base = HPM_SDXC0,
    .sdxc_adma2_table = s_sdxc0_adma2_table,
    .irq_num = IRQn_SDXC0,
#if defined(BSP_SDXC0_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_1BIT)
#else
    /* default to 4-bit when no bus-width option is selected */
    .support_4bit = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC0_VSEL_PIN,
#endif
#if defined(BSP_SDXC0_PWR_PIN)
    .pwr_pin_name = BSP_SDXC0_PWR_PIN,
#endif
};
#endif
#if defined(BSP_USING_SDXC1)
/* Place the ADMA2 table in a non-cacheable region (see SDXC0 note) */
ATTR_PLACE_AT_NONCACHEABLE static uint32_t s_sdxc1_adma2_table[SDXC_ADMA_TABLE_WORDS];
/* SDXC1 instance descriptor; capabilities come from Kconfig (BSP_SDXC1_*) */
static struct hpm_mmcsd s_hpm_sdxc1 =
{
    .name = "sd1",
    .sdxc_base = HPM_SDXC1,
    .sdxc_adma2_table = s_sdxc1_adma2_table,
    .irq_num = IRQn_SDXC1,
#if defined(BSP_SDXC1_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_1BIT)
#else
    /* default to 4-bit when no bus-width option is selected */
    .support_4bit = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC1_VSEL_PIN,
#endif
#if defined(BSP_SDXC1_PWR_PIN)
    .pwr_pin_name = BSP_SDXC1_PWR_PIN,
#endif
};
#endif
/* Table of all enabled SDXC instances, iterated by rt_hw_sdio_init() */
static struct hpm_mmcsd *hpm_sdxcs[] =
{
#if defined(BSP_USING_SDXC0)
    &s_hpm_sdxc0,
#endif
#if defined(BSP_USING_SDXC1)
    &s_hpm_sdxc1,
#endif
};
  268. static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
  269. {
  270. RT_ASSERT(host != RT_NULL); RT_ASSERT(host->private_data != RT_NULL);
  271. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  272. SDXC_Type *base = mmcsd->sdxc_base;
  273. /* Prepare the Auto tuning environment */
  274. sdxc_stop_clock_during_phase_code_change(base, true);
  275. sdxc_set_post_change_delay(base, 3U);
  276. sdxc_select_cardclk_delay_source(base, false);
  277. sdxc_enable_power(base, true);
  278. hpm_stat_t err = sdxc_perform_auto_tuning(base, opcode);
  279. return (err != status_success) ? -RT_EPERM : RT_EOK;
  280. }
/**
 * @brief Start one SDXC transfer and busy-wait for its completion.
 *
 * Kicks off the transfer with sdxc_transfer_nonblocking(), then polls the
 * interrupt status register for command completion and - when a data phase
 * is present - for transfer completion. Each polling loop is bounded by a
 * one-tick-per-second timeout (RT_TICK_PER_SECOND).
 *
 * @param base       SDXC controller register base
 * @param dma_config ADMA configuration used for the data phase
 * @param xfer       command (+ optional data) descriptor
 * @return status_success on success; -RT_ERROR when the transfer could not
 *         be started; -RT_ETIMEOUT on poll timeout; otherwise the error from
 *         sdxc_parse_interrupt_status()/sdxc_receive_cmd_response().
 *         NOTE(review): RT error codes are funneled through hpm_stat_t here;
 *         callers only compare against status_success.
 */
static hpm_stat_t hpm_sdmmc_transfer(SDXC_Type *base, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
{
    hpm_stat_t status;
    sdxc_command_t *cmd = xfer->command;
    sdxc_data_t *data = xfer->data;
    status = sdxc_transfer_nonblocking(base, dma_config, xfer);
    if (status != status_success)
    {
        return -RT_ERROR;
    }
    /* Wait until the command phase completes, errors out, or times out */
    volatile uint32_t interrupt_status = sdxc_get_interrupt_status(base);
    volatile rt_base_t start_tick = rt_tick_get();
    while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK))
    {
        interrupt_status = sdxc_get_interrupt_status(base);
        status = sdxc_parse_interrupt_status(base);
        HPM_BREAK_IF(status != status_success);    /* leave loop on error status */
        rt_base_t current_tick = rt_tick_get();
        if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
        {
            return -RT_ETIMEOUT;
        }
    }
    sdxc_clear_interrupt_status(base, SDXC_INT_STAT_CMD_COMPLETE_MASK);
    if (status == status_success)
    {
        status = sdxc_receive_cmd_response(base, cmd);
    }
    /* Data phase: wait for transfer-complete or an error flag */
    if ((status == status_success) && (data != RT_NULL))
    {
        interrupt_status = sdxc_get_interrupt_status(base);
        start_tick = rt_tick_get();
        while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_STS_ERROR))
        {
            interrupt_status = sdxc_get_interrupt_status(base);
            status = sdxc_parse_interrupt_status(base);
            HPM_BREAK_IF(status != status_success);
            rt_base_t current_tick = rt_tick_get();
            if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
            {
                return -RT_ETIMEOUT;
            }
        }
    }
    return status;
}
  328. /**
  329. * !@brief SDMMC request implementation based on HPMicro SDXC Host
  330. */
  331. static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
  332. {
  333. RT_ASSERT(host != RT_NULL);
  334. RT_ASSERT(host->private_data != RT_NULL);
  335. RT_ASSERT(req != RT_NULL);
  336. RT_ASSERT(req->cmd != RT_NULL);
  337. sdxc_adma_config_t adma_config = { 0 };
  338. sdxc_xfer_t xfer = { 0 };
  339. sdxc_command_t sdxc_cmd = { 0 };
  340. sdxc_data_t sdxc_data = { 0 };
  341. uint32_t *raw_alloc_buf = RT_NULL;
  342. uint32_t *aligned_buf = RT_NULL;
  343. hpm_stat_t err = status_invalid_argument;
  344. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  345. struct rt_mmcsd_cmd *cmd = req->cmd;
  346. struct rt_mmcsd_data *data = cmd->data;
  347. /* configure command */
  348. sdxc_cmd.cmd_index = cmd->cmd_code;
  349. sdxc_cmd.cmd_argument = cmd->arg;
  350. sdxc_cmd.cmd_type = (cmd->cmd_code == STOP_TRANSMISSION) ? sdxc_cmd_type_abort_cmd : sdxc_cmd_type_normal_cmd;
  351. switch (cmd->flags & RESP_MASK)
  352. {
  353. case RESP_NONE:
  354. sdxc_cmd.resp_type = sdxc_dev_resp_none;
  355. break;
  356. case RESP_R1:
  357. sdxc_cmd.resp_type = sdxc_dev_resp_r1;
  358. break;
  359. case RESP_R1B:
  360. sdxc_cmd.resp_type = sdxc_dev_resp_r1b;
  361. break;
  362. case RESP_R2:
  363. sdxc_cmd.resp_type = sdxc_dev_resp_r2;
  364. break;
  365. case RESP_R3:
  366. sdxc_cmd.resp_type = sdxc_dev_resp_r3;
  367. break;
  368. case RESP_R4:
  369. sdxc_cmd.resp_type = sdxc_dev_resp_r4;
  370. break;
  371. case RESP_R6:
  372. sdxc_cmd.resp_type = sdxc_dev_resp_r6;
  373. break;
  374. case RESP_R7:
  375. sdxc_cmd.resp_type = sdxc_dev_resp_r7;
  376. break;
  377. case RESP_R5:
  378. sdxc_cmd.resp_type = sdxc_dev_resp_r5;
  379. break;
  380. default:
  381. RT_ASSERT(NULL);
  382. break;
  383. }
  384. sdxc_cmd.cmd_flags = 0UL;
  385. xfer.command = &sdxc_cmd;
  386. xfer.data = NULL;
  387. if (data != NULL)
  388. {
  389. sdxc_data.enable_auto_cmd12 = false;
  390. sdxc_data.enable_auto_cmd23 = false;
  391. sdxc_data.enable_ignore_error = false;
  392. sdxc_data.data_type = sdxc_xfer_data_normal;
  393. sdxc_data.block_size = data->blksize;
  394. sdxc_data.block_cnt = data->blks;
  395. /* configure adma2 */
  396. adma_config.dma_type = sdxc_dmasel_adma2;
  397. adma_config.adma_table = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE,
  398. (uint32_t) mmcsd->sdxc_adma2_table);
  399. adma_config.adma_table_words = SDXC_ADMA_TABLE_WORDS;
  400. rt_size_t xfer_buf_addr = (uint32_t)data->buf;
  401. uint32_t xfer_len = data->blks * data->blksize;
  402. if ((req->data->flags & DATA_DIR_WRITE) != 0U)
  403. {
  404. uint32_t write_size = xfer_len;
  405. rt_size_t aligned_start;
  406. uint32_t aligned_size;
  407. #if defined(HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF) && (HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF == 1)
  408. if (!SDXC_IS_CACHELINE_ALIGNED(xfer_buf_addr) || !SDXC_IS_CACHELINE_ALIGNED(write_size))
  409. #else
  410. if ((xfer_buf_addr % 4 != 0) || (write_size % 4 != 0))
  411. #endif
  412. {
  413. write_size = SDXC_CACHELINE_ALIGN_UP(xfer_len);
  414. raw_alloc_buf = (uint32_t *) rt_malloc(write_size + CACHE_LINESIZE - RT_ALIGN_SIZE);
  415. RT_ASSERT(raw_alloc_buf != RT_NULL);
  416. aligned_buf = (uint32_t *) SDXC_CACHELINE_ALIGN_UP(raw_alloc_buf);
  417. RT_ASSERT(aligned_buf != RT_NULL);
  418. memcpy(aligned_buf, data->buf, xfer_len);
  419. memset(aligned_buf + write_size, 0, write_size - xfer_len);
  420. sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) aligned_buf);
  421. aligned_start = (uint32_t)sdxc_data.tx_data;
  422. aligned_size = write_size;
  423. }
  424. else
  425. {
  426. sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  427. aligned_start = SDXC_CACHELINE_ALIGN_DOWN(sdxc_data.tx_data);
  428. rt_size_t aligned_end = SDXC_CACHELINE_ALIGN_UP((uint32_t)sdxc_data.tx_data + write_size);
  429. aligned_size = aligned_end - aligned_start;
  430. }
  431. l1c_dc_flush(aligned_start, aligned_size);
  432. sdxc_data.rx_data = NULL;
  433. }
  434. else
  435. {
  436. uint32_t read_size = xfer_len;
  437. #if defined(HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF) && (HPM_SDXC_ALLOC_CACHELINE_ALIGNED_BUF == 1)
  438. if (!SDXC_IS_CACHELINE_ALIGNED(xfer_buf_addr) || !SDXC_IS_CACHELINE_ALIGNED(read_size))
  439. #else
  440. if ((xfer_buf_addr % 4 != 0) || (read_size % 4 != 0))
  441. #endif
  442. {
  443. uint32_t aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
  444. raw_alloc_buf = (uint32_t *) rt_malloc(aligned_read_size + CACHE_LINESIZE - RT_ALIGN_SIZE);
  445. RT_ASSERT(raw_alloc_buf != RT_NULL);
  446. aligned_buf = (uint32_t *) SDXC_CACHELINE_ALIGN_UP(raw_alloc_buf);
  447. sdxc_data.rx_data = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) aligned_buf);
  448. /* Invalidate cache-line for the new allocated buffer */
  449. l1c_dc_invalidate((uint32_t) sdxc_data.rx_data, aligned_read_size);
  450. }
  451. else
  452. {
  453. sdxc_data.rx_data = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  454. rt_size_t buf_start = (uint32_t) sdxc_data.rx_data;
  455. rt_size_t aligned_start = HPM_L1C_CACHELINE_ALIGN_DOWN(buf_start);
  456. rt_size_t end_addr = buf_start + xfer_len;
  457. /* FLUSH un-cacheline aligned memory region */
  458. if ((buf_start % HPM_L1C_CACHELINE_SIZE) != 0) {
  459. l1c_dc_writeback(aligned_start, HPM_L1C_CACHELINE_SIZE);
  460. }
  461. if ((end_addr % HPM_L1C_CACHELINE_SIZE) != 0) {
  462. uint32_t aligned_tail = HPM_L1C_CACHELINE_ALIGN_DOWN(end_addr);
  463. l1c_dc_writeback(aligned_tail, HPM_L1C_CACHELINE_SIZE);
  464. }
  465. }
  466. sdxc_data.tx_data = RT_NULL;
  467. }
  468. xfer.data = &sdxc_data;
  469. /* Align the write/read size since the ADMA2 engine in the SDXC cannot transfer unaligned size of data */
  470. if ((cmd->cmd_code == SD_IO_RW_EXTENDED) && (xfer_len % 4 != 0))
  471. {
  472. sdio_cmd53_arg_t cmd53_arg;
  473. cmd53_arg.value = sdxc_cmd.cmd_argument;
  474. cmd53_arg.count = HPM_ALIGN_UP(xfer_len, 4);
  475. sdxc_cmd.cmd_argument = cmd53_arg.value;
  476. sdxc_data.block_size = HPM_ALIGN_UP(xfer_len, 4);
  477. }
  478. }
  479. if ((req->data->blks > 1) && ((cmd->cmd_code == READ_MULTIPLE_BLOCK) || ((cmd->cmd_code == WRITE_MULTIPLE_BLOCK))))
  480. {
  481. xfer.data->enable_auto_cmd12 = true;
  482. }
  483. err = hpm_sdmmc_transfer(mmcsd->sdxc_base, &adma_config, &xfer);
  484. LOG_I("cmd=%d, arg=%x\n", cmd->cmd_code, cmd->arg);
  485. if (err != status_success)
  486. {
  487. hpm_sdmmc_host_recovery(mmcsd->sdxc_base);
  488. if (err != status_sdxc_cmd_timeout_error) /* Ignore command timeout error by default */
  489. {
  490. LOG_E(" ***hpm_sdmmc_transfer error: %d, cmd:%d, arg:0x%x*** -->\n", err, cmd->cmd_code, cmd->arg);
  491. }
  492. cmd->err = -RT_ERROR;
  493. }
  494. else
  495. {
  496. LOG_I(" ***hpm_sdmmc_transfer passed: %d*** -->\n", err);
  497. if (sdxc_cmd.resp_type == sdxc_dev_resp_r2)
  498. {
  499. LOG_I("resp:0x%08x 0x%08x 0x%08x 0x%08x\n", sdxc_cmd.response[0],
  500. sdxc_cmd.response[1], sdxc_cmd.response[2], sdxc_cmd.response[3]);
  501. }
  502. else
  503. {
  504. LOG_I("resp:0x%08x\n", sdxc_cmd.response[0]);
  505. }
  506. }
  507. if ((sdxc_data.rx_data != NULL) && (cmd->err == RT_EOK))
  508. {
  509. uint32_t read_size = data->blks * data->blksize;
  510. if (aligned_buf != RT_NULL)
  511. {
  512. uint32_t aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
  513. rt_base_t level = rt_hw_interrupt_disable();
  514. l1c_dc_invalidate((uint32_t) aligned_buf, aligned_read_size);
  515. rt_hw_interrupt_enable(level);
  516. memcpy(data->buf, aligned_buf, read_size);
  517. }
  518. else
  519. {
  520. rt_size_t aligned_start = SDXC_CACHELINE_ALIGN_DOWN(sdxc_data.rx_data);
  521. rt_size_t aligned_end = SDXC_CACHELINE_ALIGN_UP((uint32_t)sdxc_data.rx_data + read_size);
  522. uint32_t aligned_size = aligned_end - aligned_start;
  523. rt_base_t level = rt_hw_interrupt_disable();
  524. l1c_dc_invalidate(aligned_start, aligned_size);
  525. rt_hw_interrupt_enable(level);
  526. }
  527. }
  528. if (raw_alloc_buf != RT_NULL)
  529. {
  530. rt_free(raw_alloc_buf);
  531. raw_alloc_buf = RT_NULL;
  532. aligned_buf = RT_NULL;
  533. }
  534. if ((cmd->flags & RESP_MASK) == RESP_R2)
  535. {
  536. cmd->resp[3] = sdxc_cmd.response[0];
  537. cmd->resp[2] = sdxc_cmd.response[1];
  538. cmd->resp[1] = sdxc_cmd.response[2];
  539. cmd->resp[0] = sdxc_cmd.response[3];
  540. }
  541. else
  542. {
  543. cmd->resp[0] = sdxc_cmd.response[0];
  544. }
  545. mmcsd_req_complete(host);
  546. }
  547. static void hpm_sdmmc_set_cardclk_delay_chain(struct hpm_mmcsd *mmcsd)
  548. {
  549. SDXC_Type *base = mmcsd->sdxc_base;
  550. bool need_inverse = sdxc_is_inverse_clock_enabled(base);
  551. sdxc_enable_inverse_clock(base, false);
  552. sdxc_enable_sd_clock(base, false);
  553. uint32_t num_delaycells = sdxc_get_default_cardclk_delay_chain(base, mmcsd->freq);
  554. sdxc_set_cardclk_delay_chain(base, num_delaycells);
  555. sdxc_enable_inverse_clock(base, need_inverse);
  556. sdxc_enable_sd_clock(base, true);
  557. }
/* Weak default for the eMMC HS400 data-strobe (DS) pin setup; boards that
 * route a DS pin override this in their board support code. */
ATTR_WEAK void init_sdxc_ds_pin(SDXC_Type *base)
{
    LOG_W("Ignore this warning if the DS pin is not supported\n");
}
/**
 * !@brief Set IO Configuration for HPMicro IO and SDXC Host
 *
 * Applies the io_cfg requested by the mmcsd stack: power state, signaling
 * voltage, bus width, timing/speed mode, pin configuration and SD clock
 * frequency. Each setting is only re-applied when it differs from the value
 * cached in struct hpm_mmcsd.
 */
static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
{
    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    RT_ASSERT(io_cfg != RT_NULL);
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
    /* Power control */
    uint32_t vdd = io_cfg->vdd;
    if (io_cfg->power_mode != mmcsd->power_mode)
    {
        switch(io_cfg->power_mode)
        {
        case MMCSD_POWER_OFF:
            hpm_sdmmc_power_off_via_pin(mmcsd);
            break;
        case MMCSD_POWER_ON:
            hpm_sdmmc_power_on_via_pin(mmcsd);
            break;
        case MMCSD_POWER_UP:
            /* Power-cycle: off -> 10ms -> on */
            hpm_sdmmc_power_off_via_pin(mmcsd);
            rt_thread_mdelay(10);
            hpm_sdmmc_power_on_via_pin(mmcsd);
            /* After power up, wait 1ms, then wait 74 card clock */
            rt_thread_mdelay(1);
            sdxc_wait_card_active(mmcsd->sdxc_base);
            break;
        default:
            /* Do nothing */
            break;
        }
        mmcsd->power_mode = io_cfg->power_mode;
    }
    /* Voltage switch; NOTE(review): vdd == 7 presumably is the OCR bit index
     * for the 1.65-1.95V range - confirm against the mmcsd stack. */
    if (mmcsd->vdd != vdd)
    {
        if (vdd == 7)
        {
            /* Switch to 1.8V */
            hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
        }
        else
        {
            /* Switch to 3V */
            hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
        }
        mmcsd->vdd = vdd;
    }
    /* Set bus width */
    if (mmcsd->bus_width != io_cfg->bus_width)
    {
        switch (io_cfg->bus_width)
        {
        case MMCSD_BUS_WIDTH_4:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_4bit);
            break;
        case MMCSD_BUS_WIDTH_8:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_8bit);
            break;
        default:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_1bit);
            break;
        }
        mmcsd->bus_width = io_cfg->bus_width;
    }
    /* Set timing mode; HS400(+ES) additionally needs the DS pin configured */
    bool need_config_ds = false;
    if (mmcsd->timing != io_cfg->timing)
    {
        /* NOTE(review): no default case - unknown timing values silently
         * leave the speed mode unchanged (but still update mmcsd->timing). */
        switch (io_cfg->timing)
        {
        case MMCSD_TIMING_LEGACY:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_normal);
            break;
        case MMCSD_TIMING_SD_HS:
        case MMCSD_TIMING_MMC_HS:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_high);
            break;
        case MMCSD_TIMING_UHS_SDR12:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr12);
            break;
        case MMCSD_TIMING_UHS_SDR25:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr25);
            break;
        case MMCSD_TIMING_UHS_SDR50:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr50);
            break;
        case MMCSD_TIMING_UHS_SDR104:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr104);
            break;
        case MMCSD_TIMING_UHS_DDR50:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_ddr50);
            /* Must switch to 1.8V signaling for UHS_DDR50 */
            sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
            break;
        case MMCSD_TIMING_MMC_DDR52:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_high_speed_ddr);
            break;
        case MMCSD_TIMING_MMC_HS200:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs200);
            break;
        case MMCSD_TIMING_MMC_HS400:
        case MMCSD_TIMING_MMC_HS400_ENH_DS:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs400);
            if (io_cfg->timing == MMCSD_TIMING_MMC_HS400_ENH_DS)
            {
                /* Enhanced strobe: sample with the card's data strobe */
                sdxc_enable_enhanced_strobe(mmcsd->sdxc_base, true);
                uint32_t num_delaycells = sdxc_get_default_strobe_delay(mmcsd->sdxc_base);
                sdxc_set_data_strobe_delay(mmcsd->sdxc_base, num_delaycells);
            }
            need_config_ds = true;
            break;
        }
        mmcsd->timing = io_cfg->timing;
    }
    /* Initialize SDXC Pins for the selected bus mode/voltage/width */
    bool open_drain = io_cfg->bus_mode == MMCSD_BUSMODE_OPENDRAIN;
    bool is_1v8 = (io_cfg->vdd == 7) || (mmcsd->host->valid_ocr == VDD_165_195);
    uint32_t width = (io_cfg->bus_width == MMCSD_BUS_WIDTH_8) ? 8 : ((io_cfg->bus_width == MMCSD_BUS_WIDTH_4) ? 4 : 1);
    init_sdxc_cmd_pin(mmcsd->sdxc_base, open_drain, is_1v8);
    init_sdxc_clk_data_pins(mmcsd->sdxc_base, width, is_1v8);
    rt_thread_mdelay(1);
    if (need_config_ds)
    {
        init_sdxc_ds_pin(mmcsd->sdxc_base);
        rt_thread_mdelay(1);
    }
    /* Initialize SDXC clock */
    uint32_t sdxc_clock = io_cfg->clock;
    if (sdxc_clock != 0U)
    {
        if (mmcsd->freq != sdxc_clock)
        {
            /* DDR modes sample on both edges: no clock inversion, but the
             * card clock delay chain must be tuned */
            bool need_reverse = true;
            bool need_card_delay_clk = false;
            if ((mmcsd->timing == MMCSD_TIMING_UHS_DDR50) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_DDR52) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_HS400) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_HS400_ENH_DS))
            {
                need_reverse = false;
                need_card_delay_clk = true;
            }
            /* Ensure request frequency from mmcsd stack level doesn't exceed maximum supported frequency by host */
            uint32_t clock_freq = MIN(mmcsd->host->freq_max, sdxc_clock);
            clock_freq = board_sd_configure_clock(mmcsd->sdxc_base, clock_freq, need_reverse);
            LOG_I("mmcsd clock: %dHz\n", clock_freq);
            mmcsd->freq = sdxc_clock;
            if (need_card_delay_clk)
            {
                hpm_sdmmc_set_cardclk_delay_chain(mmcsd);
            }
        }
    }
}
  722. static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
  723. {
  724. RT_ASSERT(host != RT_NULL);
  725. RT_ASSERT(host->private_data != RT_NULL);
  726. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  727. if (en != 0)
  728. {
  729. intc_m_enable_irq_with_priority(mmcsd->irq_num, 1);
  730. }
  731. else
  732. {
  733. intc_m_disable_irq(mmcsd->irq_num);
  734. }
  735. }
  736. static void hpm_sdmmc_host_recovery(SDXC_Type *base)
  737. {
  738. uint32_t pstate = sdxc_get_present_status(base);
  739. bool need_reset_cmd_line = false;
  740. bool need_reset_data_line = false;
  741. if ((pstate & SDXC_PSTATE_CMD_INHIBIT_MASK) != 0U)
  742. {
  743. /* Reset command line */
  744. need_reset_cmd_line = true;
  745. }
  746. if ((pstate & SDXC_PSTATE_DAT_INHIBIT_MASK) != 0U)
  747. {
  748. /* Reset data line */
  749. need_reset_data_line = true;
  750. }
  751. uint32_t int_stat = sdxc_get_interrupt_status(base);
  752. if ((int_stat & 0xF0000UL) != 0U)
  753. {
  754. need_reset_cmd_line = true;
  755. }
  756. if ((int_stat & 0x700000) != 0U)
  757. {
  758. need_reset_data_line = true;
  759. }
  760. if (need_reset_cmd_line)
  761. {
  762. sdxc_reset(base, sdxc_reset_cmd_line, 0xFFFFUL);
  763. }
  764. if (need_reset_data_line)
  765. {
  766. sdxc_reset(base, sdxc_reset_data_line, 0xFFFFUL);
  767. }
  768. if (need_reset_cmd_line || need_reset_data_line)
  769. {
  770. sdxc_clear_interrupt_status(base, ~0UL);
  771. }
  772. rt_thread_mdelay(10);
  773. }
  774. int rt_hw_sdio_init(void)
  775. {
  776. rt_err_t err = RT_EOK;
  777. struct rt_mmcsd_host *host = NULL;
  778. struct hpm_mmcsd *mmcsd = NULL;
  779. for (uint32_t i = 0; i < ARRAY_SIZE(hpm_sdxcs); i++) {
  780. host = mmcsd_alloc_host();
  781. if (host == NULL)
  782. {
  783. err = -RT_ERROR;
  784. break;
  785. }
  786. mmcsd = hpm_sdxcs[i];
  787. host->ops = &hpm_mmcsd_host_ops;
  788. host->freq_min = 375000;
  789. host->freq_max = 50000000;
  790. host->valid_ocr = 0;
  791. /* Determine supported Voltage range */
  792. if (mmcsd->support_3v3)
  793. {
  794. host->valid_ocr |= VDD_30_31 | VDD_31_32 | VDD_32_33 | VDD_33_34;
  795. }
  796. if (mmcsd->support_1v8)
  797. {
  798. host->valid_ocr |= VDD_165_195;
  799. }
  800. /* Determine Host supported features */
  801. host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
  802. if (mmcsd->support_4bit)
  803. {
  804. host->flags |= MMCSD_BUSWIDTH_4;
  805. }
  806. if (mmcsd->support_8bit) {
  807. host->flags |= MMCSD_BUSWIDTH_8;
  808. }
  809. if (mmcsd->support_1v8)
  810. {
  811. host->freq_max = 166000000;
  812. host->flags |= MMCSD_SUP_HS200_1V8;
  813. host->flags |= MMCSD_SUP_SDR50 | MMCSD_SUP_SDR104;
  814. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  815. {
  816. host->flags |= MMCSD_SUP_DDR50;
  817. }
  818. if (mmcsd->support_8bit)
  819. {
  820. host->flags |= MMCSD_SUP_HS400_1V8 | MMCSD_SUP_ENH_DS;
  821. }
  822. }
  823. /* For eMMC device, add High Speed DDR mode support as long as it is supported by the host controller */
  824. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  825. {
  826. host->flags |= MMCSD_SUP_HIGHSPEED_DDR;
  827. }
  828. rt_strncpy(host->name, mmcsd->name, RT_NAME_MAX);
  829. host->max_seg_size = 0x80000;
  830. host->max_dma_segs = 1;
  831. host->max_blk_size = 512;
  832. host->max_blk_count = 1024;
  833. mmcsd->host = host;
  834. /* Perform necessary initialization */
  835. board_sd_configure_clock(mmcsd->sdxc_base, 375000, true);
  836. sdxc_config_t sdxc_config = { 0 };
  837. sdxc_config.data_timeout = 1000;
  838. sdxc_init(mmcsd->sdxc_base, &sdxc_config);
  839. host->private_data = mmcsd;
  840. /* Initialize PWR pin and VSEL pin */
  841. if (mmcsd->pwr_pin_name != RT_NULL)
  842. {
  843. hpm_sdmmc_pin_init(mmcsd->pwr_pin_name, true);
  844. rt_thread_mdelay(1);
  845. if (host->valid_ocr == VDD_165_195)
  846. {
  847. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  848. }
  849. else
  850. {
  851. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  852. }
  853. }
  854. if (mmcsd->vsel_pin_name != RT_NULL)
  855. {
  856. hpm_sdmmc_pin_init(mmcsd->vsel_pin_name, true);
  857. rt_thread_mdelay(1);
  858. }
  859. mmcsd_change(host);
  860. };
  861. if (err != RT_EOK)
  862. {
  863. if (host != NULL)
  864. {
  865. mmcsd_free_host(host);
  866. host = NULL;
  867. }
  868. }
  869. return err;
  870. }
  871. INIT_DEVICE_EXPORT(rt_hw_sdio_init);
  872. #endif