/*
 * Copyright (c) 2021 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
#include "hpm_spi.h"
#include "hpm_clock_drv.h"
#include <stdlib.h>
#if USE_DMA_MGR
#include "hpm_dma_mgr.h"
#endif

typedef struct {
    SPI_Type *spi_ptr;
    clock_name_t spi_clock_name;
#if USE_DMA_MGR
    uint8_t tx_dmamux_src;
    uint8_t rx_dmamux_src;
    dma_resource_t txdma_resource;
    dma_resource_t rxdma_resource;
    spi_dma_complete_cb tx_dma_complete;
    spi_dma_complete_cb rx_dma_complete;
#endif
} hpm_spi_cfg_t;

static hpm_spi_cfg_t spi_dma_cfg_table[] = {
#if defined(HPM_SPI0)
    {
        .spi_ptr = HPM_SPI0,
        .spi_clock_name = clock_spi0,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI0_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI0_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI1)
    {
        .spi_ptr = HPM_SPI1,
        .spi_clock_name = clock_spi1,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI1_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI1_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI2)
    {
        .spi_ptr = HPM_SPI2,
        .spi_clock_name = clock_spi2,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI2_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI2_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI3)
    {
        .spi_ptr = HPM_SPI3,
        .spi_clock_name = clock_spi3,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI3_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI3_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI4)
    {
        .spi_ptr = HPM_SPI4,
        .spi_clock_name = clock_spi4,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI4_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI4_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI5)
    {
        .spi_ptr = HPM_SPI5,
        .spi_clock_name = clock_spi5,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI5_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI5_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI6)
    {
        .spi_ptr = HPM_SPI6,
        .spi_clock_name = clock_spi6,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI6_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI6_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI7)
    {
        .spi_ptr = HPM_SPI7,
        .spi_clock_name = clock_spi7,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI7_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI7_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI8)
    {
        .spi_ptr = HPM_SPI8,
        .spi_clock_name = clock_spi8,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI8_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI8_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI9)
    {
        .spi_ptr = HPM_SPI9,
        .spi_clock_name = clock_spi9,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI9_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI9_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
#if defined(HPM_SPI10)
    {
        .spi_ptr = HPM_SPI10,
        .spi_clock_name = clock_spi10,
#if USE_DMA_MGR
        .tx_dmamux_src = HPM_DMA_SRC_SPI10_TX,
        .rx_dmamux_src = HPM_DMA_SRC_SPI10_RX,
        .rx_dma_complete = NULL,
        .tx_dma_complete = NULL,
#endif
    },
#endif
};
static hpm_stat_t hpm_spi_tx_trigger_dma(DMA_Type *dma_ptr, uint8_t ch_num, SPI_Type *spi_ptr, uint32_t src, uint8_t data_width, uint32_t size)
{
    dma_handshake_config_t config;

    dma_default_handshake_config(dma_ptr, &config);
    config.ch_index = ch_num;
    config.dst = (uint32_t)&spi_ptr->DATA;
    config.dst_fixed = true;
    config.src = src;
    config.src_fixed = false;
    config.data_width = data_width;
    config.size_in_byte = size;
    return dma_setup_handshake(dma_ptr, &config, true);
}

static hpm_stat_t hpm_spi_rx_trigger_dma(DMA_Type *dma_ptr, uint8_t ch_num, SPI_Type *spi_ptr, uint32_t dst, uint8_t data_width, uint32_t size)
{
    dma_handshake_config_t config;

    dma_default_handshake_config(dma_ptr, &config);
    config.ch_index = ch_num;
    config.dst = dst;
    config.dst_fixed = false;
    config.src = (uint32_t)&spi_ptr->DATA;
    config.src_fixed = true;
    config.data_width = data_width;
    config.size_in_byte = size;
    return dma_setup_handshake(dma_ptr, &config, true);
}
void hpm_spi_prepare_dma_tx_descriptors(spi_context_t *context, spi_control_config_t *config, uint32_t trans_count,
                                        uint32_t *spi_transctrl, dma_linked_descriptor_t *tx_dma_descriptors)
{
    SPI_Type *ptr = context->ptr;
    uint32_t dma_transfer_size[trans_count];
    uint32_t tx_count = context->tx_count;
    uint32_t per_trans_size = context->per_trans_max;
    uint32_t dma_ch = context->dma_context.tx_dma_ch;
    uint8_t *tx_buff = context->tx_buff;
    dma_channel_config_t dma_ch_config;
    static uint8_t dummy_cmd = 0xff;
    uint32_t temp32;
    uint32_t tx_buff_index = 0;

    dma_default_channel_config(context->dma_context.dma_ptr, &dma_ch_config);
    for (uint32_t i = 0; i < trans_count; i++) {
        if (tx_count > per_trans_size) {
            temp32 = per_trans_size;
            tx_count -= per_trans_size;
        } else {
            temp32 = tx_count;
        }
        *(spi_transctrl + i) = SPI_TRANSCTRL_TRANSMODE_SET(config->common_config.trans_mode == spi_trans_write_read_together ?
                                                           spi_trans_write_read_together : spi_trans_write_only)
                             | SPI_TRANSCTRL_DUALQUAD_SET(config->common_config.data_phase_fmt)
                             | SPI_TRANSCTRL_WRTRANCNT_SET(temp32 - 1)
                             | SPI_TRANSCTRL_RDTRANCNT_SET(temp32 - 1);
        if (i == 0) {
            /* Let the DMA move one more data unit than the SPI transfer count: when the DMA
             * finishes, data is still draining from the SPI FIFO, and the DMA must not yet
             * execute the descriptor that rewrites the SPI CTRL register. */
            temp32 = temp32 + 1;
        }
        if (i == trans_count - 1) {
            temp32 = temp32 - 1;
        }
        dma_transfer_size[i] = temp32;
        /* SPI CTRL */
        dma_ch_config.size_in_byte = 4;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(spi_transctrl + i));
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->TRANSCTRL);
        dma_ch_config.src_width = DMA_TRANSFER_WIDTH_WORD;
        dma_ch_config.dst_width = DMA_TRANSFER_WIDTH_WORD;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(tx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 1));
        dma_config_linked_descriptor(context->dma_context.dma_ptr, tx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS, dma_ch, &dma_ch_config);
        /* SPI CMD */
        dma_ch_config.size_in_byte = 1;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&dummy_cmd);
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->CMD);
        dma_ch_config.src_width = DMA_TRANSFER_WIDTH_BYTE;
        dma_ch_config.dst_width = DMA_TRANSFER_WIDTH_BYTE;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(tx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 2));
        dma_config_linked_descriptor(context->dma_context.dma_ptr, tx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 1, dma_ch, &dma_ch_config);
        /* SPI DATA */
        dma_ch_config.size_in_byte = dma_transfer_size[i] << context->dma_context.data_width;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(tx_buff + tx_buff_index));
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->DATA);
        dma_ch_config.src_width = context->dma_context.data_width;
        dma_ch_config.dst_width = context->dma_context.data_width;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_HANDSHAKE;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_INCREMENT;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        if (i == trans_count - 1) {
            dma_ch_config.linked_ptr = 0;
        } else {
            dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(tx_dma_descriptors + (i + 1) * SPI_DMA_DESC_COUNT_PER_TRANS));
        }
        dma_config_linked_descriptor(context->dma_context.dma_ptr, tx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 2, dma_ch, &dma_ch_config);
        tx_buff_index += temp32 * context->data_len_in_byte;
    }
}
void hpm_prepare_dma_rx_descriptors(spi_context_t *context, spi_control_config_t *config, uint32_t trans_count,
                                    uint32_t *spi_transctrl, dma_linked_descriptor_t *rx_dma_descriptors)
{
    SPI_Type *ptr = context->ptr;
    uint32_t dma_transfer_size[trans_count];
    uint32_t rx_count = context->rx_count;
    uint32_t per_trans_size = context->per_trans_max;
    uint32_t dma_ch = context->dma_context.rx_dma_ch;
    uint8_t *rx_buff = context->rx_buff;
    dma_channel_config_t dma_ch_config;
    static uint8_t dummy_cmd = 0xff;
    uint32_t temp32;
    uint32_t rx_buff_index = 0;

    dma_default_channel_config(context->dma_context.dma_ptr, &dma_ch_config);
    for (uint32_t i = 0; i < trans_count; i++) {
        if (rx_count > per_trans_size) {
            temp32 = per_trans_size;
            rx_count -= per_trans_size;
        } else {
            temp32 = rx_count;
        }
        *(spi_transctrl + i) = SPI_TRANSCTRL_TRANSMODE_SET(spi_trans_read_only) |
                               SPI_TRANSCTRL_DUALQUAD_SET(config->common_config.data_phase_fmt) |
                               SPI_TRANSCTRL_WRTRANCNT_SET(temp32 - 1) |
                               SPI_TRANSCTRL_RDTRANCNT_SET(temp32 - 1);
        dma_transfer_size[i] = temp32;
        /* SPI CTRL */
        dma_ch_config.size_in_byte = 4;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(spi_transctrl + i));
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->TRANSCTRL);
        dma_ch_config.src_width = DMA_TRANSFER_WIDTH_WORD;
        dma_ch_config.dst_width = DMA_TRANSFER_WIDTH_WORD;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(rx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 1));
        dma_config_linked_descriptor(context->dma_context.dma_ptr, rx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS, dma_ch, &dma_ch_config);
        /* SPI CMD */
        dma_ch_config.size_in_byte = 1;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&dummy_cmd);
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->CMD);
        dma_ch_config.src_width = DMA_TRANSFER_WIDTH_BYTE;
        dma_ch_config.dst_width = DMA_TRANSFER_WIDTH_BYTE;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(rx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 2));
        dma_config_linked_descriptor(context->dma_context.dma_ptr, rx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 1, dma_ch, &dma_ch_config);
        /* SPI DATA */
        dma_ch_config.size_in_byte = dma_transfer_size[i] << context->dma_context.data_width;
        dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&ptr->DATA);
        dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(rx_buff + rx_buff_index));
        dma_ch_config.src_width = context->dma_context.data_width;
        dma_ch_config.dst_width = context->dma_context.data_width;
        dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
        dma_ch_config.src_mode = DMA_HANDSHAKE_MODE_HANDSHAKE;
        dma_ch_config.dst_mode = DMA_HANDSHAKE_MODE_NORMAL;
        dma_ch_config.src_addr_ctrl = DMA_ADDRESS_CONTROL_FIXED;
        dma_ch_config.dst_addr_ctrl = DMA_ADDRESS_CONTROL_INCREMENT;
        if (i == trans_count - 1) {
            dma_ch_config.linked_ptr = 0;
        } else {
            dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(rx_dma_descriptors + (i + 1) * SPI_DMA_DESC_COUNT_PER_TRANS));
        }
        dma_config_linked_descriptor(context->dma_context.dma_ptr, rx_dma_descriptors + i * SPI_DMA_DESC_COUNT_PER_TRANS + 2, dma_ch, &dma_ch_config);
        rx_buff_index += temp32 * context->data_len_in_byte;
    }
}
static uint32_t hpm_spi_get_trans_count(spi_context_t *context, spi_control_config_t *config)
{
    uint32_t total_trans_count, per_trans_count, trans_count;

    per_trans_count = context->per_trans_max;
    if (config->common_config.trans_mode == spi_trans_write_only || config->common_config.trans_mode == spi_trans_dummy_write) {
        total_trans_count = context->tx_count;
    } else if (config->common_config.trans_mode == spi_trans_read_only || config->common_config.trans_mode == spi_trans_dummy_read) {
        total_trans_count = context->rx_count;
    } else {
        /* write and read together */
        assert(context->tx_count == context->rx_count);
        total_trans_count = context->tx_count;
    }
    trans_count = (total_trans_count + per_trans_count - 1) / per_trans_count;
    return trans_count;
}
/**
 * SPI with DMA chain workflow
 *
 * 1. call spi_setup_dma_transfer to configure the SPI for the first transmission
 * 2. execute the data transmission phase of the DMA chain descriptor
 * 3. execute the "set SPI CTRL register" phase of the DMA chain descriptor
 * 4. execute the "write SPI CMD register" phase of the DMA chain descriptor
 * 5. repeat steps 2-4 until the transmission is finished
 */
static hpm_stat_t spi_setup_trans_with_dma_chain(spi_context_t *context, spi_control_config_t *config)
{
    hpm_stat_t stat = status_success;
    SPI_Type *spi_ptr = context->ptr;
    DMA_Type *dma_ptr = context->dma_context.dma_ptr;
    DMAMUX_Type *dmamux_ptr = context->dma_context.dmamux_ptr;
    dma_linked_descriptor_t *dma_linked_descriptor = context->dma_linked_descriptor;
    uint32_t *spi_transctrl = context->spi_transctrl;
    uint32_t dma_channel = 0;
    uint32_t trans_count;
    dma_channel_config_t dma_ch_config = {0};
    /* use a dummy DMA transfer to kick off the SPI DMA descriptor chain */
    static uint32_t dummy_data1 = 0xff, dummy_data2 = 0xff;

    trans_count = hpm_spi_get_trans_count(context, config);
    /* assert the SPI CS pin */
    context->write_cs(context->cs_pin, SPI_CS_ACTIVE);
    /* configure the SPI for the first DMA transmission */
    stat = spi_setup_dma_transfer(spi_ptr,
                                  config,
                                  &context->cmd,
                                  &context->addr,
                                  MIN(context->tx_count, context->per_trans_max),
                                  MIN(context->rx_count, context->per_trans_max));
    if (stat != status_success) {
        return stat;
    }
    if (config->common_config.trans_mode == spi_trans_write_only || config->common_config.trans_mode == spi_trans_dummy_write) {
        /* write only */
        hpm_spi_prepare_dma_tx_descriptors(context, config, trans_count, spi_transctrl, dma_linked_descriptor);
        dma_channel = context->dma_context.tx_dma_ch;
        dmamux_config(dmamux_ptr, context->dma_context.tx_dmamux_ch, context->dma_context.tx_req, true);
    } else if (config->common_config.trans_mode == spi_trans_read_only || config->common_config.trans_mode == spi_trans_dummy_read) {
        /* read only */
        hpm_prepare_dma_rx_descriptors(context, config, trans_count, spi_transctrl, dma_linked_descriptor);
        dma_channel = context->dma_context.rx_dma_ch;
        dmamux_config(dmamux_ptr, context->dma_context.rx_dmamux_ch, context->dma_context.rx_req, true);
    } else if (config->common_config.trans_mode == spi_trans_write_read_together) {
        /* write and read together */
        hpm_spi_prepare_dma_tx_descriptors(context, config, trans_count, spi_transctrl, dma_linked_descriptor);
        dma_channel = context->dma_context.tx_dma_ch;
        dmamux_config(dmamux_ptr, context->dma_context.tx_dmamux_ch, context->dma_context.tx_req, true);
        dmamux_config(dmamux_ptr, context->dma_context.rx_dmamux_ch, context->dma_context.rx_req, true);
        /* SPI TX uses the chained DMA descriptors; SPI RX uses a plain (unchained) DMA transfer */
        stat = hpm_spi_rx_trigger_dma(dma_ptr,
                                      context->dma_context.rx_dma_ch,
                                      spi_ptr,
                                      core_local_mem_to_sys_address(context->running_core, (uint32_t)context->rx_buff),
                                      context->dma_context.data_width,
                                      context->rx_size);
        if (stat != status_success) {
            return stat;
        }
    } else {
        return status_invalid_argument;
    }
    dma_default_channel_config(context->dma_context.dma_ptr, &dma_ch_config);
    dma_ch_config.src_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&dummy_data1);
    dma_ch_config.dst_addr = core_local_mem_to_sys_address(context->running_core, (uint32_t)&dummy_data2);
    dma_ch_config.src_burst_size = DMA_NUM_TRANSFER_PER_BURST_1T;
    dma_ch_config.src_width = DMA_TRANSFER_WIDTH_WORD;
    dma_ch_config.dst_width = DMA_TRANSFER_WIDTH_WORD;
    dma_ch_config.size_in_byte = 4;
    /* enter the chain at its data transmission phase */
    dma_ch_config.linked_ptr = core_local_mem_to_sys_address(context->running_core, (uint32_t)(dma_linked_descriptor + SPI_DMA_DESC_COUNT_PER_TRANS - 1));
    stat = dma_setup_channel(dma_ptr, dma_channel, &dma_ch_config, true);
    if (stat != status_success) {
        return stat;
    }
    return stat;
}
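
/*
 * Illustrative sketch (not part of the original driver): sizing the descriptor
 * pool consumed by the chain built above. Each transfer unit uses
 * SPI_DMA_DESC_COUNT_PER_TRANS linked descriptors (CTRL, CMD, DATA), so an
 * application can size its pool from a worst-case data count. The APP_-prefixed
 * names and the noncacheable placement attribute are assumptions; the pool and
 * control words must stay valid and DMA-visible for the whole chained transfer.
 */
#define APP_SPI_TOTAL_COUNT 2048U /* hypothetical worst-case data count */
#define APP_SPI_TRANS_UNITS ((APP_SPI_TOTAL_COUNT + SPI_SOC_TRANSFER_COUNT_MAX - 1U) / SPI_SOC_TRANSFER_COUNT_MAX)
ATTR_PLACE_AT_NONCACHEABLE static dma_linked_descriptor_t app_dma_descs[APP_SPI_TRANS_UNITS * SPI_DMA_DESC_COUNT_PER_TRANS];
ATTR_PLACE_AT_NONCACHEABLE static uint32_t app_spi_transctrl[APP_SPI_TRANS_UNITS];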
static hpm_stat_t spi_setup_trans_with_dma(spi_context_t *context, spi_control_config_t *config)
{
    hpm_stat_t stat = status_success;
    SPI_Type *spi_ptr = context->ptr;
    DMA_Type *dma_ptr = context->dma_context.dma_ptr;
    DMAMUX_Type *dmamux_ptr = context->dma_context.dmamux_ptr;
    uint32_t trans_mode = config->common_config.trans_mode;

    if (context->write_cs != NULL) {
        context->write_cs(context->cs_pin, SPI_CS_ACTIVE);
    }
    stat = spi_setup_dma_transfer(spi_ptr, config,
                                  &context->cmd, &context->addr,
                                  context->tx_count, context->rx_count);
    if (stat != status_success) {
        return stat;
    }
    if (trans_mode != spi_trans_write_only && trans_mode != spi_trans_dummy_write && trans_mode != spi_trans_no_data) {
        dmamux_config(dmamux_ptr, context->dma_context.rx_dmamux_ch, context->dma_context.rx_req, true);
        stat = hpm_spi_rx_trigger_dma(dma_ptr,
                                      context->dma_context.rx_dma_ch,
                                      spi_ptr,
                                      core_local_mem_to_sys_address(context->running_core, (uint32_t)context->rx_buff),
                                      context->dma_context.data_width,
                                      context->rx_size);
        if (stat != status_success) {
            return stat;
        }
    }
    if (trans_mode != spi_trans_read_only && trans_mode != spi_trans_dummy_read && trans_mode != spi_trans_no_data) {
        dmamux_config(dmamux_ptr, context->dma_context.tx_dmamux_ch, context->dma_context.tx_req, true);
        stat = hpm_spi_tx_trigger_dma(dma_ptr,
                                      context->dma_context.tx_dma_ch,
                                      spi_ptr,
                                      core_local_mem_to_sys_address(context->running_core, (uint32_t)context->tx_buff),
                                      context->dma_context.data_width,
                                      context->tx_size);
        if (stat != status_success) {
            return stat;
        }
    }
    return stat;
}
hpm_stat_t hpm_spi_setup_dma_transfer(spi_context_t *context, spi_control_config_t *config)
{
    assert((context != NULL) && (config != NULL));
    /* DMA is required: the context must carry a valid DMA controller */
    assert(context->dma_context.dma_ptr != NULL);
    /* the SPI per-transfer data size must not be zero */
    assert(context->per_trans_max);
    hpm_stat_t stat = status_success;

    if (l1c_dc_is_enabled()) {
        /* cache writeback for the tx buffer */
        if (context->tx_buff != NULL && context->tx_size != 0) {
            uint32_t aligned_start = HPM_L1C_CACHELINE_ALIGN_DOWN((uint32_t)context->tx_buff);
            uint32_t aligned_end = HPM_L1C_CACHELINE_ALIGN_UP((uint32_t)context->tx_buff + context->tx_size);
            uint32_t aligned_size = aligned_end - aligned_start;
            l1c_dc_writeback(aligned_start, aligned_size);
        }
        /* cache invalidate for the rx buffer */
        if (context->rx_buff != NULL && context->rx_size != 0) {
            uint32_t aligned_start = HPM_L1C_CACHELINE_ALIGN_DOWN((uint32_t)context->rx_buff);
            uint32_t aligned_end = HPM_L1C_CACHELINE_ALIGN_UP((uint32_t)context->rx_buff + context->rx_size);
            uint32_t aligned_size = aligned_end - aligned_start;
            l1c_dc_invalidate(aligned_start, aligned_size);
        }
    }
    if ((context->rx_count > context->per_trans_max) || (context->tx_count > context->per_trans_max)) {
        /* multiple SPI transmissions chained with DMA descriptors */
        assert(config->common_config.trans_mode == spi_trans_read_only || config->common_config.trans_mode == spi_trans_dummy_read
            || config->common_config.trans_mode == spi_trans_write_only || config->common_config.trans_mode == spi_trans_dummy_write
            || config->common_config.trans_mode == spi_trans_write_read_together);
        /* master mode only */
        assert((context->ptr->TRANSFMT & SPI_TRANSFMT_SLVMODE_MASK) != SPI_TRANSFMT_SLVMODE_MASK);
        /* a GPIO must replace the hardware SPI CS pin for chained DMA transmissions */
        assert(context->write_cs != NULL);
        stat = spi_setup_trans_with_dma_chain(context, config);
    } else {
        /* a single SPI transmission; no DMA chain needed */
        stat = spi_setup_trans_with_dma(context, config);
    }
    return stat;
}
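
/*
 * Usage sketch (not part of the original driver) for hpm_spi_setup_dma_transfer():
 * a write-only master transfer. The SPI/DMA instances, channel numbers, the
 * channel-to-dmamux mapping macro, the CS pin id and all app_-prefixed names are
 * application assumptions; app_dma_descs and app_spi_transctrl refer to the pool
 * sketched after the chain setup above.
 */
static void app_write_cs(uint32_t pin, uint8_t state)
{
    (void)pin;
    (void)state;
    /* drive the CS GPIO here (board specific) */
}

static hpm_stat_t app_spi_dma_write(uint8_t *data, uint32_t count)
{
    spi_control_config_t control_config;
    spi_context_t context = {
        .ptr = HPM_SPI2,                      /* assumed instance */
        .write_cs = app_write_cs,
        .cs_pin = 0,                          /* hypothetical pin id passed back to app_write_cs */
        .tx_buff = data,
        .tx_size = count,                     /* bytes; equals tx_count for 8-bit data */
        .tx_count = count,                    /* count in data units */
        .rx_buff = NULL,
        .rx_size = 0,
        .rx_count = 0,
        .data_len_in_byte = 1,
        .per_trans_max = SPI_SOC_TRANSFER_COUNT_MAX,
        .running_core = HPM_CORE0,
        .dma_linked_descriptor = app_dma_descs,
        .spi_transctrl = app_spi_transctrl,
        .dma_context = {
            .dma_ptr = HPM_HDMA,
            .dmamux_ptr = HPM_DMAMUX,
            .tx_dma_ch = 0,
            .rx_dma_ch = 1,
            .tx_dmamux_ch = DMA_SOC_CHN_TO_DMAMUX_CHN(HPM_HDMA, 0),
            .rx_dmamux_ch = DMA_SOC_CHN_TO_DMAMUX_CHN(HPM_HDMA, 1),
            .tx_req = HPM_DMA_SRC_SPI2_TX,
            .rx_req = HPM_DMA_SRC_SPI2_RX,
            .data_width = DMA_TRANSFER_WIDTH_BYTE,
        },
    };

    spi_master_get_default_control_config(&control_config);
    control_config.common_config.trans_mode = spi_trans_write_only;
    /* counts above per_trans_max take the chained-DMA path automatically;
     * after the DMA completes, call hpm_spi_release_gpio_cs() to deassert CS */
    return hpm_spi_setup_dma_transfer(&context, &control_config);
}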
/* When a GPIO is used as the SPI CS pin, it must be released manually after the SPI transfer completes */
hpm_stat_t hpm_spi_release_gpio_cs(spi_context_t *context)
{
    hpm_stat_t stat;
    SPI_Type *ptr = context->ptr;

    assert(context->write_cs != NULL);
    stat = spi_wait_for_idle_status(ptr);
    if (stat != status_success) {
        return stat;
    }
    context->write_cs(context->cs_pin, !SPI_CS_ACTIVE);
    return status_success;
}
static hpm_stat_t wait_spi_slave_active(SPI_Type *ptr, bool active_status, uint32_t timeout)
{
    uint32_t ticks_per_us = (hpm_core_clock + 1000000 - 1U) / 1000000;
    uint64_t expected_ticks = hpm_csr_get_core_cycle() + (uint64_t)ticks_per_us * 1000UL * timeout;

    do {
        if (hpm_csr_get_core_cycle() > expected_ticks) {
            return status_timeout;
        }
    } while (spi_is_active(ptr) == !active_status);
    return status_success;
}
static hpm_spi_cfg_t *hpm_spi_get_cfg_obj(SPI_Type *ptr)
{
    hpm_spi_cfg_t *obj;
    uint8_t i = 0;

    for (i = 0; i < (sizeof(spi_dma_cfg_table) / sizeof(hpm_spi_cfg_t)); i++) {
        obj = &spi_dma_cfg_table[i];
        if (obj->spi_ptr == ptr) {
            return obj;
        }
    }
    return NULL;
}
static void hpm_spi_transfer_init(SPI_Type *ptr, spi_trans_mode_t mode, uint32_t size)
{
    uint32_t slv_mode = SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT);
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4;
    }
    if (slv_mode == spi_master_mode) {
        spi_set_transfer_mode(ptr, mode);
    } else {
        /* slave mode only supports the write-read-together mode in data-only mode */
        spi_set_transfer_mode(ptr, spi_trans_write_read_together);
    }
    if ((mode == spi_trans_write_read_together) || (slv_mode == spi_slave_mode)) {
        spi_set_write_data_count(ptr, size / data_len_in_bytes);
        spi_set_read_data_count(ptr, size / data_len_in_bytes);
    } else if (mode == spi_trans_write_only) {
        spi_set_write_data_count(ptr, size / data_len_in_bytes);
    } else {
        spi_set_read_data_count(ptr, size / data_len_in_bytes);
    }
    /* before starting a new transmission, reset the SPI block in slave mode */
    if (slv_mode == spi_slave_mode) {
        spi_reset(ptr);
    }
    spi_transmit_fifo_reset(ptr);
    spi_receive_fifo_reset(ptr);
    while (ptr->CTRL & (SPI_CTRL_TXFIFORST_MASK | SPI_CTRL_RXFIFORST_MASK)) {
    }
}
static hpm_stat_t write_read_data_together(SPI_Type *ptr, uint8_t data_len_in_bytes, uint8_t *wbuff, uint32_t wsize,
                                           uint8_t *rbuff, uint32_t rsize, uint32_t timeout)
{
    uint8_t txfifo_size = spi_get_tx_fifo_size(ptr);
    uint32_t rx_index = 0, tx_index = 0;
    uint8_t rxfifo_valid_size, txfifo_valid_size, j;
    uint32_t ticks_per_us = (hpm_core_clock + 1000000 - 1U) / 1000000;
    uint64_t expected_ticks = hpm_csr_get_core_cycle() + (uint64_t)ticks_per_us * 1000UL * timeout;

    while ((rx_index < rsize) || (tx_index < wsize)) {
        if (tx_index < wsize) {
            txfifo_valid_size = spi_get_tx_fifo_valid_data_size(ptr);
            if ((txfifo_size - txfifo_valid_size) > 0) {
                for (j = 0; j < (txfifo_size - txfifo_valid_size); j++) {
                    if (tx_index >= wsize) {
                        break;
                    }
                    switch (data_len_in_bytes) {
                    case 1:
                        ptr->DATA = *(uint8_t *)wbuff;
                        break;
                    case 2:
                        ptr->DATA = *(uint16_t *)wbuff;
                        break;
                    default:
                        ptr->DATA = *(uint32_t *)wbuff;
                        break;
                    }
                    wbuff += data_len_in_bytes;
                    tx_index++;
                }
            }
        }
        if (rx_index < rsize) {
            rxfifo_valid_size = spi_get_rx_fifo_valid_data_size(ptr);
            if (rxfifo_valid_size > 0) {
                for (j = 0; j < rxfifo_valid_size; j++) {
                    if (rx_index >= rsize) {
                        break;
                    }
                    switch (data_len_in_bytes) {
                    case 1:
                        *(uint8_t *)rbuff = (uint8_t)ptr->DATA;
                        break;
                    case 2:
                        *(uint16_t *)rbuff = (uint16_t)ptr->DATA;
                        break;
                    default:
                        *(uint32_t *)rbuff = ptr->DATA;
                        break;
                    }
                    rbuff += data_len_in_bytes;
                    rx_index++;
                }
            }
        }
        if (hpm_csr_get_core_cycle() > expected_ticks) {
            return status_timeout;
        }
    }
    return status_success;
}
static hpm_stat_t read_data_single(SPI_Type *ptr, uint8_t data_len_in_bytes, uint8_t *rbuff, uint32_t rsize, uint32_t timeout)
{
    uint32_t rx_index = 0;
    uint8_t rxfifo_valid_size, j;
    uint32_t ticks_per_us = (hpm_core_clock + 1000000 - 1U) / 1000000;
    uint64_t expected_ticks = hpm_csr_get_core_cycle() + (uint64_t)ticks_per_us * 1000UL * timeout;

    while (rx_index < rsize) {
        rxfifo_valid_size = spi_get_rx_fifo_valid_data_size(ptr);
        if (rxfifo_valid_size > 0) {
            for (j = 0; j < rxfifo_valid_size; j++) {
                if (rx_index >= rsize) {
                    break;
                }
                switch (data_len_in_bytes) {
                case 1:
                    *(uint8_t *)rbuff = (uint8_t)ptr->DATA;
                    break;
                case 2:
                    *(uint16_t *)rbuff = (uint16_t)ptr->DATA;
                    break;
                default:
                    *(uint32_t *)rbuff = ptr->DATA;
                    break;
                }
                rbuff += data_len_in_bytes;
                rx_index++;
            }
        }
        if (hpm_csr_get_core_cycle() > expected_ticks) {
            return status_timeout;
        }
    }
    return status_success;
}
static hpm_stat_t write_data_single(SPI_Type *ptr, uint8_t data_len_in_bytes, uint8_t *wbuff, uint32_t wsize, uint32_t timeout)
{
    uint8_t txfifo_size = spi_get_tx_fifo_size(ptr);
    uint32_t tx_index = 0;
    uint8_t txfifo_valid_size, j;
    uint32_t ticks_per_us = (hpm_core_clock + 1000000 - 1U) / 1000000;
    uint64_t expected_ticks = hpm_csr_get_core_cycle() + (uint64_t)ticks_per_us * 1000UL * timeout;

    while (tx_index < wsize) {
        txfifo_valid_size = spi_get_tx_fifo_valid_data_size(ptr);
        if ((txfifo_size - txfifo_valid_size) > 0) {
            for (j = 0; j < (txfifo_size - txfifo_valid_size); j++) {
                if (tx_index >= wsize) {
                    break;
                }
                switch (data_len_in_bytes) {
                case 1:
                    ptr->DATA = *(uint8_t *)wbuff;
                    break;
                case 2:
                    ptr->DATA = *(uint16_t *)wbuff;
                    break;
                default:
                    ptr->DATA = *(uint32_t *)wbuff;
                    break;
                }
                wbuff += data_len_in_bytes;
                tx_index++;
            }
        }
        if (hpm_csr_get_core_cycle() > expected_ticks) {
            return status_timeout;
        }
    }
    return status_success;
}
void hpm_spi_get_default_init_config(spi_initialize_config_t *config)
{
    config->mode = spi_master_mode;
    config->io_mode = spi_single_io_mode;
    config->clk_polarity = spi_sclk_low_idle;
    config->clk_phase = spi_sclk_sampling_odd_clk_edges;
    config->data_len = 8;
    config->data_merge = false;
    config->direction = msb_first;
}
hpm_stat_t hpm_spi_initialize(SPI_Type *ptr, spi_initialize_config_t *config)
{
    if (config->data_len == 0) {
        return status_invalid_argument;
    }
    /* first, initialize the TRANSFMT register */
    ptr->TRANSFMT = SPI_TRANSFMT_DATALEN_SET(config->data_len - 1) |
                    SPI_TRANSFMT_DATAMERGE_SET(config->data_merge) |
                    SPI_TRANSFMT_LSB_SET(config->direction) |
                    SPI_TRANSFMT_SLVMODE_SET(config->mode) |
                    SPI_TRANSFMT_CPOL_SET(config->clk_polarity) |
                    SPI_TRANSFMT_CPHA_SET(config->clk_phase);
    /* second, initialize the TRANSCTRL register:
     * default phases: command/address/token phases disabled,
     * default transfer mode: write and read at the same time
     */
    ptr->TRANSCTRL = SPI_TRANSCTRL_SLVDATAONLY_SET(1) | /* slave mode transfers data only anyway */
                     SPI_TRANSCTRL_DUALQUAD_SET(config->io_mode);
    /* third, initialize the TIMING register */
    ptr->TIMING = SPI_TIMING_CS2SCLK_SET(spi_cs2sclk_half_sclk_1) |
                  SPI_TIMING_CSHT_SET(spi_csht_half_sclk_1) |
                  SPI_TIMING_SCLK_DIV_SET(0x10);
    return status_success;
}
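
/*
 * Usage sketch (not part of the original driver): bring up an SPI master with
 * the helpers above. The instance and target frequency are application
 * assumptions; pin muxing and clock setup are omitted.
 */
static void app_spi_master_bringup(void)
{
    spi_initialize_config_t init_config;

    hpm_spi_get_default_init_config(&init_config);
    init_config.mode = spi_master_mode;
    init_config.data_len = 8; /* 8-bit frames */
    if (hpm_spi_initialize(HPM_SPI3, &init_config) != status_success) {
        return;
    }
    /* pick the closest reachable SCLK; see hpm_spi_set_sclk_frequency below */
    (void)hpm_spi_set_sclk_frequency(HPM_SPI3, 20000000U); /* 20 MHz target */
}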
hpm_stat_t hpm_spi_set_sclk_frequency(SPI_Type *ptr, uint32_t freq)
{
    int freq_list[clock_source_general_source_end] = {0};
    int _freq = freq;
    uint8_t i = 0;
    int min_diff_freq;
    int current_diff_freq;
    int best_freq;
    hpm_stat_t stat;
    uint32_t div;
    clock_source_t clock_source;
    clk_src_t clk_src;
    static spi_timing_config_t timing_config = {0};
    uint32_t pll_clk = 0;
    hpm_spi_cfg_t *obj = hpm_spi_get_cfg_obj(ptr);

    if (obj == NULL) {
        return status_invalid_argument;
    }
    spi_master_get_default_timing_config(&timing_config);
    timing_config.master_config.clk_src_freq_in_hz = clock_get_frequency(obj->spi_clock_name);
    timing_config.master_config.sclk_freq_in_hz = freq;
    timing_config.master_config.cs2sclk = spi_cs2sclk_half_sclk_1;
    timing_config.master_config.csht = spi_csht_half_sclk_1;
    stat = spi_master_timing_init(ptr, &timing_config);
    if (stat != status_success) {
        spi_master_set_sclk_div(ptr, 0xFF);
        for (clock_source = (clock_source_t)0; clock_source < clock_source_general_source_end; clock_source++) {
            pll_clk = get_frequency_for_source(clock_source);
            div = pll_clk / freq;
            /* the division factor can be any integer from 1 to 256 */
            if ((div > 0) && (div <= 0x100)) {
                freq_list[clock_source] = pll_clk / div;
            }
        }
        /* find the achievable sclk frequency closest to the request */
        min_diff_freq = abs(freq_list[0] - _freq);
        best_freq = freq_list[0];
        for (i = 1; i < clock_source_general_source_end; i++) {
            current_diff_freq = abs(freq_list[i] - _freq);
            if (current_diff_freq < min_diff_freq) {
                min_diff_freq = current_diff_freq;
                best_freq = freq_list[i];
            }
        }
        /* apply the clock source and divider that produce the best frequency */
        for (i = 0; i < clock_source_general_source_end; i++) {
            if (best_freq == freq_list[i]) {
                pll_clk = get_frequency_for_source((clock_source_t)i);
                clk_src = MAKE_CLK_SRC(CLK_SRC_GROUP_COMMON, i);
                div = pll_clk / best_freq;
                clock_set_source_divider(obj->spi_clock_name, clk_src, div);
                break;
            }
        }
        stat = status_success;
    }
    return stat;
}
hpm_stat_t hpm_spi_transmit_receive_blocking(SPI_Type *ptr, uint8_t *wbuff, uint8_t *rbuff, uint32_t size, uint32_t timeout)
{
    hpm_stat_t stat = status_success;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);
    uint8_t txfifo_size = spi_get_tx_fifo_size(ptr);
    uint8_t remain_size = 0, j = 0;
    uint32_t temp, count;

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((wbuff == NULL) || (rbuff == NULL) || (size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes))) ||
        (spi_master_get_data_phase_format(ptr) != spi_single_io_mode)) {
        return status_invalid_argument;
    }
    count = (size / data_len_in_bytes);
    hpm_spi_transfer_init(ptr, spi_trans_write_read_together, size);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
        stat = write_read_data_together(ptr, data_len_in_bytes, wbuff, count, rbuff, count, timeout);
        if (stat == status_success) {
            /* wait: in master mode the active flag drops to 0 once the transfer finishes */
            stat = wait_spi_slave_active(ptr, false, timeout);
        }
    } else {
        /* pre-fill the TX FIFO before the host starts the transmission */
        remain_size = txfifo_size - spi_get_tx_fifo_valid_data_size(ptr);
        for (j = 0; j < remain_size; j++) {
            temp = 0;
            for (uint8_t i = 0; i < data_len_in_bytes; i++) {
                temp += *(wbuff++) << i * 8;
            }
            ptr->DATA = temp;
        }
        count -= remain_size;
        /* wait: in slave mode the active flag rises to 1 once the SPI CS signal is asserted */
        stat = wait_spi_slave_active(ptr, true, timeout);
        /* no need to read the RX FIFO while pre-filling: it has no effect on the SPI bus in slave mode */
        if (((size - remain_size) > 0) && (stat == status_success)) {
            stat = write_read_data_together(ptr, data_len_in_bytes, wbuff, count, rbuff, count + remain_size, timeout);
        }
    }
    return stat;
}
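
/*
 * Usage sketch (not part of the original driver) for the blocking full-duplex
 * API: exchange 16 bytes with a 100 ms timeout. The instance and the buffer
 * contents are application assumptions; the timeout is in milliseconds
 * (see wait_spi_slave_active above).
 */
static hpm_stat_t app_spi_polling_exchange(void)
{
    static uint8_t app_tx_buf[16] = {0x9f}; /* hypothetical command byte plus padding */
    static uint8_t app_rx_buf[16];

    return hpm_spi_transmit_receive_blocking(HPM_SPI3, app_tx_buf, app_rx_buf,
                                             sizeof(app_tx_buf), 100U);
}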
hpm_stat_t hpm_spi_receive_blocking(SPI_Type *ptr, uint8_t *buff, uint32_t size, uint32_t timeout)
{
    hpm_stat_t stat = status_success;
    uint32_t count;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((buff == NULL) || (size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)))) {
        return status_invalid_argument;
    }
    count = (size / data_len_in_bytes);
    hpm_spi_transfer_init(ptr, spi_trans_read_only, size);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
    } else {
        /* wait: in slave mode the active flag rises to 1 once the SPI CS signal is asserted */
        stat = wait_spi_slave_active(ptr, true, timeout);
    }
    /* no need to write the TX FIFO: it has no effect on the SPI bus in slave mode */
    if (stat == status_success) {
        stat = read_data_single(ptr, data_len_in_bytes, buff, count, timeout);
    }
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        /* wait: in master mode the active flag drops to 0 once the transfer finishes */
        if (stat == status_success) {
            stat = wait_spi_slave_active(ptr, false, timeout);
        }
    }
    return stat;
}
hpm_stat_t hpm_spi_transmit_blocking(SPI_Type *ptr, uint8_t *buff, uint32_t size, uint32_t timeout)
{
    hpm_stat_t stat = status_success;
    uint8_t txfifo_size = spi_get_tx_fifo_size(ptr);
    uint8_t remain_size = 0, j = 0;
    uint32_t temp, count;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((buff == NULL) || (size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)))) {
        return status_invalid_argument;
    }
    count = (size / data_len_in_bytes);
    hpm_spi_transfer_init(ptr, spi_trans_write_only, size);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
        stat = write_data_single(ptr, data_len_in_bytes, buff, count, timeout);
        if (stat == status_success) {
            /* wait: in master mode the active flag drops to 0 once the transfer finishes */
            stat = wait_spi_slave_active(ptr, false, timeout);
        }
    } else {
        /* pre-fill the TX FIFO before the host starts the transmission */
        remain_size = txfifo_size - spi_get_tx_fifo_valid_data_size(ptr);
        for (j = 0; j < remain_size; j++) {
            temp = 0;
            for (uint8_t i = 0; i < data_len_in_bytes; i++) {
                temp += *(buff++) << i * 8;
            }
            ptr->DATA = temp;
        }
        count -= remain_size;
        /* wait: in slave mode the active flag rises to 1 once the SPI CS signal is asserted */
        stat = wait_spi_slave_active(ptr, true, timeout);
        /* no need to read the RX FIFO: it has no effect on the SPI bus in slave mode */
        if ((count > 0) && (stat == status_success)) {
            stat = write_data_single(ptr, data_len_in_bytes, buff, count, timeout);
        }
    }
    return stat;
}
hpm_stat_t hpm_spi_transmit_receive_setup_dma(SPI_Type *ptr, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)))) {
        return status_invalid_argument;
    }
    hpm_spi_transfer_init(ptr, spi_trans_write_read_together, size);
    spi_enable_tx_dma(ptr);
    spi_enable_rx_dma(ptr);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
    }
    return stat;
}

hpm_stat_t hpm_spi_receive_setup_dma(SPI_Type *ptr, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)))) {
        return status_invalid_argument;
    }
    hpm_spi_transfer_init(ptr, spi_trans_read_only, size);
    spi_disable_tx_dma(ptr);
    spi_enable_rx_dma(ptr);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
    }
    return stat;
}

hpm_stat_t hpm_spi_transmit_setup_dma(SPI_Type *ptr, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    }
    if ((size == 0) ||
        ((SPI_SOC_TRANSFER_COUNT_MAX == 512) && (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)))) {
        return status_invalid_argument;
    }
    hpm_spi_transfer_init(ptr, spi_trans_write_only, size);
    spi_enable_tx_dma(ptr);
    spi_disable_rx_dma(ptr);
    /* in master mode, the CMD register must be written with a dummy value
     * to start a SPI transfer, even when the command phase is not enabled
     */
    if (SPI_TRANSFMT_SLVMODE_GET(ptr->TRANSFMT) == spi_master_mode) {
        ptr->CMD = 0xFF;
    }
    return stat;
}
#if USE_DMA_MGR
void dma_channel_tc_callback(DMA_Type *ptr, uint32_t channel, void *user_data)
{
    hpm_spi_cfg_t *obj = (hpm_spi_cfg_t *)user_data;

    if ((obj->rxdma_resource.channel == channel) &&
        (obj->rxdma_resource.base == ptr) &&
        (obj->rx_dma_complete != NULL)) {
        obj->rx_dma_complete(channel);
    }
    if ((obj->txdma_resource.channel == channel) &&
        (obj->txdma_resource.base == ptr) &&
        (obj->tx_dma_complete != NULL)) {
        obj->tx_dma_complete(channel);
    }
}

hpm_stat_t hpm_spi_dma_install_callback(SPI_Type *ptr, spi_dma_complete_cb tx_complete, spi_dma_complete_cb rx_complete)
{
    dma_mgr_chn_conf_t chg_config;
    dma_resource_t *resource = NULL;
    hpm_spi_cfg_t *obj = hpm_spi_get_cfg_obj(ptr);

    if (obj == NULL) {
        return status_invalid_argument;
    }
    dma_mgr_get_default_chn_config(&chg_config);
    chg_config.src_width = DMA_MGR_TRANSFER_WIDTH_BYTE;
    chg_config.dst_width = DMA_MGR_TRANSFER_WIDTH_BYTE;
    /* spi rx dma config */
    resource = &obj->rxdma_resource;
    if (dma_mgr_request_resource(resource) == status_success) {
        chg_config.src_mode = DMA_MGR_HANDSHAKE_MODE_HANDSHAKE;
        chg_config.src_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_FIXED;
        chg_config.src_addr = (uint32_t)&ptr->DATA;
        chg_config.dst_mode = DMA_MGR_HANDSHAKE_MODE_NORMAL;
        chg_config.dst_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_INCREMENT;
        chg_config.en_dmamux = true;
        chg_config.dmamux_src = obj->rx_dmamux_src;
        dma_mgr_setup_channel(resource, &chg_config);
        dma_mgr_install_chn_tc_callback(resource, dma_channel_tc_callback, (void *)obj);
        dma_mgr_enable_chn_irq(resource, DMA_MGR_INTERRUPT_MASK_TC);
        dma_mgr_enable_dma_irq_with_priority(resource, 1);
        obj->rx_dma_complete = rx_complete;
    }
    /* spi tx dma config */
    resource = &obj->txdma_resource;
    if (dma_mgr_request_resource(resource) == status_success) {
        chg_config.src_mode = DMA_MGR_HANDSHAKE_MODE_NORMAL;
        chg_config.src_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_INCREMENT;
        chg_config.dst_mode = DMA_MGR_HANDSHAKE_MODE_HANDSHAKE;
        chg_config.dst_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_FIXED;
        chg_config.dst_addr = (uint32_t)&ptr->DATA;
        chg_config.en_dmamux = true;
        chg_config.dmamux_src = obj->tx_dmamux_src;
        dma_mgr_setup_channel(resource, &chg_config);
        dma_mgr_install_chn_tc_callback(resource, dma_channel_tc_callback, (void *)obj);
        dma_mgr_enable_chn_irq(resource, DMA_MGR_INTERRUPT_MASK_TC);
        dma_mgr_enable_dma_irq_with_priority(resource, 1);
        obj->tx_dma_complete = tx_complete;
    }
    return status_success;
}
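
/*
 * Usage sketch (not part of the original driver): hook up the DMA manager path.
 * dma_mgr_init() must run once before channels can be requested; the completion
 * callbacks execute in DMA interrupt context, so the flags below are volatile.
 * All app_-prefixed names are assumptions.
 */
static volatile bool app_tx_done;
static volatile bool app_rx_done;

static void app_spi_tx_complete(uint32_t channel)
{
    (void)channel;
    app_tx_done = true;
}

static void app_spi_rx_complete(uint32_t channel)
{
    (void)channel;
    app_rx_done = true;
}

static void app_spi_dma_mgr_bringup(void)
{
    dma_mgr_init();
    (void)hpm_spi_dma_install_callback(HPM_SPI3, app_spi_tx_complete, app_spi_rx_complete);
}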
hpm_stat_t hpm_spi_transmit_receive_nonblocking(SPI_Type *ptr, uint8_t *wbuff, uint8_t *rbuff, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t transfer_width;
    dma_resource_t *resource;
    uint32_t buf_addr;
    hpm_spi_cfg_t *obj = hpm_spi_get_cfg_obj(ptr);
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        /* word */
        transfer_width = DMA_MGR_TRANSFER_WIDTH_WORD;
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
    } else {
        /* byte or half-word */
        transfer_width = data_len_in_bytes - 1;
    }
    if ((obj == NULL) ||
        (spi_master_get_data_phase_format(ptr) != spi_single_io_mode) ||
        (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes))) {
        return status_invalid_argument;
    }
    spi_disable_rx_dma(ptr);
    spi_disable_tx_dma(ptr);
    spi_transmit_fifo_reset(ptr);
    spi_receive_fifo_reset(ptr);
    resource = &obj->rxdma_resource;
    buf_addr = core_local_mem_to_sys_address(HPM_CORE0, (uint32_t)rbuff);
    dma_mgr_set_chn_dst_width(resource, transfer_width);
    dma_mgr_set_chn_src_width(resource, transfer_width);
    dma_mgr_set_chn_dst_addr(resource, buf_addr);
    dma_mgr_set_chn_transize(resource, size / data_len_in_bytes);
    dma_mgr_enable_channel(resource);
    resource = &obj->txdma_resource;
    buf_addr = core_local_mem_to_sys_address(HPM_CORE0, (uint32_t)wbuff);
    dma_mgr_set_chn_dst_width(resource, transfer_width);
    dma_mgr_set_chn_src_width(resource, transfer_width);
    dma_mgr_set_chn_src_addr(resource, buf_addr);
    dma_mgr_set_chn_transize(resource, size / data_len_in_bytes);
    dma_mgr_enable_channel(resource);
    stat = hpm_spi_transmit_receive_setup_dma(ptr, size);
    return stat;
}

hpm_stat_t hpm_spi_receive_nonblocking(SPI_Type *ptr, uint8_t *buff, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t transfer_width;
    dma_resource_t *resource;
    uint32_t buf_addr;
    hpm_spi_cfg_t *obj = hpm_spi_get_cfg_obj(ptr);
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
        /* word */
        transfer_width = DMA_MGR_TRANSFER_WIDTH_WORD;
    } else {
        /* byte or half-word */
        transfer_width = data_len_in_bytes - 1;
    }
    if ((obj == NULL) ||
        (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes))) {
        return status_invalid_argument;
    }
    spi_disable_rx_dma(ptr);
    spi_disable_tx_dma(ptr);
    spi_transmit_fifo_reset(ptr);
    spi_receive_fifo_reset(ptr);
    resource = &obj->rxdma_resource;
    buf_addr = core_local_mem_to_sys_address(HPM_CORE0, (uint32_t)buff);
    dma_mgr_set_chn_dst_width(resource, transfer_width);
    dma_mgr_set_chn_src_width(resource, transfer_width);
    dma_mgr_set_chn_dst_addr(resource, buf_addr);
    dma_mgr_set_chn_transize(resource, size / data_len_in_bytes);
    dma_mgr_enable_channel(resource);
    stat = hpm_spi_receive_setup_dma(ptr, size);
    return stat;
}

hpm_stat_t hpm_spi_transmit_nonblocking(SPI_Type *ptr, uint8_t *buff, uint32_t size)
{
    hpm_stat_t stat = status_success;
    uint8_t transfer_width;
    dma_resource_t *resource;
    uint32_t buf_addr;
    hpm_spi_cfg_t *obj = hpm_spi_get_cfg_obj(ptr);
    uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(ptr);

    if (data_len_in_bytes > 2) {
        data_len_in_bytes = 4; /* data widths above 16 bits occupy a full 4-byte word */
        /* word */
        transfer_width = DMA_MGR_TRANSFER_WIDTH_WORD;
    } else {
        /* byte or half-word */
        transfer_width = data_len_in_bytes - 1;
    }
    if ((obj == NULL) ||
        (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes))) {
        return status_invalid_argument;
    }
    resource = &obj->txdma_resource;
    buf_addr = core_local_mem_to_sys_address(HPM_CORE0, (uint32_t)buff);
    spi_disable_rx_dma(ptr);
    spi_disable_tx_dma(ptr);
    spi_transmit_fifo_reset(ptr);
    spi_receive_fifo_reset(ptr);
    dma_mgr_set_chn_src_addr(resource, buf_addr);
    dma_mgr_set_chn_dst_width(resource, transfer_width);
    dma_mgr_set_chn_src_width(resource, transfer_width);
    dma_mgr_set_chn_transize(resource, size / data_len_in_bytes);
    dma_mgr_enable_channel(resource);
    stat = hpm_spi_transmit_setup_dma(ptr, size);
    return stat;
}
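
/*
 * Usage sketch (not part of the original driver) tying the nonblocking API to
 * the callbacks installed in the previous sketch: start a transmit, then poll
 * the flag set by the DMA terminal-count interrupt. Names reuse the
 * hypothetical app_* symbols defined above.
 */
static void app_spi_send_async(uint8_t *data, uint32_t len)
{
    app_tx_done = false;
    if (hpm_spi_transmit_nonblocking(HPM_SPI3, data, len) == status_success) {
        while (!app_tx_done) {
            /* spin or do other work; the TC callback sets app_tx_done */
        }
    }
}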
#endif