fsl_spi.c
/*
 * The Clear BSD License
 * Copyright (c) 2016, Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted (subject to the limitations in the disclaimer below) provided
 * that the following conditions are met:
 *
 * o Redistributions of source code must retain the above copyright notice, this list
 *   of conditions and the following disclaimer.
 *
 * o Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * o Neither the name of the copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "fsl_spi.h"
#include "fsl_flexcomm.h"

/*******************************************************************************
 * Definitions
 ******************************************************************************/

/* Component ID definition, used by tools. */
#ifndef FSL_COMPONENT_ID
#define FSL_COMPONENT_ID "platform.drivers.flexcomm_spi"
#endif
/* Note: FIFOCFG[SIZE] always reads as 1, i.e. the FIFOs are 8 entries deep. */
#define SPI_FIFO_DEPTH(base) ((((base)->FIFOCFG & SPI_FIFOCFG_SIZE_MASK) >> SPI_FIFOCFG_SIZE_SHIFT) << 3)

/* Convert between a transfer count and a byte count. dataWidth is in the
 * range <0,15>; widths in the range <8,15> are 2-byte transfers. */
#define SPI_COUNT_TO_BYTES(dataWidth, count) ((count) << ((dataWidth) >> 3U))
#define SPI_BYTES_TO_COUNT(dataWidth, bytes) ((bytes) >> ((dataWidth) >> 3U))

#define SPI_SSELPOL_MASK ((SPI_CFG_SPOL0_MASK) | (SPI_CFG_SPOL1_MASK) | (SPI_CFG_SPOL2_MASK) | (SPI_CFG_SPOL3_MASK))
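/* Illustrative sketch (not part of the original driver): how the conversion macros
 * behave for the two width classes, assuming the usual fsl_spi.h encoding where
 * kSPI_Data8Bits is 7 and kSPI_Data16Bits is 15 (LEN = bits - 1). The counts are
 * arbitrary example values.
 *
 *   SPI_COUNT_TO_BYTES(kSPI_Data8Bits, 4)  -> 4   (7 >> 3 == 0, one byte per transfer)
 *   SPI_COUNT_TO_BYTES(kSPI_Data16Bits, 4) -> 8   (15 >> 3 == 1, two bytes per transfer)
 *   SPI_BYTES_TO_COUNT(kSPI_Data16Bits, 8) -> 4
 */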
/*******************************************************************************
 * Variables
 ******************************************************************************/

/*! @brief internal SPI config array */
static spi_config_t g_configs[FSL_FEATURE_SOC_SPI_COUNT] = {(spi_data_width_t)0};

/*! @brief Array to map SPI instance number to base address. */
static const uint32_t s_spiBaseAddrs[FSL_FEATURE_SOC_SPI_COUNT] = SPI_BASE_ADDRS;

/*! @brief IRQ name array */
static const IRQn_Type s_spiIRQ[] = SPI_IRQS;

/*! @brief Dummy data for each instance. This data is used when the user's tx buffer is NULL. */
volatile uint8_t s_dummyData[FSL_FEATURE_SOC_SPI_COUNT] = {0};

/*******************************************************************************
 * Code
 ******************************************************************************/
/* Get the index corresponding to the FLEXCOMM */
uint32_t SPI_GetInstance(SPI_Type *base)
{
    int i;

    for (i = 0; i < FSL_FEATURE_SOC_SPI_COUNT; i++)
    {
        if ((uint32_t)base == s_spiBaseAddrs[i])
        {
            return i;
        }
    }

    assert(false);
    return 0;
}

void SPI_SetDummyData(SPI_Type *base, uint8_t dummyData)
{
    uint32_t instance = SPI_GetInstance(base);
    s_dummyData[instance] = dummyData;
}

void *SPI_GetConfig(SPI_Type *base)
{
    int32_t instance;
    instance = SPI_GetInstance(base);
    if (instance < 0)
    {
        return NULL;
    }
    return &g_configs[instance];
}
void SPI_MasterGetDefaultConfig(spi_master_config_t *config)
{
    assert(NULL != config);

    config->enableLoopback = false;
    config->enableMaster = true;
    config->polarity = kSPI_ClockPolarityActiveHigh;
    config->phase = kSPI_ClockPhaseFirstEdge;
    config->direction = kSPI_MsbFirst;
    config->baudRate_Bps = 500000U;
    config->dataWidth = kSPI_Data8Bits;
    config->sselNum = kSPI_Ssel0;
    config->txWatermark = kSPI_TxFifo0;
    config->rxWatermark = kSPI_RxFifo1;
    config->sselPol = kSPI_SpolActiveAllLow;
    config->delayConfig.preDelay = 0U;
    config->delayConfig.postDelay = 0U;
    config->delayConfig.frameDelay = 0U;
    config->delayConfig.transferDelay = 0U;
}
status_t SPI_MasterInit(SPI_Type *base, const spi_master_config_t *config, uint32_t srcClock_Hz)
{
    int32_t result = 0, instance = 0;
    uint32_t tmp;

    /* assert params */
    assert(!((NULL == base) || (NULL == config) || (0 == srcClock_Hz)));
    if ((NULL == base) || (NULL == config) || (0 == srcClock_Hz))
    {
        return kStatus_InvalidArgument;
    }

    /* initialize flexcomm to SPI mode */
    result = FLEXCOMM_Init(base, FLEXCOMM_PERIPH_SPI);
    assert(kStatus_Success == result);
    if (kStatus_Success != result)
    {
        return result;
    }

    /* set divider */
    result = SPI_MasterSetBaud(base, config->baudRate_Bps, srcClock_Hz);
    if (kStatus_Success != result)
    {
        return result;
    }

    /* get instance number */
    instance = SPI_GetInstance(base);
    assert(instance >= 0);

    /* configure SPI mode */
    tmp = base->CFG;
    tmp &= ~(SPI_CFG_MASTER_MASK | SPI_CFG_LSBF_MASK | SPI_CFG_CPHA_MASK | SPI_CFG_CPOL_MASK | SPI_CFG_LOOP_MASK |
             SPI_CFG_ENABLE_MASK | SPI_SSELPOL_MASK);
    /* phase */
    tmp |= SPI_CFG_CPHA(config->phase);
    /* polarity */
    tmp |= SPI_CFG_CPOL(config->polarity);
    /* direction */
    tmp |= SPI_CFG_LSBF(config->direction);
    /* master mode */
    tmp |= SPI_CFG_MASTER(1);
    /* loopback */
    tmp |= SPI_CFG_LOOP(config->enableLoopback);
    /* configure active level for all CS */
    tmp |= ((uint32_t)config->sselPol & (SPI_SSELPOL_MASK));
    base->CFG = tmp;

    /* store configuration */
    g_configs[instance].dataWidth = config->dataWidth;
    g_configs[instance].sselNum = config->sselNum;

    /* enable FIFOs */
    base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;
    base->FIFOCFG |= SPI_FIFOCFG_ENABLETX_MASK | SPI_FIFOCFG_ENABLERX_MASK;

    /* trigger level - empty txFIFO, one item in rxFIFO */
    tmp = base->FIFOTRIG & (~(SPI_FIFOTRIG_RXLVL_MASK | SPI_FIFOTRIG_TXLVL_MASK));
    tmp |= SPI_FIFOTRIG_TXLVL(config->txWatermark) | SPI_FIFOTRIG_RXLVL(config->rxWatermark);
    /* enable generating interrupts for FIFOTRIG levels */
    tmp |= SPI_FIFOTRIG_TXLVLENA_MASK | SPI_FIFOTRIG_RXLVLENA_MASK;
    /* set FIFOTRIG */
    base->FIFOTRIG = tmp;

    /* Set the delay configuration. */
    SPI_SetTransferDelay(base, &config->delayConfig);
    /* Set the dummy data. */
    SPI_SetDummyData(base, (uint8_t)SPI_DUMMYDATA);

    SPI_Enable(base, config->enableMaster);
    return kStatus_Success;
}
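/*
 * Illustrative usage sketch (not part of the original driver): bringing up a
 * FLEXCOMM SPI master with the defaults above. The SPI0 instance and the 12 MHz
 * function-clock figure are assumptions for the example; the actual clock setup
 * depends on the board support code.
 *
 *   spi_master_config_t masterConfig;
 *   SPI_MasterGetDefaultConfig(&masterConfig);
 *   masterConfig.baudRate_Bps = 500000U;
 *   masterConfig.sselNum      = kSPI_Ssel0;
 *   // srcClock_Hz must be the FLEXCOMM function clock feeding this instance
 *   SPI_MasterInit(SPI0, &masterConfig, 12000000U);
 */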
void SPI_SlaveGetDefaultConfig(spi_slave_config_t *config)
{
    assert(NULL != config);

    config->enableSlave = true;
    config->polarity = kSPI_ClockPolarityActiveHigh;
    config->phase = kSPI_ClockPhaseFirstEdge;
    config->direction = kSPI_MsbFirst;
    config->dataWidth = kSPI_Data8Bits;
    config->txWatermark = kSPI_TxFifo0;
    config->rxWatermark = kSPI_RxFifo1;
    config->sselPol = kSPI_SpolActiveAllLow;
}
status_t SPI_SlaveInit(SPI_Type *base, const spi_slave_config_t *config)
{
    int32_t result = 0, instance;
    uint32_t tmp;

    /* assert params */
    assert(!((NULL == base) || (NULL == config)));
    if ((NULL == base) || (NULL == config))
    {
        return kStatus_InvalidArgument;
    }

    /* configure flexcomm to SPI, enable clock gate */
    result = FLEXCOMM_Init(base, FLEXCOMM_PERIPH_SPI);
    assert(kStatus_Success == result);
    if (kStatus_Success != result)
    {
        return result;
    }

    instance = SPI_GetInstance(base);

    /* configure SPI mode */
    tmp = base->CFG;
    tmp &= ~(SPI_CFG_MASTER_MASK | SPI_CFG_LSBF_MASK | SPI_CFG_CPHA_MASK | SPI_CFG_CPOL_MASK | SPI_CFG_ENABLE_MASK |
             SPI_SSELPOL_MASK);
    /* phase */
    tmp |= SPI_CFG_CPHA(config->phase);
    /* polarity */
    tmp |= SPI_CFG_CPOL(config->polarity);
    /* direction */
    tmp |= SPI_CFG_LSBF(config->direction);
    /* configure active level for all CS */
    tmp |= ((uint32_t)config->sselPol & (SPI_SSELPOL_MASK));
    base->CFG = tmp;

    /* store configuration */
    g_configs[instance].dataWidth = config->dataWidth;

    /* empty and enable FIFOs */
    base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;
    base->FIFOCFG |= SPI_FIFOCFG_ENABLETX_MASK | SPI_FIFOCFG_ENABLERX_MASK;

    /* trigger level - empty txFIFO, one item in rxFIFO */
    tmp = base->FIFOTRIG & (~(SPI_FIFOTRIG_RXLVL_MASK | SPI_FIFOTRIG_TXLVL_MASK));
    tmp |= SPI_FIFOTRIG_TXLVL(config->txWatermark) | SPI_FIFOTRIG_RXLVL(config->rxWatermark);
    /* enable generating interrupts for FIFOTRIG levels */
    tmp |= SPI_FIFOTRIG_TXLVLENA_MASK | SPI_FIFOTRIG_RXLVLENA_MASK;
    /* set FIFOTRIG */
    base->FIFOTRIG = tmp;

    SPI_SetDummyData(base, (uint8_t)SPI_DUMMYDATA);

    SPI_Enable(base, config->enableSlave);
    return kStatus_Success;
}
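/*
 * Illustrative usage sketch (not part of the original driver): slave-mode setup
 * mirrors the master path but needs no clock frequency or baudrate. SPI1 is an
 * assumption for the example.
 *
 *   spi_slave_config_t slaveConfig;
 *   SPI_SlaveGetDefaultConfig(&slaveConfig);
 *   SPI_SlaveInit(SPI1, &slaveConfig);
 */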
void SPI_Deinit(SPI_Type *base)
{
    /* Assert arguments */
    assert(NULL != base);

    /* Disable interrupts, disable dma requests, disable peripheral */
    base->FIFOINTENCLR = SPI_FIFOINTENCLR_TXERR_MASK | SPI_FIFOINTENCLR_RXERR_MASK | SPI_FIFOINTENCLR_TXLVL_MASK |
                         SPI_FIFOINTENCLR_RXLVL_MASK;
    base->FIFOCFG &= ~(SPI_FIFOCFG_DMATX_MASK | SPI_FIFOCFG_DMARX_MASK);
    base->CFG &= ~(SPI_CFG_ENABLE_MASK);
}

void SPI_EnableTxDMA(SPI_Type *base, bool enable)
{
    if (enable)
    {
        base->FIFOCFG |= SPI_FIFOCFG_DMATX_MASK;
    }
    else
    {
        base->FIFOCFG &= ~SPI_FIFOCFG_DMATX_MASK;
    }
}

void SPI_EnableRxDMA(SPI_Type *base, bool enable)
{
    if (enable)
    {
        base->FIFOCFG |= SPI_FIFOCFG_DMARX_MASK;
    }
    else
    {
        base->FIFOCFG &= ~SPI_FIFOCFG_DMARX_MASK;
    }
}
status_t SPI_MasterSetBaud(SPI_Type *base, uint32_t baudrate_Bps, uint32_t srcClock_Hz)
{
    uint32_t tmp;

    /* assert params */
    assert(!((NULL == base) || (0 == baudrate_Bps) || (0 == srcClock_Hz)));
    if ((NULL == base) || (0 == baudrate_Bps) || (0 == srcClock_Hz))
    {
        return kStatus_InvalidArgument;
    }

    /* calculate the divider value for the requested baudrate */
    tmp = (srcClock_Hz / baudrate_Bps) - 1;
    if (tmp > 0xFFFF)
    {
        return kStatus_SPI_BaudrateNotSupport;
    }
    base->DIV &= ~SPI_DIV_DIVVAL_MASK;
    base->DIV |= SPI_DIV_DIVVAL(tmp);
    return kStatus_Success;
}
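/*
 * Worked example (illustrative only, clock values are assumptions): with a 12 MHz
 * function clock and a requested 500000 Bps, DIVVAL = (12000000 / 500000) - 1 = 23,
 * so SCK = 12 MHz / (23 + 1) = 500 kHz exactly. Because the division truncates, a
 * request of 5000000 Bps from the same clock gives DIVVAL = 1 and an actual rate of
 * 6 MHz, i.e. the achieved rate can come out above the requested one.
 */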
void SPI_WriteData(SPI_Type *base, uint16_t data, uint32_t configFlags)
{
    uint32_t control = 0;
    int32_t instance;

    /* check params */
    assert(NULL != base);
    /* get and check instance */
    instance = SPI_GetInstance(base);
    assert(!(instance < 0));
    if (instance < 0)
    {
        return;
    }

    /* set data width */
    control |= SPI_FIFOWR_LEN(g_configs[instance].dataWidth);
    /* set ssel */
    control |= (SPI_DEASSERT_ALL & (~SPI_DEASSERTNUM_SSEL(g_configs[instance].sselNum)));
    /* mask configFlags */
    control |= (configFlags & SPI_FIFOWR_FLAGS_MASK);
    /* control should not affect lower 16 bits */
    assert(!(control & 0xFFFF));
    base->FIFOWR = data | control;
}
status_t SPI_MasterTransferCreateHandle(SPI_Type *base,
                                        spi_master_handle_t *handle,
                                        spi_master_callback_t callback,
                                        void *userData)
{
    int32_t instance = 0;

    /* check 'base' */
    assert(!(NULL == base));
    if (NULL == base)
    {
        return kStatus_InvalidArgument;
    }
    /* check 'handle' */
    assert(!(NULL == handle));
    if (NULL == handle)
    {
        return kStatus_InvalidArgument;
    }
    /* get flexcomm instance by 'base' param */
    instance = SPI_GetInstance(base);
    assert(!(instance < 0));
    if (instance < 0)
    {
        return kStatus_InvalidArgument;
    }

    memset(handle, 0, sizeof(*handle));
    /* Initialize the handle */
    if (base->CFG & SPI_CFG_MASTER_MASK)
    {
        FLEXCOMM_SetIRQHandler(base, (flexcomm_irq_handler_t)SPI_MasterTransferHandleIRQ, handle);
    }
    else
    {
        FLEXCOMM_SetIRQHandler(base, (flexcomm_irq_handler_t)SPI_SlaveTransferHandleIRQ, handle);
    }

    handle->dataWidth = g_configs[instance].dataWidth;
    /* in slave mode, the sselNum is not important */
    handle->sselNum = g_configs[instance].sselNum;
    handle->txWatermark = (spi_txfifo_watermark_t)SPI_FIFOTRIG_TXLVL_GET(base);
    handle->rxWatermark = (spi_rxfifo_watermark_t)SPI_FIFOTRIG_RXLVL_GET(base);
    handle->callback = callback;
    handle->userData = userData;

    /* Enable SPI NVIC */
    EnableIRQ(s_spiIRQ[instance]);

    return kStatus_Success;
}
status_t SPI_MasterTransferBlocking(SPI_Type *base, spi_transfer_t *xfer)
{
    int32_t instance;
    uint32_t tx_ctrl = 0, last_ctrl = 0;
    uint32_t tmp32, rxRemainingBytes, txRemainingBytes, dataWidth;
    uint32_t toReceiveCount = 0;
    uint8_t *txData, *rxData;
    uint32_t fifoDepth;

    /* check params */
    assert(!((NULL == base) || (NULL == xfer) || ((NULL == xfer->txData) && (NULL == xfer->rxData))));
    if ((NULL == base) || (NULL == xfer) || ((NULL == xfer->txData) && (NULL == xfer->rxData)))
    {
        return kStatus_InvalidArgument;
    }

    fifoDepth = SPI_FIFO_DEPTH(base);
    txData = xfer->txData;
    rxData = xfer->rxData;
    txRemainingBytes = txData ? xfer->dataSize : 0;
    rxRemainingBytes = rxData ? xfer->dataSize : 0;

    instance = SPI_GetInstance(base);
    assert(instance >= 0);
    dataWidth = g_configs[instance].dataWidth;

    /* dataSize (in bytes) is not aligned to 16bit (2B) transfer */
    assert(!((dataWidth > kSPI_Data8Bits) && (xfer->dataSize & 0x1)));
    if ((dataWidth > kSPI_Data8Bits) && (xfer->dataSize & 0x1))
    {
        return kStatus_InvalidArgument;
    }

    /* clear tx/rx errors and empty FIFOs */
    base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;
    base->FIFOSTAT |= SPI_FIFOSTAT_TXERR_MASK | SPI_FIFOSTAT_RXERR_MASK;
    /* select slave to talk with */
    tx_ctrl |= (SPI_DEASSERT_ALL & (~SPI_DEASSERTNUM_SSEL(g_configs[instance].sselNum)));
    /* set width of data - range asserted at entry */
    tx_ctrl |= SPI_FIFOWR_LEN(dataWidth);
    /* delay for frames */
    tx_ctrl |= (xfer->configFlags & (uint32_t)kSPI_FrameDelay) ? (uint32_t)kSPI_FrameDelay : 0;
    /* end of transfer */
    last_ctrl |= (xfer->configFlags & (uint32_t)kSPI_FrameAssert) ? (uint32_t)kSPI_FrameAssert : 0;

    /* loop until all data are sent, received, or still pending in the rxFIFO */
    while (txRemainingBytes || rxRemainingBytes || toReceiveCount)
    {
        /* if rxFIFO is not empty */
        if (base->FIFOSTAT & SPI_FIFOSTAT_RXNOTEMPTY_MASK)
        {
            tmp32 = base->FIFORD;
            /* rxBuffer is not empty */
            if (rxRemainingBytes)
            {
                *(rxData++) = tmp32;
                rxRemainingBytes--;
                /* read 16 bits at once */
                if (dataWidth > 8)
                {
                    *(rxData++) = tmp32 >> 8;
                    rxRemainingBytes--;
                }
            }
            /* decrease number of data expected to receive */
            toReceiveCount -= 1;
        }
        /* transmit if txFIFO is not full and data to receive does not exceed FIFO depth */
        if ((base->FIFOSTAT & SPI_FIFOSTAT_TXNOTFULL_MASK) && (toReceiveCount < fifoDepth) &&
            ((txRemainingBytes) || (rxRemainingBytes >= SPI_COUNT_TO_BYTES(dataWidth, toReceiveCount + 1))))
        {
            /* txBuffer is not empty */
            if (txRemainingBytes)
            {
                tmp32 = *(txData++);
                txRemainingBytes--;
                /* write 16 bit at once */
                if (dataWidth > 8)
                {
                    tmp32 |= ((uint32_t)(*(txData++))) << 8U;
                    txRemainingBytes--;
                }
                if (!txRemainingBytes)
                {
                    tx_ctrl |= last_ctrl;
                }
            }
            else
            {
                tmp32 = ((uint32_t)s_dummyData[instance] << 8U | (s_dummyData[instance]));
                /* last transfer */
                if (rxRemainingBytes == SPI_COUNT_TO_BYTES(dataWidth, toReceiveCount + 1))
                {
                    tx_ctrl |= last_ctrl;
                }
            }
            /* send data */
            tmp32 = tx_ctrl | tmp32;
            base->FIFOWR = tmp32;
            toReceiveCount += 1;
        }
    }
    /* wait if TX FIFO of previous transfer is not empty */
    while (!(base->FIFOSTAT & SPI_FIFOSTAT_TXEMPTY_MASK))
    {
    }
    return kStatus_Success;
}
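/*
 * Illustrative usage sketch (not part of the original driver): a full-duplex
 * blocking transfer. SPI0 and the buffer sizes are assumptions; the instance must
 * have been initialized with SPI_MasterInit() first.
 *
 *   uint8_t txBuf[16] = {0};
 *   uint8_t rxBuf[16] = {0};
 *   spi_transfer_t xfer = {0};
 *   xfer.txData      = txBuf;              // NULL would clock out the dummy data instead
 *   xfer.rxData      = rxBuf;              // NULL would discard the received data
 *   xfer.dataSize    = sizeof(txBuf);
 *   xfer.configFlags = kSPI_FrameAssert;   // deassert SSEL after the last word
 *   SPI_MasterTransferBlocking(SPI0, &xfer);
 */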
status_t SPI_MasterTransferNonBlocking(SPI_Type *base, spi_master_handle_t *handle, spi_transfer_t *xfer)
{
    /* check params */
    assert(
        !((NULL == base) || (NULL == handle) || (NULL == xfer) || ((NULL == xfer->txData) && (NULL == xfer->rxData))));
    if ((NULL == base) || (NULL == handle) || (NULL == xfer) || ((NULL == xfer->txData) && (NULL == xfer->rxData)))
    {
        return kStatus_InvalidArgument;
    }

    /* dataSize (in bytes) is not aligned to 16bit (2B) transfer */
    assert(!((handle->dataWidth > kSPI_Data8Bits) && (xfer->dataSize & 0x1)));
    if ((handle->dataWidth > kSPI_Data8Bits) && (xfer->dataSize & 0x1))
    {
        return kStatus_InvalidArgument;
    }

    /* Check if SPI is busy */
    if (handle->state == kStatus_SPI_Busy)
    {
        return kStatus_SPI_Busy;
    }

    /* Set the handle information */
    handle->txData = xfer->txData;
    handle->rxData = xfer->rxData;
    /* set count */
    handle->txRemainingBytes = xfer->txData ? xfer->dataSize : 0;
    handle->rxRemainingBytes = xfer->rxData ? xfer->dataSize : 0;
    handle->totalByteCount = xfer->dataSize;
    /* other options */
    handle->toReceiveCount = 0;
    handle->configFlags = xfer->configFlags;
    /* Set the SPI state to busy */
    handle->state = kStatus_SPI_Busy;

    /* clear FIFOs when transfer starts */
    base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;
    base->FIFOSTAT |= SPI_FIFOSTAT_TXERR_MASK | SPI_FIFOSTAT_RXERR_MASK;
    /* enable generating txIRQ and rxIRQ, first transfer is fired by empty txFIFO */
    base->FIFOINTENSET |= SPI_FIFOINTENSET_TXLVL_MASK | SPI_FIFOINTENSET_RXLVL_MASK;
    return kStatus_Success;
}
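/*
 * Illustrative usage sketch (not part of the original driver): an interrupt-driven
 * transfer with a completion callback. SPI0, the callback body, the flag and the
 * buffers are assumptions for the example; the transfer itself is driven by
 * SPI_MasterTransferHandleIRQ() below.
 *
 *   static volatile bool g_done = false;
 *
 *   static void ExampleCallback(SPI_Type *base, spi_master_handle_t *handle,
 *                               status_t status, void *userData)
 *   {
 *       if (status == kStatus_SPI_Idle)
 *       {
 *           g_done = true;
 *       }
 *   }
 *
 *   spi_master_handle_t g_handle;
 *   spi_transfer_t xfer = {0};
 *   xfer.txData      = txBuf;
 *   xfer.rxData      = rxBuf;
 *   xfer.dataSize    = sizeof(txBuf);
 *   xfer.configFlags = kSPI_FrameAssert;
 *   SPI_MasterTransferCreateHandle(SPI0, &g_handle, ExampleCallback, NULL);
 *   SPI_MasterTransferNonBlocking(SPI0, &g_handle, &xfer);
 *   while (!g_done) { }
 */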
status_t SPI_MasterHalfDuplexTransferBlocking(SPI_Type *base, spi_half_duplex_transfer_t *xfer)
{
    assert(xfer);

    spi_transfer_t tempXfer = {0};
    status_t status;

    if (xfer->isTransmitFirst)
    {
        tempXfer.txData = xfer->txData;
        tempXfer.rxData = NULL;
        tempXfer.dataSize = xfer->txDataSize;
    }
    else
    {
        tempXfer.txData = NULL;
        tempXfer.rxData = xfer->rxData;
        tempXfer.dataSize = xfer->rxDataSize;
    }
    /* Check whether the PCS pin should stay asserted between the transmit and receive phases. */
    if (xfer->isPcsAssertInTransfer)
    {
        tempXfer.configFlags = (xfer->configFlags) & (uint32_t)(~kSPI_FrameAssert);
    }
    else
    {
        tempXfer.configFlags = (xfer->configFlags) | kSPI_FrameAssert;
    }

    status = SPI_MasterTransferBlocking(base, &tempXfer);
    if (status != kStatus_Success)
    {
        return status;
    }

    if (xfer->isTransmitFirst)
    {
        tempXfer.txData = NULL;
        tempXfer.rxData = xfer->rxData;
        tempXfer.dataSize = xfer->rxDataSize;
    }
    else
    {
        tempXfer.txData = xfer->txData;
        tempXfer.rxData = NULL;
        tempXfer.dataSize = xfer->txDataSize;
    }
    tempXfer.configFlags = xfer->configFlags;

    /* SPI transfer blocking. */
    status = SPI_MasterTransferBlocking(base, &tempXfer);

    return status;
}
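/*
 * Illustrative usage sketch (not part of the original driver): a typical
 * command/response exchange, writing a command first and reading the reply with
 * SSEL kept asserted in between. SPI0 and the command bytes are assumptions
 * chosen only for the example.
 *
 *   uint8_t cmd[2]   = {0x9FU, 0x00U};
 *   uint8_t reply[4] = {0};
 *   spi_half_duplex_transfer_t hdXfer = {0};
 *   hdXfer.txData                = cmd;
 *   hdXfer.txDataSize            = sizeof(cmd);
 *   hdXfer.rxData                = reply;
 *   hdXfer.rxDataSize            = sizeof(reply);
 *   hdXfer.isTransmitFirst       = true;
 *   hdXfer.isPcsAssertInTransfer = true;   // keep SSEL asserted between the two phases
 *   hdXfer.configFlags           = kSPI_FrameAssert;
 *   SPI_MasterHalfDuplexTransferBlocking(SPI0, &hdXfer);
 */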
status_t SPI_MasterHalfDuplexTransferNonBlocking(SPI_Type *base,
                                                 spi_master_handle_t *handle,
                                                 spi_half_duplex_transfer_t *xfer)
{
    assert(xfer);
    assert(handle);

    spi_transfer_t tempXfer = {0};
    status_t status;

    if (xfer->isTransmitFirst)
    {
        tempXfer.txData = xfer->txData;
        tempXfer.rxData = NULL;
        tempXfer.dataSize = xfer->txDataSize;
    }
    else
    {
        tempXfer.txData = NULL;
        tempXfer.rxData = xfer->rxData;
        tempXfer.dataSize = xfer->rxDataSize;
    }
    /* Check whether the PCS pin should stay asserted between the transmit and receive phases. */
    if (xfer->isPcsAssertInTransfer)
    {
        tempXfer.configFlags = (xfer->configFlags) & (uint32_t)(~kSPI_FrameAssert);
    }
    else
    {
        tempXfer.configFlags = (xfer->configFlags) | kSPI_FrameAssert;
    }

    status = SPI_MasterTransferBlocking(base, &tempXfer);
    if (status != kStatus_Success)
    {
        return status;
    }

    if (xfer->isTransmitFirst)
    {
        tempXfer.txData = NULL;
        tempXfer.rxData = xfer->rxData;
        tempXfer.dataSize = xfer->rxDataSize;
    }
    else
    {
        tempXfer.txData = xfer->txData;
        tempXfer.rxData = NULL;
        tempXfer.dataSize = xfer->txDataSize;
    }
    tempXfer.configFlags = xfer->configFlags;

    status = SPI_MasterTransferNonBlocking(base, handle, &tempXfer);

    return status;
}
status_t SPI_MasterTransferGetCount(SPI_Type *base, spi_master_handle_t *handle, size_t *count)
{
    assert(NULL != handle);

    if (!count)
    {
        return kStatus_InvalidArgument;
    }

    /* Catch when there is not an active transfer. */
    if (handle->state != kStatus_SPI_Busy)
    {
        *count = 0;
        return kStatus_NoTransferInProgress;
    }

    *count = handle->totalByteCount - handle->rxRemainingBytes;
    return kStatus_Success;
}

void SPI_MasterTransferAbort(SPI_Type *base, spi_master_handle_t *handle)
{
    assert(NULL != handle);

    /* Disable interrupt requests */
    base->FIFOINTENSET &= ~(SPI_FIFOINTENSET_TXLVL_MASK | SPI_FIFOINTENSET_RXLVL_MASK);
    /* Empty FIFOs */
    base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;

    handle->state = kStatus_SPI_Idle;
    handle->txRemainingBytes = 0;
    handle->rxRemainingBytes = 0;
}
static void SPI_TransferHandleIRQInternal(SPI_Type *base, spi_master_handle_t *handle)
{
    uint32_t tx_ctrl = 0, last_ctrl = 0, tmp32;
    bool loopContinue;
    uint32_t fifoDepth;
    /* Get flexcomm instance by 'base' param */
    uint32_t instance = SPI_GetInstance(base);

    /* check params */
    assert((NULL != base) && (NULL != handle) && ((NULL != handle->txData) || (NULL != handle->rxData)));

    fifoDepth = SPI_FIFO_DEPTH(base);
    /* select slave to talk with */
    tx_ctrl |= (SPI_DEASSERT_ALL & SPI_ASSERTNUM_SSEL(handle->sselNum));
    /* set width of data */
    tx_ctrl |= SPI_FIFOWR_LEN(handle->dataWidth);
    /* delay for frames */
    tx_ctrl |= (handle->configFlags & (uint32_t)kSPI_FrameDelay) ? (uint32_t)kSPI_FrameDelay : 0;
    /* end of transfer */
    last_ctrl |= (handle->configFlags & (uint32_t)kSPI_FrameAssert) ? (uint32_t)kSPI_FrameAssert : 0;

    do
    {
        loopContinue = false;

        /* rxFIFO is not empty */
        if (base->FIFOSTAT & SPI_FIFOSTAT_RXNOTEMPTY_MASK)
        {
            tmp32 = base->FIFORD;
            /* rxBuffer is not empty */
            if (handle->rxRemainingBytes)
            {
                /* low byte must go first */
                *(handle->rxData++) = tmp32;
                handle->rxRemainingBytes--;
                /* read 16 bits at once */
                if (handle->dataWidth > kSPI_Data8Bits)
                {
                    *(handle->rxData++) = tmp32 >> 8;
                    handle->rxRemainingBytes--;
                }
            }
            /* decrease number of data expected to receive */
            handle->toReceiveCount -= 1;
            loopContinue = true;
        }

        /* - txFIFO is not full
         * - we cannot cause rxFIFO overflow by sending more data than is the depth of FIFO
         * - txBuffer is not empty or the next 'toReceiveCount' data can fit into rxBuffer
         */
        if ((base->FIFOSTAT & SPI_FIFOSTAT_TXNOTFULL_MASK) && (handle->toReceiveCount < fifoDepth) &&
            ((handle->txRemainingBytes) ||
             (handle->rxRemainingBytes >= SPI_COUNT_TO_BYTES(handle->dataWidth, handle->toReceiveCount + 1))))
        {
            /* txBuffer is not empty */
            if (handle->txRemainingBytes)
            {
                /* low byte must go first */
                tmp32 = *(handle->txData++);
                handle->txRemainingBytes--;
                /* write 16 bit at once */
                if (handle->dataWidth > kSPI_Data8Bits)
                {
                    tmp32 |= ((uint32_t)(*(handle->txData++))) << 8U;
                    handle->txRemainingBytes--;
                }
                /* last transfer */
                if (!handle->txRemainingBytes)
                {
                    tx_ctrl |= last_ctrl;
                }
            }
            else
            {
                tmp32 = ((uint32_t)s_dummyData[instance] << 8U | (s_dummyData[instance]));
                /* last transfer */
                if (handle->rxRemainingBytes == SPI_COUNT_TO_BYTES(handle->dataWidth, handle->toReceiveCount + 1))
                {
                    tx_ctrl |= last_ctrl;
                }
            }
            /* send data */
            tmp32 = tx_ctrl | tmp32;
            base->FIFOWR = tmp32;
            /* increase number of expected data to receive */
            handle->toReceiveCount += 1;
            loopContinue = true;
        }
    } while (loopContinue);
}
void SPI_MasterTransferHandleIRQ(SPI_Type *base, spi_master_handle_t *handle)
{
    assert((NULL != base) && (NULL != handle));

    /* IRQ behaviour:
     * - The first interrupt is triggered by an empty txFIFO. The transfer function
     *   then empties the rxFIFO and refills the txFIFO in an interleaved fashion,
     *   so that as many items as possible are processed per interrupt.
     * - Subsequent IRQs can be:
     *   an rxIRQ from a non-empty rxFIFO, which requires emptying the rxFIFO;
     *   a txIRQ from an empty txFIFO, which requires refilling the txFIFO.
     * - The last interrupt is triggered by an empty txFIFO. The final state is
     *   recognized by empty rxBuffer and txBuffer: when there is nothing left to
     *   receive or send, both operations have finished and the interrupts can be
     *   disabled.
     */
    /* Data to send or read or expected to receive */
    if ((handle->txRemainingBytes) || (handle->rxRemainingBytes) || (handle->toReceiveCount))
    {
        /* Transmit or receive data */
        SPI_TransferHandleIRQInternal(base, handle);
        /* No data to send or read or receive. Transfer ends. Set txTrigger to 0 level and
         * enable txIRQ to confirm when txFIFO becomes empty */
        if ((!handle->txRemainingBytes) && (!handle->rxRemainingBytes) && (!handle->toReceiveCount))
        {
            base->FIFOTRIG = base->FIFOTRIG & (~SPI_FIFOTRIG_TXLVL_MASK);
            base->FIFOINTENSET |= SPI_FIFOINTENSET_TXLVL_MASK;
        }
        else
        {
            uint32_t rxRemainingCount = SPI_BYTES_TO_COUNT(handle->dataWidth, handle->rxRemainingBytes);
            /* If there are no data to send, or the rxFIFO is already filled with the necessary number
             * of dummy data, disable txIRQ. From this point only rxIRQ is used to receive data without
             * any transmission */
            if ((!handle->txRemainingBytes) && (rxRemainingCount <= handle->toReceiveCount))
            {
                base->FIFOINTENCLR = SPI_FIFOINTENCLR_TXLVL_MASK;
            }
            /* Nothing to receive or transmit, but we still have pending data which are below rxLevel.
             * Cannot clear rxFIFO, txFIFO might be still active */
            if (rxRemainingCount == 0)
            {
                if ((handle->txRemainingBytes == 0) && (handle->toReceiveCount != 0) &&
                    (handle->toReceiveCount < SPI_FIFOTRIG_RXLVL_GET(base) + 1))
                {
                    base->FIFOTRIG =
                        (base->FIFOTRIG & (~SPI_FIFOTRIG_RXLVL_MASK)) | SPI_FIFOTRIG_RXLVL(handle->toReceiveCount - 1);
                }
            }
            /* Expected to receive less data than rxLevel value, we have to update rxLevel */
            else
            {
                if (rxRemainingCount < (SPI_FIFOTRIG_RXLVL_GET(base) + 1))
                {
                    base->FIFOTRIG =
                        (base->FIFOTRIG & (~SPI_FIFOTRIG_RXLVL_MASK)) | SPI_FIFOTRIG_RXLVL(rxRemainingCount - 1);
                }
            }
        }
    }
    else
    {
        /* Empty txFIFO is confirmed. Disable IRQs and restore trigger values */
        base->FIFOINTENCLR = SPI_FIFOINTENCLR_RXLVL_MASK | SPI_FIFOINTENCLR_TXLVL_MASK;
        base->FIFOTRIG = (base->FIFOTRIG & (~(SPI_FIFOTRIG_RXLVL_MASK | SPI_FIFOTRIG_TXLVL_MASK))) |
                         SPI_FIFOTRIG_RXLVL(handle->rxWatermark) | SPI_FIFOTRIG_TXLVL(handle->txWatermark);
        /* set idle state and call user callback */
        handle->state = kStatus_SPI_Idle;
        if (handle->callback)
        {
            (handle->callback)(base, handle, handle->state, handle->userData);
        }
    }
}