/* fsl_dma.c */
  1. /*
  2. * The Clear BSD License
  3. * Copyright (c) 2016, Freescale Semiconductor, Inc.
  4. * Copyright 2016-2017 NXP
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without modification,
  8. * are permitted (subject to the limitations in the disclaimer below) provided
  9. * that the following conditions are met:
  10. *
  11. * o Redistributions of source code must retain the above copyright notice, this list
  12. * of conditions and the following disclaimer.
  13. *
  14. * o Redistributions in binary form must reproduce the above copyright notice, this
  15. * list of conditions and the following disclaimer in the documentation and/or
  16. * other materials provided with the distribution.
  17. *
  18. * o Neither the name of the copyright holder nor the names of its
  19. * contributors may be used to endorse or promote products derived from this
  20. * software without specific prior written permission.
  21. *
  22. * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
  23. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
  24. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  25. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  26. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
  27. * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  28. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  29. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  30. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  32. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #include "fsl_dma.h"
  35. /*******************************************************************************
  36. * Definitions
  37. ******************************************************************************/
  38. /* Component ID definition, used by tools. */
  39. #ifndef FSL_COMPONENT_ID
  40. #define FSL_COMPONENT_ID "platform.drivers.lpc_dma"
  41. #endif
  42. /*******************************************************************************
  43. * Prototypes
  44. ******************************************************************************/
/*!
 * @brief Get instance number for DMA.
 *
 * @param base DMA peripheral base address.
 * @return Index of @p base within the s_dmaBases mapping table.
 */
static uint32_t DMA_GetInstance(DMA_Type *base);

/*!
 * @brief Get virtual channel number.
 *
 * The virtual channel space concatenates the channels of all DMA instances.
 *
 * @param base DMA peripheral base address.
 * @return Virtual channel index at which this instance's channel 0 begins.
 */
static uint32_t DMA_GetVirtualStartChannel(DMA_Type *base);
  57. /*******************************************************************************
  58. * Variables
  59. ******************************************************************************/
/*! @brief Array to map DMA instance number to base pointer. */
static DMA_Type *const s_dmaBases[] = DMA_BASE_PTRS;

#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
/*! @brief Array to map DMA instance number to clock name. */
static const clock_ip_name_t s_dmaClockName[] = DMA_CLOCKS;
#endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */

/*! @brief Array to map DMA instance number to IRQ number. */
static const IRQn_Type s_dmaIRQNumber[] = DMA_IRQS;

/*! @brief Pointers to transfer handle for each DMA channel, indexed by virtual
 * (global) channel number. Written by DMA_CreateHandle, read by DMA_IRQHandle. */
static dma_handle_t *s_DMAHandle[FSL_FEATURE_DMA_ALL_CHANNELS];

/*! @brief Static table of descriptors: one row per DMA instance, one entry per
 * channel. Aligned to FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE as required for the
 * SRAMBASE descriptor table.
 * NOTE(review): no alignment directive is emitted for compilers other than
 * IAR/ARMCC/GCC - confirm additional toolchains are covered elsewhere. */
#if defined(__ICCARM__)
#pragma data_alignment = FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE
static dma_descriptor_t s_dma_descriptor_table[FSL_FEATURE_SOC_DMA_COUNT][FSL_FEATURE_DMA_MAX_CHANNELS] = {0};
#elif defined(__CC_ARM)
__attribute__((aligned(FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE)))
static dma_descriptor_t s_dma_descriptor_table[FSL_FEATURE_SOC_DMA_COUNT][FSL_FEATURE_DMA_MAX_CHANNELS] = {0};
#elif defined(__GNUC__)
__attribute__((aligned(FSL_FEATURE_DMA_DESCRIPTOR_ALIGN_SIZE)))
static dma_descriptor_t s_dma_descriptor_table[FSL_FEATURE_SOC_DMA_COUNT][FSL_FEATURE_DMA_MAX_CHANNELS] = {0};
#endif
  81. /*******************************************************************************
  82. * Code
  83. ******************************************************************************/
  84. static uint32_t DMA_GetInstance(DMA_Type *base)
  85. {
  86. int32_t instance;
  87. /* Find the instance index from base address mappings. */
  88. for (instance = 0; instance < ARRAY_SIZE(s_dmaBases); instance++)
  89. {
  90. if (s_dmaBases[instance] == base)
  91. {
  92. break;
  93. }
  94. }
  95. assert(instance < ARRAY_SIZE(s_dmaBases));
  96. return instance;
  97. }
  98. static uint32_t DMA_GetVirtualStartChannel(DMA_Type *base)
  99. {
  100. uint32_t startChannel = 0, instance = 0;
  101. uint32_t i = 0;
  102. instance = DMA_GetInstance(base);
  103. /* Compute start channel */
  104. for (i = 0; i < instance; i++)
  105. {
  106. startChannel += FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(s_dmaBases[i]);
  107. }
  108. return startChannel;
  109. }
  110. void DMA_Init(DMA_Type *base)
  111. {
  112. #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
  113. /* enable dma clock gate */
  114. CLOCK_EnableClock(s_dmaClockName[DMA_GetInstance(base)]);
  115. #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
  116. /* set descriptor table */
  117. base->SRAMBASE = (uint32_t)s_dma_descriptor_table;
  118. /* enable dma peripheral */
  119. base->CTRL |= DMA_CTRL_ENABLE_MASK;
  120. }
  121. void DMA_Deinit(DMA_Type *base)
  122. {
  123. #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
  124. CLOCK_DisableClock(s_dmaClockName[DMA_GetInstance(base)]);
  125. #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
  126. /* Disable DMA peripheral */
  127. base->CTRL &= ~(DMA_CTRL_ENABLE_MASK);
  128. }
  129. void DMA_ConfigureChannelTrigger(DMA_Type *base, uint32_t channel, dma_channel_trigger_t *trigger)
  130. {
  131. assert((channel < FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base)) && (NULL != trigger));
  132. uint32_t tmp = (DMA_CHANNEL_CFG_HWTRIGEN_MASK | DMA_CHANNEL_CFG_TRIGPOL_MASK | DMA_CHANNEL_CFG_TRIGTYPE_MASK |
  133. DMA_CHANNEL_CFG_TRIGBURST_MASK | DMA_CHANNEL_CFG_BURSTPOWER_MASK |
  134. DMA_CHANNEL_CFG_SRCBURSTWRAP_MASK | DMA_CHANNEL_CFG_DSTBURSTWRAP_MASK);
  135. tmp = base->CHANNEL[channel].CFG & (~tmp);
  136. tmp |= (uint32_t)(trigger->type) | (uint32_t)(trigger->burst) | (uint32_t)(trigger->wrap);
  137. base->CHANNEL[channel].CFG = tmp;
  138. }
  139. /*!
  140. * @brief Gets the remaining bytes of the current DMA descriptor transfer.
  141. *
  142. * @param base DMA peripheral base address.
  143. * @param channel DMA channel number.
  144. * @return The number of bytes which have not been transferred yet.
  145. */
  146. uint32_t DMA_GetRemainingBytes(DMA_Type *base, uint32_t channel)
  147. {
  148. assert(channel < FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base));
  149. /* NOTE: when descriptors are chained, ACTIVE bit is set for whole chain. It makes
  150. * impossible to distinguish between:
  151. * - transfer finishes (represented by value '0x3FF')
  152. * - and remaining 1024 bytes to transfer (value 0x3FF)
  153. * for all descriptor in chain, except the last one.
  154. * If you decide to use this function, please use 1023 transfers as maximal value */
  155. /* Channel not active (transfer finished) and value is 0x3FF - nothing to transfer */
  156. if ((!DMA_ChannelIsActive(base, channel)) &&
  157. (0x3FF == ((base->CHANNEL[channel].XFERCFG & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >>
  158. DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT)))
  159. {
  160. return 0;
  161. }
  162. return ((base->CHANNEL[channel].XFERCFG & DMA_CHANNEL_XFERCFG_XFERCOUNT_MASK) >>
  163. DMA_CHANNEL_XFERCFG_XFERCOUNT_SHIFT) +
  164. 1;
  165. }
  166. static void DMA_SetupDescriptor(
  167. dma_descriptor_t *desc, uint32_t xfercfg, void *srcEndAddr, void *dstEndAddr, void *nextDesc)
  168. {
  169. desc->xfercfg = xfercfg;
  170. desc->srcEndAddr = srcEndAddr;
  171. desc->dstEndAddr = dstEndAddr;
  172. desc->linkToNextDesc = nextDesc;
  173. }
  174. /* Verify and convert dma_xfercfg_t to XFERCFG register */
  175. static void DMA_SetupXferCFG(dma_xfercfg_t *xfercfg, uint32_t *xfercfg_addr)
  176. {
  177. assert(xfercfg != NULL);
  178. /* check source increment */
  179. assert((xfercfg->srcInc == 0) || (xfercfg->srcInc == 1) || (xfercfg->srcInc == 2) || (xfercfg->srcInc == 4));
  180. /* check destination increment */
  181. assert((xfercfg->dstInc == 0) || (xfercfg->dstInc == 1) || (xfercfg->dstInc == 2) || (xfercfg->dstInc == 4));
  182. /* check data width */
  183. assert((xfercfg->byteWidth == 1) || (xfercfg->byteWidth == 2) || (xfercfg->byteWidth == 4));
  184. /* check transfer count */
  185. assert(xfercfg->transferCount <= DMA_MAX_TRANSFER_COUNT);
  186. uint32_t xfer = 0, tmp;
  187. /* set valid flag - descriptor is ready now */
  188. xfer |= DMA_CHANNEL_XFERCFG_CFGVALID(xfercfg->valid ? 1 : 0);
  189. /* set reload - allow link to next descriptor */
  190. xfer |= DMA_CHANNEL_XFERCFG_RELOAD(xfercfg->reload ? 1 : 0);
  191. /* set swtrig flag - start transfer */
  192. xfer |= DMA_CHANNEL_XFERCFG_SWTRIG(xfercfg->swtrig ? 1 : 0);
  193. /* set transfer count */
  194. xfer |= DMA_CHANNEL_XFERCFG_CLRTRIG(xfercfg->clrtrig ? 1 : 0);
  195. /* set INTA */
  196. xfer |= DMA_CHANNEL_XFERCFG_SETINTA(xfercfg->intA ? 1 : 0);
  197. /* set INTB */
  198. xfer |= DMA_CHANNEL_XFERCFG_SETINTB(xfercfg->intB ? 1 : 0);
  199. /* set data width */
  200. tmp = xfercfg->byteWidth == 4 ? 2 : xfercfg->byteWidth - 1;
  201. xfer |= DMA_CHANNEL_XFERCFG_WIDTH(tmp);
  202. /* set source increment value */
  203. tmp = xfercfg->srcInc == 4 ? 3 : xfercfg->srcInc;
  204. xfer |= DMA_CHANNEL_XFERCFG_SRCINC(tmp);
  205. /* set destination increment value */
  206. tmp = xfercfg->dstInc == 4 ? 3 : xfercfg->dstInc;
  207. xfer |= DMA_CHANNEL_XFERCFG_DSTINC(tmp);
  208. /* set transfer count */
  209. xfer |= DMA_CHANNEL_XFERCFG_XFERCOUNT(xfercfg->transferCount - 1);
  210. /* store xferCFG */
  211. *xfercfg_addr = xfer;
  212. }
  213. void DMA_CreateDescriptor(dma_descriptor_t *desc, dma_xfercfg_t *xfercfg, void *srcAddr, void *dstAddr, void *nextDesc)
  214. {
  215. uint32_t xfercfg_reg = 0;
  216. assert((NULL != desc) && (0 == (uint32_t)desc % 16) && (NULL != xfercfg));
  217. assert((NULL != srcAddr) && (0 == (uint32_t)srcAddr % xfercfg->byteWidth));
  218. assert((NULL != dstAddr) && (0 == (uint32_t)dstAddr % xfercfg->byteWidth));
  219. assert((NULL == nextDesc) || (0 == (uint32_t)nextDesc % 16));
  220. /* Setup channel configuration */
  221. DMA_SetupXferCFG(xfercfg, &xfercfg_reg);
  222. /* Set descriptor structure */
  223. DMA_SetupDescriptor(
  224. desc, xfercfg_reg, (uint8_t *)srcAddr + (xfercfg->srcInc * xfercfg->byteWidth * (xfercfg->transferCount - 1)),
  225. (uint8_t *)dstAddr + (xfercfg->dstInc * xfercfg->byteWidth * (xfercfg->transferCount - 1)), nextDesc);
  226. }
/*!
 * @brief Aborts the in-flight transfer on the handle's channel.
 *
 * Hardware-mandated sequence: disable the channel, spin until its BUSY flag
 * clears, set the channel's ABORT bit, then re-enable the channel so it can
 * accept a new transfer. The statement order here must not be changed.
 *
 * @param handle DMA handle; must not be NULL.
 */
void DMA_AbortTransfer(dma_handle_t *handle)
{
    assert(NULL != handle);

    DMA_DisableChannel(handle->base, handle->channel);
    /* Busy-wait until the channel is idle; aborting a busy channel is not allowed. */
    while (DMA_COMMON_CONST_REG_GET(handle->base, handle->channel, BUSY) & (1U << DMA_CHANNEL_INDEX(handle->channel)))
    {
    }
    /* Set this channel's bit in the ABORT register. */
    DMA_COMMON_REG_GET(handle->base, handle->channel, ABORT) |= 1U << DMA_CHANNEL_INDEX(handle->channel);
    DMA_EnableChannel(handle->base, handle->channel);
}
  237. void DMA_CreateHandle(dma_handle_t *handle, DMA_Type *base, uint32_t channel)
  238. {
  239. assert((NULL != handle) && (channel < FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base)));
  240. int32_t dmaInstance;
  241. uint32_t startChannel = 0;
  242. /* base address is invalid DMA instance */
  243. dmaInstance = DMA_GetInstance(base);
  244. startChannel = DMA_GetVirtualStartChannel(base);
  245. memset(handle, 0, sizeof(*handle));
  246. handle->base = base;
  247. handle->channel = channel;
  248. s_DMAHandle[startChannel + channel] = handle;
  249. /* Enable NVIC interrupt */
  250. EnableIRQ(s_dmaIRQNumber[dmaInstance]);
  251. }
  252. void DMA_SetCallback(dma_handle_t *handle, dma_callback callback, void *userData)
  253. {
  254. assert(handle != NULL);
  255. handle->callback = callback;
  256. handle->userData = userData;
  257. }
  258. void DMA_PrepareTransfer(dma_transfer_config_t *config,
  259. void *srcAddr,
  260. void *dstAddr,
  261. uint32_t byteWidth,
  262. uint32_t transferBytes,
  263. dma_transfer_type_t type,
  264. void *nextDesc)
  265. {
  266. uint32_t xfer_count;
  267. assert((NULL != config) && (NULL != srcAddr) && (NULL != dstAddr));
  268. assert((byteWidth == 1) || (byteWidth == 2) || (byteWidth == 4));
  269. /* check max */
  270. xfer_count = transferBytes / byteWidth;
  271. assert((xfer_count <= DMA_MAX_TRANSFER_COUNT) && (0 == transferBytes % byteWidth));
  272. memset(config, 0, sizeof(*config));
  273. switch (type)
  274. {
  275. case kDMA_MemoryToMemory:
  276. config->xfercfg.srcInc = 1;
  277. config->xfercfg.dstInc = 1;
  278. config->isPeriph = false;
  279. break;
  280. case kDMA_PeripheralToMemory:
  281. /* Peripheral register - source doesn't increment */
  282. config->xfercfg.srcInc = 0;
  283. config->xfercfg.dstInc = 1;
  284. config->isPeriph = true;
  285. break;
  286. case kDMA_MemoryToPeripheral:
  287. /* Peripheral register - destination doesn't increment */
  288. config->xfercfg.srcInc = 1;
  289. config->xfercfg.dstInc = 0;
  290. config->isPeriph = true;
  291. break;
  292. case kDMA_StaticToStatic:
  293. config->xfercfg.srcInc = 0;
  294. config->xfercfg.dstInc = 0;
  295. config->isPeriph = true;
  296. break;
  297. default:
  298. return;
  299. }
  300. config->dstAddr = (uint8_t *)dstAddr;
  301. config->srcAddr = (uint8_t *)srcAddr;
  302. config->nextDesc = (uint8_t *)nextDesc;
  303. config->xfercfg.transferCount = xfer_count;
  304. config->xfercfg.byteWidth = byteWidth;
  305. config->xfercfg.intA = true;
  306. config->xfercfg.reload = nextDesc != NULL;
  307. config->xfercfg.valid = true;
  308. }
  309. status_t DMA_SubmitTransfer(dma_handle_t *handle, dma_transfer_config_t *config)
  310. {
  311. assert((NULL != handle) && (NULL != config));
  312. uint32_t instance = DMA_GetInstance(handle->base);
  313. /* Previous transfer has not finished */
  314. if (DMA_ChannelIsActive(handle->base, handle->channel))
  315. {
  316. return kStatus_DMA_Busy;
  317. }
  318. /* enable/disable peripheral request */
  319. if (config->isPeriph)
  320. {
  321. DMA_EnableChannelPeriphRq(handle->base, handle->channel);
  322. }
  323. else
  324. {
  325. DMA_DisableChannelPeriphRq(handle->base, handle->channel);
  326. }
  327. DMA_CreateDescriptor(&(s_dma_descriptor_table[instance][handle->channel]), &config->xfercfg, config->srcAddr,
  328. config->dstAddr, config->nextDesc);
  329. return kStatus_Success;
  330. }
  331. void DMA_StartTransfer(dma_handle_t *handle)
  332. {
  333. assert(NULL != handle);
  334. uint32_t instance = DMA_GetInstance(handle->base);
  335. /* Enable channel interrupt */
  336. DMA_EnableChannelInterrupts(handle->base, handle->channel);
  337. /* If HW trigger is enabled - disable SW trigger */
  338. if (handle->base->CHANNEL[handle->channel].CFG & DMA_CHANNEL_CFG_HWTRIGEN_MASK)
  339. {
  340. s_dma_descriptor_table[instance][handle->channel].xfercfg &= ~(DMA_CHANNEL_XFERCFG_SWTRIG_MASK);
  341. }
  342. /* Otherwise enable SW trigger */
  343. else
  344. {
  345. s_dma_descriptor_table[instance][handle->channel].xfercfg |= DMA_CHANNEL_XFERCFG_SWTRIG_MASK;
  346. }
  347. /* Set channel XFERCFG register according first channel descriptor. */
  348. handle->base->CHANNEL[handle->channel].XFERCFG = s_dma_descriptor_table[instance][handle->channel].xfercfg;
  349. /* At this moment, the channel ACTIVE bit is set and application cannot modify
  350. * or start another transfer using this channel. Channel ACTIVE bit is cleared by
  351. * 'AbortTransfer' function or when the transfer finishes */
  352. }
/*!
 * @brief Common DMA interrupt dispatcher for one controller instance.
 *
 * Walks every channel of @p base, looks up the registered handle by virtual
 * channel number, and for each pending INTA / INTB / error flag clears the
 * flag and invokes the handle's callback (with success=true for INTA/INTB and
 * success=false for errors). Channels without a registered handle are skipped,
 * so their flags are intentionally left pending.
 *
 * @param base DMA peripheral base address.
 */
void DMA_IRQHandle(DMA_Type *base)
{
    dma_handle_t *handle;
    int32_t channel_index;
    uint32_t startChannel = DMA_GetVirtualStartChannel(base);
    uint32_t i = 0;

    /* Find channels that have completed transfer */
    for (i = 0; i < FSL_FEATURE_DMA_NUMBER_OF_CHANNELSn(base); i++)
    {
        handle = s_DMAHandle[i + startChannel];
        /* Handle is not present */
        if (NULL == handle)
        {
            continue;
        }
        /* Bit position of this channel within the common INT/ERR registers. */
        channel_index = DMA_CHANNEL_INDEX(handle->channel);
        /* Channel uses INTA flag */
        if (DMA_COMMON_REG_GET(handle->base, handle->channel, INTA) & (1U << channel_index))
        {
            /* Clear INTA flag */
            DMA_COMMON_REG_SET(handle->base, handle->channel, INTA, (1U << channel_index));
            if (handle->callback)
            {
                /* Successful completion signalled via interrupt A. */
                (handle->callback)(handle, handle->userData, true, kDMA_IntA);
            }
        }
        /* Channel uses INTB flag */
        if (DMA_COMMON_REG_GET(handle->base, handle->channel, INTB) & (1U << channel_index))
        {
            /* Clear INTB flag */
            DMA_COMMON_REG_SET(handle->base, handle->channel, INTB, (1U << channel_index));
            if (handle->callback)
            {
                /* Successful completion signalled via interrupt B. */
                (handle->callback)(handle, handle->userData, true, kDMA_IntB);
            }
        }
        /* Error flag */
        if (DMA_COMMON_REG_GET(handle->base, handle->channel, ERRINT) & (1U << channel_index))
        {
            /* Clear error flag */
            DMA_COMMON_REG_SET(handle->base, handle->channel, ERRINT, (1U << channel_index));
            if (handle->callback)
            {
                /* Transfer error: success=false. */
                (handle->callback)(handle, handle->userData, false, kDMA_IntError);
            }
        }
    }
}
/*! @brief DMA0 interrupt vector entry; delegates to the common dispatcher. */
void DMA0_DriverIRQHandler(void)
{
    DMA_IRQHandle(DMA0);
/* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping
  exception return operation might vector to incorrect interrupt */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
    __DSB();
#endif
}
#if defined(DMA1)
/*! @brief DMA1 interrupt vector entry (only on parts with a second DMA instance);
 * delegates to the common dispatcher. */
void DMA1_DriverIRQHandler(void)
{
    DMA_IRQHandle(DMA1);
/* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping
  exception return operation might vector to incorrect interrupt */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
    __DSB();
#endif
}
#endif