/*
 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
 * Copyright 2016-2021 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef _FSL_COMMON_ARM_H_
#define _FSL_COMMON_ARM_H_

/*
 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
 * of the related <RTE_Components_h> element for all selected software components.
 */
#ifdef _RTE_
#include "RTE_Components.h"
#endif
/*!
 * @addtogroup ksdk_common
 * @{
 */

/*! @name Atomic modification
 *
 * These macros are used for atomic access, such as read-modify-write
 * to the peripheral registers.
 *
 * - SDK_ATOMIC_LOCAL_ADD
 * - SDK_ATOMIC_LOCAL_SET
 * - SDK_ATOMIC_LOCAL_CLEAR
 * - SDK_ATOMIC_LOCAL_TOGGLE
 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
 *
 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
 * means the address of the peripheral register or variable you want to modify
 * atomically, the parameter @c clearBits is the bits to clear, the parameter
 * @c setBits is the bits to set.
 * For example, to set a 32-bit register bit1:bit0 to 0b10, use like this:
 *
 * @code
   volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;
   SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
   @endcode
 *
 * In this example, the register bit1:bit0 are cleared and bit1 is set; as a result,
 * register bit1:bit0 = 0b10.
 *
 * @note For platforms that do not support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
 *
 * @note These macros only guarantee the local processor atomic operations. For
 * multi-processor devices, use a hardware semaphore such as SEMA42 to
 * guarantee exclusive access if necessary.
 *
 * @{
 */
/* clang-format off */
#if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
     (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
/* clang-format on */

/* If the LDREX and STREX are supported, use them. */
/*
 * Exclusive-access read-modify-write loop: load the current value with a
 * load-exclusive, apply @p ops to @p val, then attempt a store-exclusive.
 * The loop retries until the store-exclusive reports success (returns 0),
 * i.e. no other access touched the location between the load and the store.
 */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))
  81. static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
  82. {
  83. uint8_t s_val;
  84. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val += val);
  85. }
  86. static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
  87. {
  88. uint16_t s_val;
  89. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val += val);
  90. }
  91. static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
  92. {
  93. uint32_t s_val;
  94. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val += val);
  95. }
  96. static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
  97. {
  98. uint8_t s_val;
  99. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val -= val);
  100. }
  101. static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
  102. {
  103. uint16_t s_val;
  104. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val -= val);
  105. }
  106. static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
  107. {
  108. uint32_t s_val;
  109. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val -= val);
  110. }
  111. static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
  112. {
  113. uint8_t s_val;
  114. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val |= bits);
  115. }
  116. static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
  117. {
  118. uint16_t s_val;
  119. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val |= bits);
  120. }
  121. static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
  122. {
  123. uint32_t s_val;
  124. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val |= bits);
  125. }
  126. static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
  127. {
  128. uint8_t s_val;
  129. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val &= ~bits);
  130. }
  131. static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
  132. {
  133. uint16_t s_val;
  134. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val &= ~bits);
  135. }
  136. static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
  137. {
  138. uint32_t s_val;
  139. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val &= ~bits);
  140. }
  141. static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
  142. {
  143. uint8_t s_val;
  144. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val ^= bits);
  145. }
  146. static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
  147. {
  148. uint16_t s_val;
  149. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val ^= bits);
  150. }
  151. static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
  152. {
  153. uint32_t s_val;
  154. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val ^= bits);
  155. }
  156. static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
  157. {
  158. uint8_t s_val;
  159. _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
  160. }
  161. static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
  162. {
  163. uint16_t s_val;
  164. _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
  165. }
  166. static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
  167. {
  168. uint32_t s_val;
  169. _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
  170. }
/*
 * Public atomic macros: dispatch on sizeof(*(addr)) to the 1-, 2-, or 4-byte
 * helper above. Any operand size other than 1 or 2 falls through to the
 * 4-byte variant.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
              _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    ((1UL == sizeof(*(addr))) ? \
         _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
         ((2UL == sizeof(*(addr))) ? \
              _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
              _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
#else

/*
 * Exclusive load/store is not available on this architecture, so fall back
 * to a short critical section: mask interrupts, perform the read-modify-write,
 * then restore the previous interrupt state. Atomic on the local core only.
 */
#define SDK_ATOMIC_LOCAL_ADD(addr, val)      \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) += (val);                    \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_SET(addr, bits)     \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) |= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)   \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) &= ~(bits);                  \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)  \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) ^= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do                                                           \
    {                                                            \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr) = (*(addr) & ~(clearBits)) | (setBits);          \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (0)
#endif
/* @} */

/*! @name Timer utilities */
/* @{ */
/*! Macro to convert a microsecond period to raw count value.
 *  NOTE(review): multiplies before dividing, so (us * clockFreqInHz) must fit in 64 bits. */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microsecond.
 *  NOTE(review): (count * 1000000) must fit in 64 bits; clockFreqInHz must be non-zero. */
#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))
/*! Macro to convert a millisecond period to raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to millisecond */
#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
/* @} */
/*! @name ISR exit barrier
 * @{
 *
 * ARM errata 838869, affects Cortex-M4, Cortex-M4F: a store immediate overlapping
 * an exception return operation might vector to an incorrect interrupt.
 * For Cortex-M7, if the core speed is much faster than the peripheral register
 * write speed, the peripheral interrupt flags may still be set after exiting the
 * ISR; this results in an error similar to errata 838869.
 */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
/* A DSB at ISR exit drains pending stores (e.g. the flag-clearing write)
 * before the exception return. */
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
#define SDK_ISR_EXIT_BARRIER
#endif
/* @} */
/*! @name Alignment variable definition macros */
/* @{ */
#if (defined(__ICCARM__))
/*
 * Workaround to disable MISRA C message suppress warnings for IAR compiler.
 * http://supp.iar.com/Support/?note=24725
 */
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
_Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
#elif defined(__GNUC__)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported
#endif

/*! Macro to define a variable with L1 d-cache line size alignment */
#if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#endif
/*! Macro to define a variable with L2 cache line size alignment */
#if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#endif

/*! Macro to change a value to a given size aligned value.
 *  Rounds @p var up to the next multiple of @p alignbytes; alignbytes must be a power of two. */
#define SDK_SIZEALIGN(var, alignbytes) \
    ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
/* @} */
/*! @name Non-cacheable region definition macros */
/* For initialized non-zero non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them. For zero-initialized
 * non-cacheable variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);"
 * to define them; these zero-initialized variables will be initialized to zero in system startup.
 */
/* @{ */
#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
     defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))

#if (defined(__ICCARM__))
#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"

#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
#if (defined(__CC_ARM))
/* Arm Compiler 5: zero_init places the variable in a zero-initialized region. */
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
#else
/* Arm Compiler 6: use a .bss-prefixed section name instead of zero_init. */
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif

#elif (defined(__GNUC__))
/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
 * in your projects to make sure the non-cacheable section variables will be initialized in system startup.
 */
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
/* The ",\"aw\",%nobits @" suffix marks the section allocatable/writable NOBITS (not stored in the image). */
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable,\"aw\",%nobits @"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif

#else
/* No non-cacheable section on this device: place the variables normally. */
#define AT_NONCACHEABLE_SECTION(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
#define AT_NONCACHEABLE_SECTION_INIT(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)
#endif
/* @} */
/*!
 * @name Time sensitive region
 * @{
 */
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
/* __noinline__ keeps the function body in the fast section instead of being inlined elsewhere. */
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
#elif (defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */

/*! @name Ram Function */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#elif (defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
/* @} */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/*! @brief Default interrupt handler (implemented elsewhere; declared for Arm Compiler 6). */
void DefaultISR(void);
#endif

/*
 * The fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t
 * defined earlier in this file.
 */
#include "fsl_clock.h"

/*
 * Chip level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral.
 */
#if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
     (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
#include "fsl_reset.h"
#endif

/*******************************************************************************
 * API
 ******************************************************************************/

#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus*/
  403. /*!
  404. * @brief Enable specific interrupt.
  405. *
  406. * Enable LEVEL1 interrupt. For some devices, there might be multiple interrupt
  407. * levels. For example, there are NVIC and intmux. Here the interrupts connected
  408. * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
  409. * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
  410. * to NVIC first then routed to core.
  411. *
  412. * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
  413. * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
  414. *
  415. * @param interrupt The IRQ number.
  416. * @retval kStatus_Success Interrupt enabled successfully
  417. * @retval kStatus_Fail Failed to enable the interrupt
  418. */
  419. static inline status_t EnableIRQ(IRQn_Type interrupt)
  420. {
  421. status_t status = kStatus_Success;
  422. if (NotAvail_IRQn == interrupt)
  423. {
  424. status = kStatus_Fail;
  425. }
  426. #if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
  427. else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
  428. {
  429. status = kStatus_Fail;
  430. }
  431. #endif
  432. else
  433. {
  434. #if defined(__GIC_PRIO_BITS)
  435. GIC_EnableIRQ(interrupt);
  436. #else
  437. NVIC_EnableIRQ(interrupt);
  438. #endif
  439. }
  440. return status;
  441. }
  442. /*!
  443. * @brief Disable specific interrupt.
  444. *
  445. * Disable LEVEL1 interrupt. For some devices, there might be multiple interrupt
  446. * levels. For example, there are NVIC and intmux. Here the interrupts connected
  447. * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
  448. * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
  449. * to NVIC first then routed to core.
  450. *
  451. * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
  452. * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
  453. *
  454. * @param interrupt The IRQ number.
  455. * @retval kStatus_Success Interrupt disabled successfully
  456. * @retval kStatus_Fail Failed to disable the interrupt
  457. */
  458. static inline status_t DisableIRQ(IRQn_Type interrupt)
  459. {
  460. status_t status = kStatus_Success;
  461. if (NotAvail_IRQn == interrupt)
  462. {
  463. status = kStatus_Fail;
  464. }
  465. #if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
  466. else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
  467. {
  468. status = kStatus_Fail;
  469. }
  470. #endif
  471. else
  472. {
  473. #if defined(__GIC_PRIO_BITS)
  474. GIC_DisableIRQ(interrupt);
  475. #else
  476. NVIC_DisableIRQ(interrupt);
  477. #endif
  478. }
  479. return status;
  480. }
  481. /*!
  482. * @brief Disable the global IRQ
  483. *
  484. * Disable the global interrupt and return the current primask register. User is required to provided the primask
  485. * register for the EnableGlobalIRQ().
  486. *
  487. * @return Current primask value.
  488. */
  489. static inline uint32_t DisableGlobalIRQ(void)
  490. {
  491. #if defined(CPSR_I_Msk)
  492. uint32_t cpsr = __get_CPSR() & CPSR_I_Msk;
  493. __disable_irq();
  494. return cpsr;
  495. #else
  496. uint32_t regPrimask = __get_PRIMASK();
  497. __disable_irq();
  498. return regPrimask;
  499. #endif
  500. }
  501. /*!
  502. * @brief Enable the global IRQ
  503. *
  504. * Set the primask register with the provided primask value but not just enable the primask. The idea is for the
  505. * convenience of integration of RTOS. some RTOS get its own management mechanism of primask. User is required to
  506. * use the EnableGlobalIRQ() and DisableGlobalIRQ() in pair.
  507. *
  508. * @param primask value of primask register to be restored. The primask value is supposed to be provided by the
  509. * DisableGlobalIRQ().
  510. */
  511. static inline void EnableGlobalIRQ(uint32_t primask)
  512. {
  513. #if defined(CPSR_I_Msk)
  514. __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
  515. #else
  516. __set_PRIMASK(primask);
  517. #endif
  518. }
#if defined(ENABLE_RAM_VECTOR_TABLE)
/*!
 * @brief Install IRQ handler.
 *
 * @param irq IRQ number
 * @param irqHandler IRQ handler address
 * @return The old IRQ handler address
 */
uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
#endif /* ENABLE_RAM_VECTOR_TABLE. */
#if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))

/*
 * When FSL_FEATURE_POWERLIB_EXTEND is defined to non-zero value,
 * powerlib should be used instead of these functions.
 */
#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
/*!
 * @brief Enable specific interrupt for wake-up from deep-sleep mode.
 *
 * Enable the interrupt for wake-up from deep sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void EnableDeepSleepIRQ(IRQn_Type interrupt);

/*!
 * @brief Disable specific interrupt for wake-up from deep-sleep mode.
 *
 * Disable the interrupt for wake-up from deep sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void DisableDeepSleepIRQ(IRQn_Type interrupt);
#endif /* FSL_FEATURE_POWERLIB_EXTEND */
#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
  565. #if defined(__cplusplus)
  566. }
  567. #endif /* __cplusplus*/
  568. /*! @} */
  569. #endif /* _FSL_COMMON_ARM_H_ */