/*
 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
 * Copyright 2016-2022 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _FSL_COMMON_ARM_H_
#define _FSL_COMMON_ARM_H_

/*
 * For CMSIS pack RTE.
 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
 * of the related <RTE_Components_h> element for all selected software components.
 */
#ifdef _RTE_
#include "RTE_Components.h"
#endif

/*!
 * @addtogroup ksdk_common
 * @{
 */

/*! @name Atomic modification
 *
 * These macros are used for atomic access, such as read-modify-write
 * to the peripheral registers.
 *
 * - SDK_ATOMIC_LOCAL_ADD
 * - SDK_ATOMIC_LOCAL_SET
 * - SDK_ATOMIC_LOCAL_CLEAR
 * - SDK_ATOMIC_LOCAL_TOGGLE
 * - SDK_ATOMIC_LOCAL_CLEAR_AND_SET
 *
 * Take SDK_ATOMIC_LOCAL_CLEAR_AND_SET as an example: the parameter @c addr
 * is the address of the peripheral register or variable you want to modify
 * atomically, the parameter @c clearBits is the bits to clear, and the parameter
 * @c setBits is the bits to set.
 * For example, to set bit1:bit0 of a 32-bit register to 0b10, use it like this:
 *
 * @code
   volatile uint32_t * reg = (volatile uint32_t *)REG_ADDR;

   SDK_ATOMIC_LOCAL_CLEAR_AND_SET(reg, 0x03, 0x02);
   @endcode
 *
 * In this example, bit1:bit0 of the register are cleared and then bit1 is set; as a result,
 * register bit1:bit0 = 0b10.
 *
 * @note For platforms that don't support exclusive load and store, these macros
 * disable the global interrupt to protect the modification.
 *
 * @note These macros only guarantee atomic operations on the local processor. For
 * multi-processor devices, use a hardware semaphore such as SEMA42 to
 * guarantee exclusive access if necessary.
 *
 * @{
 */

/* clang-format off */
#if ((defined(__ARM_ARCH_7M__     ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined(__ARM_ARCH_7EM__    ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
/* clang-format on */

/* If the LDREX and STREX are supported, use them. */
#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXB(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXB((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXH(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXH((val), (addr)))

#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
    do                                              \
    {                                               \
        (val) = __LDREXW(addr);                     \
        (ops);                                      \
    } while (0UL != __STREXW((val), (addr)))

static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val += val);
}

static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val -= val);
}

static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val |= bits);
}

static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val &= ~bits);
}

static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val ^= bits);
}

static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
{
    uint8_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
{
    uint16_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
{
    uint32_t s_val;

    _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}

#define SDK_ATOMIC_LOCAL_ADD(addr, val)                                                                                        \
    ((1UL == sizeof(*(addr))) ?                                                                                                \
         _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) :                               \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
                                     _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))

#define SDK_ATOMIC_LOCAL_SET(addr, bits)                                                                                        \
    ((1UL == sizeof(*(addr))) ?                                                                                                 \
         _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :                               \
         ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
                                     _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                            \
         _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                       \
              _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)                                                                 \
    ((1UL == sizeof(*(addr))) ?                                                                             \
         _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) :        \
         ((2UL == sizeof(*(addr))) ?                                                                        \
              _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
              _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits)                                                                           \
    ((1UL == sizeof(*(addr))) ?                                                                                                            \
         _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) :         \
         ((2UL == sizeof(*(addr))) ?                                                                                                       \
              _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
              _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
#else

#define SDK_ATOMIC_LOCAL_ADD(addr, val)      \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) += (val);                    \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_SET(addr, bits)     \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) |= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits)   \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) &= ~(bits);                  \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits)  \
    do                                       \
    {                                        \
        uint32_t s_atomicOldInt;             \
        s_atomicOldInt = DisableGlobalIRQ(); \
        *(addr) ^= (bits);                   \
        EnableGlobalIRQ(s_atomicOldInt);     \
    } while (0)

#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
    do                                                           \
    {                                                            \
        uint32_t s_atomicOldInt;                                 \
        s_atomicOldInt = DisableGlobalIRQ();                     \
        *(addr)        = (*(addr) & ~(clearBits)) | (setBits);   \
        EnableGlobalIRQ(s_atomicOldInt);                         \
    } while (0)

#endif
/* @} */
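
/*
 * Usage sketch (illustrative only, not part of this header). The SDK_ATOMIC_LOCAL_*
 * macros dispatch on sizeof(*(addr)), so the same macro works for 8-bit, 16-bit, and
 * 32-bit variables or registers. s_txCount is a made-up example variable:
 *
 *   static volatile uint16_t s_txCount = 0U;
 *
 *   SDK_ATOMIC_LOCAL_ADD(&s_txCount, 1U);
 *   SDK_ATOMIC_LOCAL_CLEAR_AND_SET(&s_txCount, 0xFFU, 0x10U);
 *
 * The second call clears bits 7:0 and then sets bit 4, all in one atomic update.
 */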

/*! @name Timer utilities */
/* @{ */
/*! Macro to convert a microsecond period to a raw count value */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microseconds */
#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))

/*! Macro to convert a millisecond period to a raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to milliseconds */
#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
/* @} */
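
/*
 * Usage sketch (illustrative only, not part of this header): convert between time and
 * timer ticks for a timer clocked at 48 MHz. The frequency is an arbitrary example value.
 *
 *   uint64_t ticks = MSEC_TO_COUNT(10U, 48000000U);        10 ms     -> 480000 ticks
 *   uint64_t us    = COUNT_TO_USEC(480000U, 48000000U);    480000 ticks -> 10000 us
 */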

/*! @name ISR exit barrier
 * @{
 *
 * ARM errata 838869: on Cortex-M4 and Cortex-M4F, a store immediate overlapping
 * an exception return operation might vector to an incorrect interrupt.
 * For Cortex-M7, if the core runs much faster than the peripheral register write speed,
 * the peripheral interrupt flags may still be set after exiting the ISR; this results in
 * an error similar to errata 838869.
 */
#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
#define SDK_ISR_EXIT_BARRIER __DSB()
#else
#define SDK_ISR_EXIT_BARRIER
#endif

/* @} */
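
/*
 * Usage sketch (illustrative only, not part of this header): place SDK_ISR_EXIT_BARRIER
 * at the very end of an interrupt handler, after clearing the peripheral flag.
 * DEMO_IRQHandler and DEMO_ClearInterruptFlag() are placeholders for a real handler
 * and a real flag-clearing call.
 *
 *   void DEMO_IRQHandler(void)
 *   {
 *       DEMO_ClearInterruptFlag();
 *       SDK_ISR_EXIT_BARRIER;
 *   }
 */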

/*! @name Alignment variable definition macros */
/* @{ */
#if (defined(__ICCARM__))
/*
 * Workaround to suppress the MISRA C message (Pm120) for the IAR compiler.
 * http://supp.iar.com/Support/?note=24725
 */
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
    _Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
#elif defined(__GNUC__)
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported
#endif

/*! Macro to define a variable with L1 d-cache line size alignment */
#if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
#endif
/*! Macro to define a variable with L2 cache line size alignment */
#if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
#endif

/*! Macro to round a value up to the given alignment in bytes */
#define SDK_SIZEALIGN(var, alignbytes) \
    ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
/* @} */
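
/*
 * Usage sketch (illustrative only, not part of this header): define a 16-byte aligned
 * buffer and round a length up to the next multiple of 16. The names and sizes are
 * arbitrary examples.
 *
 *   SDK_ALIGN(static uint8_t s_buffer[64], 16);
 *
 *   uint32_t alignedLen = SDK_SIZEALIGN(100U, 16U);    Result: 112
 */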

/*! @name Non-cacheable region definition macros */
/* For initialized non-zero non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them. For zero-initialized non-cacheable
 * variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them;
 * these zero-initialized variables will be initialized to zero at system startup.
 */
/* @{ */

#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
     defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))

#if (defined(__ICCARM__))
#define AT_NONCACHEABLE_SECTION(var)                   var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_INIT(var)              var @"NonCacheable.init"
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"

#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
#if (defined(__CC_ARM))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
#else
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif

#elif (defined(__GNUC__))
#if defined(__ARM_ARCH_8A__) /* This macro is ARMv8-A specific */
#define __CS "//"
#else
#define __CS "@"
#endif

/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
 * in your projects to make sure the non-cacheable section variables will be initialized at system startup.
 */
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
    __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
    __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif

#else

#define AT_NONCACHEABLE_SECTION(var)                        var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes)      SDK_ALIGN(var, alignbytes)
#define AT_NONCACHEABLE_SECTION_INIT(var)                   var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)

#endif

/* @} */
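
/*
 * Usage sketch (illustrative only, not part of this header): place a DMA buffer in the
 * non-cacheable section, aligned to the L1 data cache line size. s_dmaBuffer is a
 * made-up name; FSL_FEATURE_L1DCACHE_LINESIZE_BYTE comes from the device feature header.
 *
 *   AT_NONCACHEABLE_SECTION_ALIGN(static uint8_t s_dmaBuffer[256], FSL_FEATURE_L1DCACHE_LINESIZE_BYTE);
 *
 * For an initialized non-zero variable, use the _INIT variant instead:
 *
 *   AT_NONCACHEABLE_SECTION_INIT(static uint32_t s_dmaConfigWord) = 0x12345678U;
 */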

/*!
 * @name Time sensitive region
 * @{
 */
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA(var)  var @"DataQuickAccess"
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var)  __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
#elif (defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
#define AT_QUICKACCESS_SECTION_DATA(var)  __attribute__((section("DataQuickAccess"))) var
#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
    __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */

/*! @name Ram Function */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#elif (defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
/* @} */
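
/*
 * Usage sketch (illustrative only, not part of this header): place a time-critical
 * routine in the quick-access code section and a flash-programming helper in the RAM
 * function section. The function names are placeholders, and the linker script must
 * provide the corresponding output sections.
 *
 *   AT_QUICKACCESS_SECTION_CODE(void DEMO_FastControlLoop(void));
 *   RAMFUNCTION_SECTION_CODE(status_t DEMO_ProgramFlashPage(uint32_t addr, const uint8_t *data));
 */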

#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
void DefaultISR(void);
#endif

/*
 * fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t,
 * which are defined before this point.
 */
#include "fsl_clock.h"

/*
 * Chip level peripheral reset API, for MCUs that implement peripheral reset control external to a peripheral.
 */
#if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
     (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
#include "fsl_reset.h"
#endif

/*******************************************************************************
 * API
 ******************************************************************************/

#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus*/

/*!
 * @brief Enable specific interrupt.
 *
 * Enable LEVEL1 interrupt. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to intmux are the LEVEL2 interrupts; they are routed
 * to NVIC first and then to the core.
 *
 * This function only enables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt enabled successfully
 * @retval kStatus_Fail Failed to enable the interrupt
 */
static inline status_t EnableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_EnableIRQ(interrupt);
#else
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
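
/*
 * Usage sketch (illustrative only, not part of this header): enable a LEVEL1 interrupt
 * and check the result. DEMO_IRQn stands for a device-specific IRQn_Type value, and
 * DEMO_HandleError() is a placeholder error handler.
 *
 *   if (kStatus_Success != EnableIRQ(DEMO_IRQn))
 *   {
 *       DEMO_HandleError();
 *   }
 */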

/*!
 * @brief Disable specific interrupt.
 *
 * Disable LEVEL1 interrupt. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to intmux are the LEVEL2 interrupts; they are routed
 * to NVIC first and then to the core.
 *
 * This function only disables the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ number.
 * @retval kStatus_Success Interrupt disabled successfully
 * @retval kStatus_Fail Failed to disable the interrupt
 */
static inline status_t DisableIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_DisableIRQ(interrupt);
#else
        NVIC_DisableIRQ(interrupt);
#endif
    }

    return status;
}

#if defined(__GIC_PRIO_BITS)
#define NVIC_SetPriority(irq, prio) do {} while(0)
#endif

/*!
 * @brief Enable the IRQ, and also set the interrupt priority.
 *
 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to intmux are the LEVEL2 interrupts; they are routed
 * to NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to enable.
 * @param priNum Priority number to set in the interrupt controller register.
 * @retval kStatus_Success Interrupt enabled and priority set successfully
 * @retval kStatus_Fail Failed to enable the interrupt or set its priority.
 */
static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
        GIC_EnableIRQ(interrupt);
#else
        NVIC_SetPriority(interrupt, priNum);
        NVIC_EnableIRQ(interrupt);
#endif
    }

    return status;
}
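
/*
 * Usage sketch (illustrative only, not part of this header): enable an interrupt and
 * set its priority in one call. DEMO_IRQn is a placeholder; the priority value must fit
 * the number of priority bits implemented on the device.
 *
 *   (void)EnableIRQWithPriority(DEMO_IRQn, 2U);
 */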

/*!
 * @brief Set the IRQ priority.
 *
 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to intmux are the LEVEL2 interrupts; they are routed
 * to NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ to set.
 * @param priNum Priority number to set in the interrupt controller register.
 *
 * @retval kStatus_Success Interrupt priority set successfully
 * @retval kStatus_Fail Failed to set the interrupt priority.
 */
static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_SetPriority(interrupt, priNum);
#else
        NVIC_SetPriority(interrupt, priNum);
#endif
    }

    return status;
}

/*!
 * @brief Clear the pending IRQ flag.
 *
 * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
 * levels. For example, there are NVIC and intmux. Here the interrupts connected
 * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
 * The interrupts connected to intmux are the LEVEL2 interrupts; they are routed
 * to NVIC first and then to the core.
 *
 * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
 * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 *
 * @param interrupt The IRQ whose pending flag to clear.
 *
 * @retval kStatus_Success Pending IRQ flag cleared successfully
 * @retval kStatus_Fail Failed to clear the pending IRQ flag.
 */
static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
{
    status_t status = kStatus_Success;

    if (NotAvail_IRQn == interrupt)
    {
        status = kStatus_Fail;
    }

#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
    else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
    {
        status = kStatus_Fail;
    }
#endif

    else
    {
#if defined(__GIC_PRIO_BITS)
        GIC_ClearPendingIRQ(interrupt);
#else
        NVIC_ClearPendingIRQ(interrupt);
#endif
    }

    return status;
}

/*!
 * @brief Disable the global IRQ
 *
 * Disable the global interrupt and return the current primask value. The caller is required to pass this primask
 * value back to EnableGlobalIRQ() to restore the previous interrupt state.
 *
 * @return Current primask value.
 */
static inline uint32_t DisableGlobalIRQ(void)
{
    uint32_t mask;

#if defined(CPSR_I_Msk)
    mask = __get_CPSR() & CPSR_I_Msk;
#elif defined(DAIF_I_BIT)
    mask = __get_DAIF() & DAIF_I_BIT;
#else
    mask = __get_PRIMASK();
#endif
    __disable_irq();

    return mask;
}

/*!
 * @brief Enable the global IRQ
 *
 * Set the primask register to the provided value instead of unconditionally enabling interrupts. This is done for
 * easier RTOS integration, because some RTOSes have their own primask management. EnableGlobalIRQ() and
 * DisableGlobalIRQ() are required to be used in pairs.
 *
 * @param primask Value of the primask register to be restored, as provided by DisableGlobalIRQ().
 */
static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
    __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
#elif defined(DAIF_I_BIT)
    if (0UL == primask)
    {
        __enable_irq();
    }
#else
    __set_PRIMASK(primask);
#endif
}
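
/*
 * Usage sketch (illustrative only, not part of this header): a short critical section
 * built from the DisableGlobalIRQ()/EnableGlobalIRQ() pair. s_sharedCounter is a
 * made-up shared variable.
 *
 *   uint32_t primask = DisableGlobalIRQ();
 *   s_sharedCounter++;
 *   EnableGlobalIRQ(primask);
 *
 * Because the saved primask is restored rather than interrupts being enabled
 * unconditionally, this pattern also works when the caller already runs with
 * interrupts disabled.
 */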

#if defined(ENABLE_RAM_VECTOR_TABLE)
/*!
 * @brief Install IRQ handler.
 *
 * @param irq IRQ number
 * @param irqHandler IRQ handler address
 * @return The old IRQ handler address
 */
uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
#endif /* ENABLE_RAM_VECTOR_TABLE. */
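
/*
 * Usage sketch (illustrative only, assuming ENABLE_RAM_VECTOR_TABLE is defined so the
 * vector table can be relocated to RAM): install a custom handler at run time.
 * DEMO_IRQn and DEMO_UserIrqHandler are placeholders.
 *
 *   void DEMO_UserIrqHandler(void);
 *
 *   uint32_t oldHandler = InstallIRQHandler(DEMO_IRQn, (uint32_t)DEMO_UserIrqHandler);
 *   (void)oldHandler;
 */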

#if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))

/*
 * When FSL_FEATURE_POWERLIB_EXTEND is defined to a non-zero value,
 * powerlib should be used instead of these functions.
 */
#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
/*!
 * @brief Enable specific interrupt for wake-up from deep-sleep mode.
 *
 * Enable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also enables the interrupt in the NVIC (EnableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void EnableDeepSleepIRQ(IRQn_Type interrupt);

/*!
 * @brief Disable specific interrupt for wake-up from deep-sleep mode.
 *
 * Disable the interrupt for wake-up from deep-sleep mode.
 * Some interrupts are typically used in sleep mode only and will not occur during
 * deep-sleep mode because the relevant clocks are stopped. However, it is possible to enable
 * those clocks (significantly increasing power consumption in the reduced power mode),
 * making these wake-ups possible.
 *
 * @note This function also disables the interrupt in the NVIC (DisableIRQ() is called internally).
 *
 * @param interrupt The IRQ number.
 */
void DisableDeepSleepIRQ(IRQn_Type interrupt);
#endif /* FSL_FEATURE_POWERLIB_EXTEND */
#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
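
/*
 * Usage sketch (illustrative only, assuming a SYSCON-based device without
 * FSL_FEATURE_POWERLIB_EXTEND): allow a peripheral interrupt to wake the core from
 * deep-sleep mode. DEMO_WAKEUP_IRQn is a placeholder IRQ number, and entering
 * deep-sleep mode itself is done through the device's power API.
 *
 *   EnableDeepSleepIRQ(DEMO_WAKEUP_IRQn);
 *   DEMO_EnterDeepSleep();
 *   DisableDeepSleepIRQ(DEMO_WAKEUP_IRQn);
 */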

#if defined(DWT)
/*!
 * @brief Enable the counter to get CPU cycles.
 */
void MSDK_EnableCpuCycleCounter(void);

/*!
 * @brief Get the current CPU cycle count.
 *
 * @return Current CPU cycle count.
 */
uint32_t MSDK_GetCpuCycleCount(void);
#endif
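
/*
 * Usage sketch (illustrative only, assuming the core has a DWT unit): measure elapsed
 * CPU cycles around a piece of code. DEMO_DoWork() is a placeholder.
 *
 *   MSDK_EnableCpuCycleCounter();
 *
 *   uint32_t start  = MSDK_GetCpuCycleCount();
 *   DEMO_DoWork();
 *   uint32_t cycles = MSDK_GetCpuCycleCount() - start;
 *
 * The 32-bit counter wraps around, so the unsigned subtraction stays correct as long
 * as the measured interval is shorter than 2^32 cycles.
 */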

#if defined(__cplusplus)
}
#endif /* __cplusplus*/

/*! @} */

#endif /* _FSL_COMMON_ARM_H_ */