Diffstat (limited to 'FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h')
 -rw-r--r--  FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h | 357
 1 file changed, 262 insertions(+), 95 deletions(-)
diff --git a/FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h b/FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h
index 8b28aa888..1da9dff8d 100644
--- a/FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h
+++ b/FreeRTOS/Demo/CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso/NXP_Code/drivers/fsl_common_arm.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
- * Copyright 2016-2021 NXP
+ * Copyright 2016-2022 NXP
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -214,69 +214,82 @@ static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uin
_SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
}
-#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
- ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd1Byte((volatile uint8_t*)(volatile void*)(addr), (val)) : \
- ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t*)(volatile void*)(addr), (val)) : \
- _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void*)(addr), (val))))
-
-#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
- ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) : \
- ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
- _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))
-
-#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
- ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalClear1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) : \
- ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalClear2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
- _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))
-
-#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
- ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalToggle1Byte((volatile uint8_t*)(volatile void*)(addr), (bits)) : \
- ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalToggle2Byte((volatile uint16_t*)(volatile void*)(addr), (bits)) : \
- _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void*)(addr), (bits))))
-
-#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
- ((1UL == sizeof(*(addr))) ? _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t*)(volatile void*)(addr), (clearBits), (setBits)) : \
- ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t*)(volatile void*)(addr), (clearBits), (setBits)) : \
- _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void*)(addr), (clearBits), (setBits))))
+#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
+ ((1UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
+ ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
+ _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))
+
+#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
+ ((1UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
+ ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
+ _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
+
+#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
+ ((1UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
+ ((2UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
+ _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
+
+#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
+ ((1UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
+ ((2UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
+ _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
+
+#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
+ ((1UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
+ ((2UL == sizeof(*(addr))) ? \
+ _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
+ _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
#else
-#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
- do { \
- uint32_t s_atomicOldInt; \
- s_atomicOldInt = DisableGlobalIRQ(); \
- *(addr) += (val); \
- EnableGlobalIRQ(s_atomicOldInt); \
+#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
+ do \
+ { \
+ uint32_t s_atomicOldInt; \
+ s_atomicOldInt = DisableGlobalIRQ(); \
+ *(addr) += (val); \
+ EnableGlobalIRQ(s_atomicOldInt); \
} while (0)
-#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
- do { \
- uint32_t s_atomicOldInt; \
- s_atomicOldInt = DisableGlobalIRQ(); \
- *(addr) |= (bits); \
- EnableGlobalIRQ(s_atomicOldInt); \
+#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
+ do \
+ { \
+ uint32_t s_atomicOldInt; \
+ s_atomicOldInt = DisableGlobalIRQ(); \
+ *(addr) |= (bits); \
+ EnableGlobalIRQ(s_atomicOldInt); \
} while (0)
-#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
- do { \
- uint32_t s_atomicOldInt; \
- s_atomicOldInt = DisableGlobalIRQ(); \
- *(addr) &= ~(bits); \
- EnableGlobalIRQ(s_atomicOldInt); \
+#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
+ do \
+ { \
+ uint32_t s_atomicOldInt; \
+ s_atomicOldInt = DisableGlobalIRQ(); \
+ *(addr) &= ~(bits); \
+ EnableGlobalIRQ(s_atomicOldInt); \
} while (0)
-#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
- do { \
- uint32_t s_atomicOldInt; \
- s_atomicOldInt = DisableGlobalIRQ(); \
- *(addr) ^= (bits); \
- EnableGlobalIRQ(s_atomicOldInt); \
+#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
+ do \
+ { \
+ uint32_t s_atomicOldInt; \
+ s_atomicOldInt = DisableGlobalIRQ(); \
+ *(addr) ^= (bits); \
+ EnableGlobalIRQ(s_atomicOldInt); \
} while (0)
#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
- do { \
+ do \
+ { \
uint32_t s_atomicOldInt; \
s_atomicOldInt = DisableGlobalIRQ(); \
- *(addr) = (*(addr) & ~(clearBits)) | (setBits); \
+ *(addr) = (*(addr) & ~(clearBits)) | (setBits); \
EnableGlobalIRQ(s_atomicOldInt); \
} while (0)
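Both branches give the same call-site behavior: the exclusive-access (LDREX/STREX) helpers are selected by sizeof(*(addr)), and the fallback wraps the plain read-modify-write in a DisableGlobalIRQ()/EnableGlobalIRQ() pair. A minimal usage sketch, with the flag and counter variables invented for illustration:

/* Illustrative use of the SDK_ATOMIC_LOCAL_* macros; variable names are not from the diff. */
#include "fsl_common.h"

static volatile uint8_t  s_eventFlags; /* 1-byte object: the 1-byte helper is picked */
static volatile uint32_t s_txCount;    /* 4-byte object: the 4-byte helper is picked */

void EXAMPLE_AtomicOps(void)
{
    SDK_ATOMIC_LOCAL_SET(&s_eventFlags, 0x01U);                  /* set bit 0                      */
    SDK_ATOMIC_LOCAL_CLEAR_AND_SET(&s_eventFlags, 0x06U, 0x02U); /* clear bits 1-2, then set bit 1 */
    SDK_ATOMIC_LOCAL_ADD(&s_txCount, 1U);                        /* atomic increment               */
}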
@@ -288,12 +301,12 @@ static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uin
/*! Macro to convert a microsecond period to raw count value */
#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
/*! Macro to convert a raw count value to microsecond */
-#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000000U / (clockFreqInHz))
+#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))
/*! Macro to convert a millisecond period to raw count value */
#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
/*! Macro to convert a raw count value to millisecond */
-#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count) * 1000U / (clockFreqInHz))
+#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
/* @} */
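The conversion helpers are pure 64-bit integer arithmetic over a count (or period) and the timer clock in Hz. A short sketch, assuming a 96 MHz timer clock (the frequency is illustrative):

/* Illustrative use of the time/count conversion macros (96 MHz is an assumed clock). */
#define EXAMPLE_TIMER_CLOCK_HZ 96000000U

uint64_t ticksFor500us = USEC_TO_COUNT(500U, EXAMPLE_TIMER_CLOCK_HZ);    /* 48000 ticks */
uint64_t elapsedMs     = COUNT_TO_MSEC(480000U, EXAMPLE_TIMER_CLOCK_HZ); /* 5 ms        */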
/*! @name ISR exit barrier
@@ -322,7 +335,7 @@ static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uin
*/
_Pragma("diag_suppress=Pm120")
#define SDK_PRAGMA(x) _Pragma(#x)
-_Pragma("diag_error=Pm120")
+ _Pragma("diag_error=Pm120")
/*! Macro to define a variable with alignbytes alignment */
#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
@@ -351,25 +364,27 @@ _Pragma("diag_error=Pm120")
/*! @name Non-cacheable region definition macros */
/* For initialized non-zero non-cacheable variables, please using "AT_NONCACHEABLE_SECTION_INIT(var) ={xx};" or
- * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them, for zero-inited non-cacheable variables,
- * please using "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them, these zero-inited variables
- * will be initialized to zero in system startup.
+ * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) ={xx};" in your projects to define them, for zero-inited non-cacheable
+ * variables, please using "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);" to define them,
+ * these zero-inited variables will be initialized to zero in system startup.
*/
/* @{ */
-#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))
+#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
+ defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))
#if (defined(__ICCARM__))
-#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
+#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
-#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
-#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"
+#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
+#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
+ SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"
-#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
+#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
__attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
-#if(defined(__CC_ARM))
+#if (defined(__CC_ARM))
#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
__attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
@@ -379,7 +394,7 @@ _Pragma("diag_error=Pm120")
__attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
#endif
-#elif(defined(__GNUC__))
+#elif (defined(__GNUC__))
/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
* in your projects to make sure the non-cacheable section variables will be initialized in system startup.
*/
@@ -395,9 +410,9 @@ _Pragma("diag_error=Pm120")
#else
-#define AT_NONCACHEABLE_SECTION(var) var
-#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
-#define AT_NONCACHEABLE_SECTION_INIT(var) var
+#define AT_NONCACHEABLE_SECTION(var) var
+#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
+#define AT_NONCACHEABLE_SECTION_INIT(var) var
#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)
#endif
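Per the comment above, initialized variables go through AT_NONCACHEABLE_SECTION_INIT/_ALIGN_INIT, while zero-initialized ones use AT_NONCACHEABLE_SECTION/_ALIGN and are cleared at startup (for GCC, only when __STARTUP_INITIALIZE_NONCACHEDATA is defined). A sketch with invented DMA buffers:

/* Illustrative placement of DMA buffers in the non-cacheable section; buffer names are assumptions. */
AT_NONCACHEABLE_SECTION_ALIGN(static uint8_t s_dmaRxBuffer[256], 4); /* zero-filled at startup */
AT_NONCACHEABLE_SECTION_ALIGN_INIT(static uint8_t s_dmaTxPattern[4], 4) = {0xAAU, 0xBBU, 0xCCU, 0xDDU};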
@@ -408,43 +423,39 @@ _Pragma("diag_error=Pm120")
* @name Time sensitive region
* @{
*/
-#if (defined(FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE) && FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE)
-
#if (defined(__ICCARM__))
#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
-#define AT_QUICKACCESS_SECTION_DATA(func) func @"DataQuickAccess"
-#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
+#define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
+#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
+ SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
+#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
-#define AT_QUICKACCESS_SECTION_DATA(func) __attribute__((section("DataQuickAccess"))) func
-#elif(defined(__GNUC__))
+#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
+#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
+ __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
+#elif (defined(__GNUC__))
#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
-#define AT_QUICKACCESS_SECTION_DATA(func) __attribute__((section("DataQuickAccess"))) func
+#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
+#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
+ __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
-#else /* __FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE */
-
-#define AT_QUICKACCESS_SECTION_CODE(func) func
-#define AT_QUICKACCESS_SECTION_DATA(func) func
-
-#endif /* __FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE */
-/* @} */
-
/*! @name Ram Function */
#if (defined(__ICCARM__))
#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
-#elif(defined(__CC_ARM) || defined(__ARMCC_VERSION))
+#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
-#elif(defined(__GNUC__))
+#elif (defined(__GNUC__))
#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
#else
#error Toolchain not supported.
#endif /* defined(__ICCARM__) */
/* @} */
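With the FSL_SDK_DRIVER_QUICK_ACCESS_ENABLE guard removed, the quick-access and RAM-function section macros are always available, and the new _DATA_ALIGN variant covers aligned data. A sketch, with the flash routine and buffer purely illustrative:

/* Illustrative use of the quick-access / RAM-function section macros; names are assumptions. */
AT_QUICKACCESS_SECTION_DATA_ALIGN(static uint32_t s_flashPageBuffer[64], 8);

AT_QUICKACCESS_SECTION_CODE(void EXAMPLE_FlashProgramPage(uint32_t dstAddr, const uint32_t *src));
RAMFUNCTION_SECTION_CODE(void EXAMPLE_FlashWaitDone(void));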
-#if defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 )
-void DefaultISR(void);
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+ void DefaultISR(void);
#endif
/*
@@ -558,6 +569,144 @@ static inline status_t DisableIRQ(IRQn_Type interrupt)
}
/*!
+ * @brief Enable the IRQ, and also set the interrupt priority.
+ *
+ * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
+ * levels. For example, there are NVIC and intmux. Here the interrupts connected
+ * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
+ * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
+ * to NVIC first then routed to core.
+ *
+ * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
+ * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
+ *
+ * @param interrupt The IRQ to Enable.
+ * @param priNum Priority number set to interrupt controller register.
+ * @retval kStatus_Success Interrupt priority set successfully
+ * @retval kStatus_Fail Failed to set the interrupt priority.
+ */
+static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
+{
+ status_t status = kStatus_Success;
+
+ if (NotAvail_IRQn == interrupt)
+ {
+ status = kStatus_Fail;
+ }
+
+#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
+ else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
+ {
+ status = kStatus_Fail;
+ }
+#endif
+
+ else
+ {
+#if defined(__GIC_PRIO_BITS)
+ GIC_SetPriority(interrupt, priNum);
+ GIC_EnableIRQ(interrupt);
+#else
+ NVIC_SetPriority(interrupt, priNum);
+ NVIC_EnableIRQ(interrupt);
+#endif
+ }
+
+ return status;
+}
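Usage follows directly from the contract documented above: the call fails for NotAvail_IRQn or for an IRQ beyond the LEVEL1 range, and otherwise sets the priority and enables the interrupt in one step. A sketch (the FLEXCOMM0_IRQn choice and priority value are illustrative):

/* Illustrative call; check the status in case the IRQ is unavailable or not LEVEL1. */
if (kStatus_Success != EnableIRQWithPriority(FLEXCOMM0_IRQn, 3U))
{
    /* Handle the error: the interrupt was not enabled. */
}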
+
+/*!
+ * @brief Set the IRQ priority.
+ *
+ * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
+ * levels. For example, there are NVIC and intmux. Here the interrupts connected
+ * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
+ * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
+ * to NVIC first then routed to core.
+ *
+ * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
+ * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
+ *
+ * @param interrupt The IRQ to set.
+ * @param priNum Priority number set to interrupt controller register.
+ *
+ * @retval kStatus_Success Interrupt priority set successfully
+ * @retval kStatus_Fail Failed to set the interrupt priority.
+ */
+static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
+{
+ status_t status = kStatus_Success;
+
+ if (NotAvail_IRQn == interrupt)
+ {
+ status = kStatus_Fail;
+ }
+
+#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
+ else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
+ {
+ status = kStatus_Fail;
+ }
+#endif
+
+ else
+ {
+#if defined(__GIC_PRIO_BITS)
+ GIC_SetPriority(interrupt, priNum);
+#else
+ NVIC_SetPriority(interrupt, priNum);
+#endif
+ }
+
+ return status;
+}
+
+/*!
+ * @brief Clear the pending IRQ flag.
+ *
+ * Only handle LEVEL1 interrupt. For some devices, there might be multiple interrupt
+ * levels. For example, there are NVIC and intmux. Here the interrupts connected
+ * to NVIC are the LEVEL1 interrupts, because they are routed to the core directly.
+ * The interrupts connected to intmux are the LEVEL2 interrupts, they are routed
+ * to NVIC first then routed to core.
+ *
+ * This function only handles the LEVEL1 interrupts. The number of LEVEL1 interrupts
+ * is indicated by the feature macro FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
+ *
+ * @param interrupt The flag which IRQ to clear.
+ *
+ * @retval kStatus_Success Interrupt priority set successfully
+ * @retval kStatus_Fail Failed to set the interrupt priority.
+ */
+static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
+{
+ status_t status = kStatus_Success;
+
+ if (NotAvail_IRQn == interrupt)
+ {
+ status = kStatus_Fail;
+ }
+
+#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
+ else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
+ {
+ status = kStatus_Fail;
+ }
+#endif
+
+ else
+ {
+#if defined(__GIC_PRIO_BITS)
+ GIC_ClearPendingIRQ(interrupt);
+#else
+ NVIC_ClearPendingIRQ(interrupt);
+#endif
+ }
+
+ return status;
+}
+
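The two helpers above follow the same pattern as EnableIRQWithPriority(), differing only in the NVIC/GIC call they wrap. A sketch of typical use (IRQ name and priority are illustrative):

/* Illustrative calls; both return kStatus_Fail for unavailable or non-LEVEL1 interrupts. */
(void)IRQ_SetPriority(FLEXCOMM0_IRQn, 2U);  /* adjust urgency without touching the enable state   */
(void)IRQ_ClearPendingIRQ(FLEXCOMM0_IRQn);  /* drop a request latched while the IRQ was disabled  */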
+/*!
* @brief Disable the global IRQ
*
* Disable the global interrupt and return the current primask register. User is required to provided the primask
@@ -567,19 +716,18 @@ static inline status_t DisableIRQ(IRQn_Type interrupt)
*/
static inline uint32_t DisableGlobalIRQ(void)
{
-#if defined(CPSR_I_Msk)
- uint32_t cpsr = __get_CPSR() & CPSR_I_Msk;
-
- __disable_irq();
+ uint32_t mask;
- return cpsr;
+#if defined(CPSR_I_Msk)
+ mask = __get_CPSR() & CPSR_I_Msk;
+#elif defined(DAIF_I_BIT)
+ mask = __get_DAIF() & DAIF_I_BIT;
#else
- uint32_t regPrimask = __get_PRIMASK();
-
+ mask = __get_PRIMASK();
+#endif
__disable_irq();
- return regPrimask;
-#endif
+ return mask;
}
/*!
@@ -596,6 +744,11 @@ static inline void EnableGlobalIRQ(uint32_t primask)
{
#if defined(CPSR_I_Msk)
__set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
+#elif defined(DAIF_I_BIT)
+ if (0UL == primask)
+ {
+ __enable_irq();
+ }
#else
__set_PRIMASK(primask);
#endif
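Because DisableGlobalIRQ() now returns the previous mask state uniformly (CPSR I-bit, DAIF I-bit, or PRIMASK) and EnableGlobalIRQ() restores it, the usual critical-section pairing is unchanged:

/* The standard critical-section pattern built on these two functions. */
uint32_t primask = DisableGlobalIRQ();  /* save the previous mask state and mask interrupts */
/* ... access data shared with interrupt handlers ... */
EnableGlobalIRQ(primask);               /* interrupts re-enable only if previously enabled  */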
@@ -651,6 +804,20 @@ void DisableDeepSleepIRQ(IRQn_Type interrupt);
#endif /* FSL_FEATURE_POWERLIB_EXTEND */
#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
+#if defined(DWT)
+/*!
+ * @brief Enable the counter to get CPU cycles.
+ */
+void MSDK_EnableCpuCycleCounter(void);
+
+/*!
+ * @brief Get the current CPU cycle count.
+ *
+ * @return Current CPU cycle count.
+ */
+uint32_t MSDK_GetCpuCycleCount(void);
+#endif
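A sketch of how the new cycle-counter helpers could be used for rough profiling (the profiled routine is hypothetical, and the DWT unit must be present on the core):

/* Illustrative cycle measurement; EXAMPLE_DoWork() is a placeholder. */
MSDK_EnableCpuCycleCounter();

uint32_t start  = MSDK_GetCpuCycleCount();
EXAMPLE_DoWork();
uint32_t cycles = MSDK_GetCpuCycleCount() - start;  /* unsigned subtraction tolerates counter wrap */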
+
#if defined(__cplusplus)
}
#endif /* __cplusplus*/