The code I am using is the following:
#define portCPU_IRQ_DISABLE()                      \
    __asm volatile ( "CPSID i" ::: "memory" );     \
    __asm volatile ( "DSB" );                      \
    __asm volatile ( "ISB" );

#define portCPU_IRQ_ENABLE()                       \
    __asm volatile ( "CPSIE i" ::: "memory" );     \
    __asm volatile ( "DSB" );                      \
    __asm volatile ( "ISB" );

/* Statement expression that evaluates to a non-zero value when the
   CPSR I bit is set, i.e. when IRQs are masked at the CPU. */
#define portCHECK_IF_INTERRUPTS_DISABLED()                          \
({                                                                  \
    uint32_t ulInterruptsDisabled;                                  \
    __asm volatile (                                                \
        "MRS %[IntDisabled], cpsr                      \n\t"        \
        "AND %[IntDisabled], %[IntDisabled], %[iMask]  \n\t"        \
        : [IntDisabled] "=r" ( ulInterruptsDisabled )               \
        : [iMask] "I" ( 1 << 7 )                                    \
        :                                                           \
    );                                                              \
    ulInterruptsDisabled;                                           \
})
uint32_t ulPortSetInterruptMask( void )
{
    uint32_t ulSavedInterruptState;
    const uint32_t ulInterruptsEnabled = !portCHECK_IF_INTERRUPTS_DISABLED();

    /* Interrupts are disabled at the CPU before the ICCPMR is updated. */
    if( ulInterruptsEnabled )
    {
        portCPU_IRQ_DISABLE();
    }

    ulSavedInterruptState = portICCPMR_PRIORITY_MASK_REGISTER;
    portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t )
        ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
    __asm volatile ( "DSB \n"
                     "ISB \n" ::: "memory" );

    if( ulInterruptsEnabled )
    {
        portCPU_IRQ_ENABLE();
    }

    return ulSavedInterruptState;
}
void vPortClearInterruptMask( uint32_t ulSavedInterruptState )
{
    const uint32_t ulInterruptsEnabled = !portCHECK_IF_INTERRUPTS_DISABLED();

    /* Interrupts are disabled at the CPU before the ICCPMR is updated. */
    if( ulInterruptsEnabled )
    {
        portCPU_IRQ_DISABLE();
    }

    portICCPMR_PRIORITY_MASK_REGISTER = ulSavedInterruptState;
    __asm volatile ( "DSB \n"
                     "ISB \n" ::: "memory" );

    if( ulInterruptsEnabled )
    {
        portCPU_IRQ_ENABLE();
    }
}
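
For reference, this is roughly how I expect the pair to be used. The portmacro.h mappings and the example function below are a sketch of my own, assuming the standard Cortex-A port naming, not code copied from my project:

#define portSET_INTERRUPT_MASK_FROM_ISR()         ulPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x )    vPortClearInterruptMask( x )

void vAccessSharedDataFromIsr( void )
{
    uint32_t ulState;

    /* Raise the ICCPMR so that every interrupt that is allowed to call
       the FreeRTOS API is masked, remembering the previous mask. */
    ulState = portSET_INTERRUPT_MASK_FROM_ISR();

    /* ... touch data shared with API-calling interrupts here ... */

    /* Restore the priority mask that was in place before. */
    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulState );
}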
If the interrupts are disabled properly, i.e. the barriers take effect so that they are masked exactly when they should be, I don't see how the condition (ulInterruptsEnabled) could change within ulPortSetInterruptMask() and vPortClearInterruptMask().
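
Even if the I bit could change between the MRS and the CPSID, an IRQ taken in that window returns with the CPSR restored from SPSR, so the value the caller observed is still in force afterwards. And if that window were a real concern, I would expect a save-unconditionally-then-disable pattern to remove the test-then-act gap altogether. A minimal sketch of that idea (my own, not code from the port):

/* Save the CPSR I bit unconditionally, then disable IRQs; there is no
   separate check-then-disable window to race against. */
static inline uint32_t ulSaveAndDisableIRQ( void )
{
    uint32_t ulCpsr;

    __asm volatile ( "MRS %0, cpsr" : "=r" ( ulCpsr ) :: "memory" );
    portCPU_IRQ_DISABLE();

    /* Non-zero if IRQs were already masked before the call. */
    return ulCpsr & ( 1UL << 7 );
}

static inline void vRestoreIRQ( uint32_t ulWasMasked )
{
    if( ulWasMasked == 0UL )
    {
        portCPU_IRQ_ENABLE();
    }
}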