Changes from all commits
26 commits
14ba4c5
refactor(freertos/smp): Move critical sections inside xTaskPriorityIn…
Dazza0 Jun 15, 2024
ea3e9d1
feat(freertos/smp): Allow vTaskPreemptionEnable() to be nested
Dazza0 Jun 15, 2024
d99bdef
feat(freertos/smp): Add granular locking port macros checks
Dazza0 Jun 15, 2024
1ba1584
feat(granular_locks): Add granular locking functions
Dazza0 Jun 15, 2024
f761e9b
change(freertos/smp): Update tasks.c locking
Dazza0 Jun 15, 2024
2ba07ac
change(freertos/smp): Update queue.c locking
Dazza0 Jun 15, 2024
b3e2444
change(freertos/smp): Update event_groups.c locking
Dazza0 Jun 15, 2024
32d8176
change(freertos/smp): Update stream_buffer.c locking
Dazza0 Jun 15, 2024
a196372
change(freertos/smp): Update timers.c locking
Dazza0 Jun 15, 2024
0847cd0
feat(freertos/smp): Add Granular Locking V4 proposal documents
Dazza0 Jun 17, 2024
ea17e85
Update granular lock port
chinglee-iot Mar 21, 2025
4c517bc
Update RP2040 to support granular lock
chinglee-iot Mar 21, 2025
f836da4
refactor(freertos/smp): Move critical sections inside xTaskPriorityIn…
Dazza0 Jun 15, 2024
94c320c
feat(freertos/smp): Allow vTaskPreemptionEnable() to be nested
Dazza0 Jun 15, 2024
308d050
feat(freertos/smp): Add granular locking port macros checks
Dazza0 Jun 15, 2024
322512c
feat(granular_locks): Add granular locking functions
Dazza0 Jun 15, 2024
21f42e6
change(freertos/smp): Update tasks.c locking
Dazza0 Jun 15, 2024
e67f166
change(freertos/smp): Update queue.c locking
Dazza0 Jun 15, 2024
a03ce9c
change(freertos/smp): Update event_groups.c locking
Dazza0 Jun 15, 2024
ca6db79
change(freertos/smp): Update stream_buffer.c locking
Dazza0 Jun 15, 2024
b8611c9
change(freertos/smp): Update timers.c locking
Dazza0 Jun 15, 2024
fb77de7
feat(freertos/smp): Add Granular Locking V4 proposal documents
Dazza0 Jun 17, 2024
5b0528a
Merge branch 'feature/smp_granular_locks_v4' into rp2040_pico_support…
chinglee-iot Jun 27, 2025
30a82b9
Add lightweight critical section and XMOS port
chinglee-iot Jul 2, 2025
3c1375c
Update lightweight critical section implementation
chinglee-iot Jul 3, 2025
674813e
Update XMOS AI granular lock implementation
chinglee-iot Aug 5, 2025
154 changes: 137 additions & 17 deletions event_groups.c
@@ -63,10 +63,48 @@
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} EventGroup_t;

/*-----------------------------------------------------------*/

/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define event_groupsENTER_CRITICAL( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxEventBits->xISRSpinlock, puxSavedInterruptStatus )
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxEventBits->xISRSpinlock )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 )
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskEXIT_CRITICAL();
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
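
For orientation, here is a minimal sketch of how these wrappers are meant to be used; the helper functions are invented for illustration and are not part of the patch. In the granular-locks build, a task-level critical section takes both of the event group's spinlocks, while an ISR-level critical section takes only the ISR spinlock and saves the interrupt mask through the extra parameter:

/* Illustrative helper, not in the patch: task-level read of the bits. */
static EventBits_t prvExampleGetBits( EventGroup_t * pxEventBits )
{
    EventBits_t uxBits;

    event_groupsENTER_CRITICAL( pxEventBits );
    {
        uxBits = pxEventBits->uxEventBits; /* Guarded by this group's locks only. */
    }
    event_groupsEXIT_CRITICAL( pxEventBits );

    return uxBits;
}

/* Illustrative helper, not in the patch: the same read from an ISR. */
static EventBits_t prvExampleGetBitsFromISR( EventGroup_t * pxEventBits )
{
    UBaseType_t uxSavedInterruptStatus;
    EventBits_t uxBits;

    event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, &uxSavedInterruptStatus );
    {
        uxBits = pxEventBits->uxEventBits;
    }
    event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

    return uxBits;
}

Note the narrowed scope: unrelated event groups no longer contend on one kernel-wide critical section, which is the point of the granular-locking change.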

/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. The caller cannot be preempted by other tasks
* after locking the event group, which allows the caller to execute non-deterministic
* operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Unlocks an event group for tasks. Handles all pended access from ISRs, then re-enables
* preemption for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -79,6 +117,25 @@
const EventBits_t uxBitsToWaitFor,
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;

/*-----------------------------------------------------------*/

/*
* Macros used to lock and unlock an event group. When a task locks an
* event group, the task will have thread-safe, non-deterministic access to
* the event group.
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
* - Concurrent access from ISRs will be pended
*
* When the task unlocks the event group, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits )
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits );
Review comment:

Early Return Pattern Concern - This macro violates the FreeRTOS coding standard discouraging early returns.

The current implementation:

#define event_groupsUNLOCK( pxEventBits )    prvUnlockEventGroupForTasks( pxEventBits );

Creates this usage pattern:

xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); // Direct return

Issue: This creates multiple exit points in functions, contradicting FreeRTOS coding standards that prefer single exit points for maintainability.

Recommendation: Consider restructuring the implementation to maintain single exit points:

// In the calling function, instead of direct return:
BaseType_t xYieldResult;
event_groupsLOCK( pxEventBits );
{
    // ... critical section work ...
    xYieldResult = event_groupsUNLOCK( pxEventBits );
}

// Single exit point
xAlreadyYielded = xYieldResult;

This pattern should be applied consistently across all similar macros to maintain FreeRTOS coding standards.

#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
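
The calling pattern these two macros produce is the same in both builds; only the underlying mechanism changes. A sketch, assuming the same shape as the xEventGroupSync() and xEventGroupWaitBits() call sites below:

event_groupsLOCK( pxEventBits ); /* vTaskSuspendAll() without granular locks. */
{
    /* Non-deterministic work on this event group: test the wait
     * condition, update uxEventBits, place the caller on the
     * event list if it must block, and so on. */
}
xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); /* xTaskResumeAll() without. */

if( xAlreadyYielded == pdFALSE )
{
    portYIELD_WITHIN_API(); /* Yield manually if the unlock did not already. */
}

Under granular locks, prvUnlockEventGroupForTasks() always returns pdTRUE (see its definition at the end of this file), so the manual yield is skipped; any required yield happens when preemption is re-enabled inside the unlock.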

/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -122,6 +179,13 @@
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
@@ -167,6 +231,13 @@
}
#endif /* configSUPPORT_STATIC_ALLOCATION */

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
@@ -202,7 +273,7 @@
}
#endif

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
uxOriginalBitValue = pxEventBits->uxEventBits;

@@ -245,7 +316,7 @@
}
}
}
xAlreadyYielded = xTaskResumeAll();
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -267,7 +338,7 @@
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
uxReturn = pxEventBits->uxEventBits;

@@ -284,7 +355,7 @@
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );

xTimeoutOccurred = pdTRUE;
}
@@ -333,7 +404,7 @@
}
#endif

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

@@ -401,7 +472,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = xTaskResumeAll();
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -422,7 +493,7 @@

if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +518,7 @@

xTimeoutOccurred = pdTRUE;
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );
}
else
{
@@ -482,7 +553,7 @@
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

taskENTER_CRITICAL();
event_groupsENTER_CRITICAL( pxEventBits );
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

Expand All @@ -493,7 +564,7 @@
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL();
event_groupsEXIT_CRITICAL( pxEventBits );

traceRETURN_xEventGroupClearBits( uxReturn );

@@ -524,19 +595,19 @@
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
EventGroup_t const * const pxEventBits = xEventGroup;
EventGroup_t * const pxEventBits = xEventGroup;
EventBits_t uxReturn;

traceENTER_xEventGroupGetBitsFromISR( xEventGroup );

/* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, &uxSavedInterruptStatus );
{
uxReturn = pxEventBits->uxEventBits;
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

traceRETURN_xEventGroupGetBitsFromISR( uxReturn );

@@ -564,10 +635,17 @@

pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList );
vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

pxListItem = listGET_HEAD_ENTRY( pxList );

/* Set the bits. */
@@ -638,8 +716,12 @@

/* Snapshot resulting bits. */
uxReturnBits = pxEventBits->uxEventBits;

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) xTaskResumeAll();
( void ) event_groupsUNLOCK( pxEventBits );

traceRETURN_xEventGroupSetBits( uxReturnBits );

@@ -658,19 +740,30 @@

pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

vTaskSuspendAll();
event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_DELETE( xEventGroup );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
* and cannot therefore have any bits set. */
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) xTaskResumeAll();
( void ) event_groupsUNLOCK( pxEventBits );

#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
@@ -774,6 +867,33 @@
traceRETURN_vEventGroupClearBitsCallback();
}
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );

/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
* the event group while it is locked. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );

/* Re-enable preemption */
vTaskPreemptionEnable( NULL );

/* We assume that the task was preempted when preemption was enabled */
return pdTRUE;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
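
Taken together with the kernel-suspension calls added in xEventGroupSetBits() and vEventGroupDelete() above, the locking discipline nests as follows. This is an illustrative summary with an invented function name, not code from the patch:

static void prvExampleNonDeterministicOp( EventGroup_t * pxEventBits )
{
    /* 1. Lock the event group: preemption is disabled and xTaskSpinlock is
     *    held, so other tasks are excluded and ISR access is pended. */
    event_groupsLOCK( pxEventBits );
    {
        /* 2. The waiting-tasks list touches kernel-owned state, so the
         *    kernel data group is suspended for the unbounded traversal. */
        vTaskSuspendAll();

        /* ... walk pxEventBits->xTasksWaitingForBits here ... */

        ( void ) xTaskResumeAll();
    }
    /* 3. Unlock in reverse order: the spinlock is released, preemption is
     *    re-enabled, and access pended by ISRs is then handled. */
    ( void ) event_groupsUNLOCK( pxEventBits );
}

Releasing the kernel data group before the event group's own lock keeps the acquisition order (event group first, then kernel) strictly nested, which avoids lock-order inversions between the two data groups.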

static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
const EventBits_t uxBitsToWaitFor,