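/**
 * @file
 *
 * @brief Mellor-Crummey and Scott (MCS) SMP lock implementation.
 *
 * The MCS lock is a fair, FIFO-ordered queue lock: each waiting thread spins
 * on a flag in its own lock context, which avoids cache line contention on
 * the lock control itself.
 */
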
#ifndef _RTEMS_SCORE_SMPLOCKMCS_H
#define _RTEMS_SCORE_SMPLOCKMCS_H

#include <rtems/score/cpuopts.h>

#if defined(RTEMS_SMP)

#include <rtems/score/atomic.h>
#include <rtems/score/smplockstats.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief SMP Mellor-Crummey and Scott (MCS) lock context.
 *
 * Each lock acquire needs its own context, which forms one node in the
 * queue of threads waiting for the lock.
 */
typedef struct SMP_MCS_lock_Context {
  /**
   * @brief The next context in the queue, if it exists.
   */
  union {
    /**
     * @brief The next context as an atomic unsigned integer pointer value.
     */
    Atomic_Uintptr atomic;

    /**
     * @brief The next context as a normal pointer value.
     */
    struct SMP_MCS_lock_Context *normal;
  } next;

  /**
   * @brief Indicates if the lock is owned or free in case a previous
   *   context exists.
   *
   * This field is set to a non-zero value during the acquire.  The previous
   * lock owner clears it during its release to hand the lock over.
   */
  Atomic_Uint locked;

#if defined(RTEMS_PROFILING)
  /**
   * @brief The lock statistics context of this acquire (profiling only).
   */
  SMP_lock_Stats_context Stats_context;

  /**
   * @brief The count of contexts that were ahead of this one in the queue
   *   (profiling only).
   */
  unsigned int queue_length;
#endif
} SMP_MCS_lock_Context;

/**
 * @brief SMP Mellor-Crummey and Scott (MCS) lock control.
 */
typedef struct {
  /**
   * @brief The queue tail context.
   *
   * The lock is free if this field is zero, otherwise it is owned by the
   * thread whose context is at the head of the queue.
   */
  union {
    /**
     * @brief The queue tail context as an atomic unsigned integer pointer
     *   value.
     */
    Atomic_Uintptr atomic;

    /**
     * @brief The queue tail context as a normal pointer value.
     */
    struct SMP_MCS_lock_Context *normal;
  } queue;
} SMP_MCS_lock_Control;

/**
 * @brief SMP MCS lock control initializer for static initialization.
 */
#define SMP_MCS_LOCK_INITIALIZER { { ATOMIC_INITIALIZER_UINTPTR( 0 ) } }

/**
 * @brief Initializes an SMP MCS lock.
 *
 * Concurrent initialization leads to unpredictable results.
 *
 * @param[out] lock The SMP MCS lock control.
 */
static inline void _SMP_MCS_lock_Initialize( SMP_MCS_lock_Control *lock )
{
  _Atomic_Init_uintptr( &lock->queue.atomic, 0 );
}

/**
 * @brief Destroys an SMP MCS lock.
 *
 * Concurrent destruction leads to unpredictable results.
 *
 * @param[in] lock The SMP MCS lock control.
 */
static inline void _SMP_MCS_lock_Destroy( SMP_MCS_lock_Control *lock )
{
  (void) lock;
}

/**
 * @brief Acquires an SMP MCS lock.
 *
 * @param[in, out] lock The SMP MCS lock control.
 * @param[in, out] context The SMP MCS lock context used for this acquire.
 * @param[in, out] stats The SMP lock statistics (profiling configurations
 *   only).
 */
static inline void _SMP_MCS_lock_Do_acquire(
  SMP_MCS_lock_Control *lock,
  SMP_MCS_lock_Context *context
#if defined(RTEMS_PROFILING)
  ,
  SMP_lock_Stats *stats
#endif
)
{
  SMP_MCS_lock_Context *previous;
#if defined(RTEMS_PROFILING)
  SMP_lock_Stats_acquire_context acquire_context;

  _SMP_lock_Stats_acquire_begin( &acquire_context );
  context->queue_length = 0;
#endif

  _Atomic_Store_uintptr( &context->next.atomic, 0, ATOMIC_ORDER_RELAXED );
  _Atomic_Store_uint( &context->locked, 1, ATOMIC_ORDER_RELAXED );

  /* Append our context to the queue and obtain the previous queue tail */
  previous = (SMP_MCS_lock_Context *) _Atomic_Exchange_uintptr(
    &lock->queue.atomic,
    (uintptr_t) context,
    ATOMIC_ORDER_ACQ_REL
  );

  if ( previous != NULL ) {
    unsigned int locked;

    /* Make our context visible to the previous queue tail */
    _Atomic_Store_uintptr(
      &previous->next.atomic,
      (uintptr_t) context,
      ATOMIC_ORDER_RELAXED
    );

    /*
     * Spin on our own context until the previous owner hands the lock over
     * during its release.
     */
    do {
      locked = _Atomic_Load_uint( &context->locked, ATOMIC_ORDER_ACQUIRE );
    } while ( locked != 0 );
  }

#if defined(RTEMS_PROFILING)
  _SMP_lock_Stats_acquire_end(
    &acquire_context,
    stats,
    &context->Stats_context,
    context->queue_length
  );
#endif
}

/**
 * @brief Acquires an SMP MCS lock.
 *
 * In non-profiling configurations the stats argument is not evaluated.
 *
 * @param[in, out] lock The SMP MCS lock control.
 * @param[in, out] context The SMP MCS lock context used for this acquire.
 * @param[in, out] stats The SMP lock statistics (profiling configurations
 *   only).
 */
#if defined(RTEMS_PROFILING)
  #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
    _SMP_MCS_lock_Do_acquire( lock, context, stats )
#else
  #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
    _SMP_MCS_lock_Do_acquire( lock, context )
#endif

/**
 * @brief Releases an SMP MCS lock.
 *
 * @param[in, out] lock The SMP MCS lock control.
 * @param[in, out] context The SMP MCS lock context used for the
 *   corresponding acquire.
 */
static inline void _SMP_MCS_lock_Release(
  SMP_MCS_lock_Control *lock,
  SMP_MCS_lock_Context *context
)
{
  SMP_MCS_lock_Context *next;

  next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
    &context->next.atomic,
    ATOMIC_ORDER_RELAXED
  );

  if ( next == NULL ) {
    uintptr_t expected;
    bool success;

    /*
     * No successor is visible yet.  Try to free the lock by resetting the
     * queue tail, provided our context is still the tail.
     */
    expected = (uintptr_t) context;
    success = _Atomic_Compare_exchange_uintptr(
      &lock->queue.atomic,
      &expected,
      0,
      ATOMIC_ORDER_RELEASE,
      ATOMIC_ORDER_RELAXED
    );

    if ( success ) {
#if defined(RTEMS_PROFILING)
      _SMP_lock_Stats_release_update( &context->Stats_context );
#endif

      /* Nobody was waiting, the lock is free again */
      return;
    }

    /*
     * Another context was enqueued concurrently.  Wait until it becomes
     * visible through our next pointer.
     */
    do {
      next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
        &context->next.atomic,
        ATOMIC_ORDER_RELAXED
      );
    } while ( next == NULL );
  }

#if defined(RTEMS_PROFILING)
  next->queue_length = context->queue_length + 1;
  _SMP_lock_Stats_release_update( &context->Stats_context );
#endif

  /* Hand the lock over to the next context in the queue */
  _Atomic_Store_uint( &next->locked, 0, ATOMIC_ORDER_RELEASE );
}
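
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * names example_lock, example_stats and example_critical_section are
 * hypothetical.  Each acquire needs its own SMP_MCS_lock_Context, which must
 * stay valid until the matching release; it is typically placed on the stack
 * of the acquiring thread.  The stats argument is only evaluated in
 * profiling configurations.
 *
 *   static SMP_MCS_lock_Control example_lock = SMP_MCS_LOCK_INITIALIZER;
 *   static SMP_lock_Stats example_stats;
 *
 *   static void example_critical_section( void )
 *   {
 *     SMP_MCS_lock_Context lock_context;
 *
 *     _SMP_MCS_lock_Acquire( &example_lock, &lock_context, &example_stats );
 *     ... code protected by the MCS lock ...
 *     _SMP_MCS_lock_Release( &example_lock, &lock_context );
 *   }
 */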

#ifdef __cplusplus
}
#endif

#endif /* RTEMS_SMP */

#endif /* _RTEMS_SCORE_SMPLOCKMCS_H */