Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:13

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreSMPLock
0007  *
0008  * @brief This header file provides the interfaces of the
0009  *   @ref RTEMSScoreSMPLock related to MCS locks.
0010  */
0011 
0012 /*
0013  * Copyright (c) 2016 embedded brains GmbH & Co. KG
0014  *
0015  * Redistribution and use in source and binary forms, with or without
0016  * modification, are permitted provided that the following conditions
0017  * are met:
0018  * 1. Redistributions of source code must retain the above copyright
0019  *    notice, this list of conditions and the following disclaimer.
0020  * 2. Redistributions in binary form must reproduce the above copyright
0021  *    notice, this list of conditions and the following disclaimer in the
0022  *    documentation and/or other materials provided with the distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0026  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0027  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0028  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0031  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0032  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0033  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0034  * POSSIBILITY OF SUCH DAMAGE.
0035  */
0036 
0037 #ifndef _RTEMS_SCORE_SMPLOCKMCS_H
0038 #define _RTEMS_SCORE_SMPLOCKMCS_H
0039 
0040 #include <rtems/score/cpuopts.h>
0041 
0042 #if defined(RTEMS_SMP)
0043 
0044 #include <rtems/score/atomic.h>
0045 #include <rtems/score/smplockstats.h>
0046 
0047 #ifdef __cplusplus
0048 extern "C" {
0049 #endif /* __cplusplus */
0050 
0051 /**
0052  * @addtogroup RTEMSScoreSMPLock
0053  *
0054  * @{
0055  */
0056 
0057 /**
0058  * @brief SMP Mellor-Crummey and Scott (MCS) lock context.
0059  */
typedef struct SMP_MCS_lock_Context {
  /**
   * @brief The next context on the queue if it exists.
   */
  union {
    /**
     * @brief The next context as an atomic unsigned integer pointer value.
     */
    Atomic_Uintptr atomic;

    /**
     * @brief The next context as a normal pointer.
     *
     * Only provided for debugging purposes.
     */
    struct SMP_MCS_lock_Context *normal;
  } next;

  /**
   * @brief Indicates if the lock is owned or free in case a previous context
   * exists on the queue.
   *
   * This field is initialized to a non-zero value.  The previous lock owner
   * (which is the owner of the previous context) will set it to zero during
   * its lock release.
   */
  Atomic_Uint locked;

#if defined(RTEMS_PROFILING)
  /**
   * @brief Statistics context used to account the lock acquire and release.
   */
  SMP_lock_Stats_context Stats_context;

  /**
   * @brief Number of predecessors this context had to wait behind; zero when
   * the lock was acquired without waiting.  Used for profiling only.
   */
  unsigned int queue_length;
#endif
} SMP_MCS_lock_Context;
0094 
/**
 * @brief SMP Mellor-Crummey and Scott (MCS) lock control.
 *
 * The lock consists solely of the tail pointer of the waiter queue; the
 * per-processor queue nodes live in SMP_MCS_lock_Context objects supplied by
 * the callers of the acquire and release operations.
 */
typedef struct {
  /**
   * @brief The queue tail context.
   *
   * The lock is free, in case this field is zero, otherwise it is locked by
   * the owner of the queue head.
   */
  union {
    /**
     * @brief The queue tail context as an atomic unsigned integer pointer
     * value.
     */
    Atomic_Uintptr atomic;

    /**
     * @brief The queue tail context as a normal pointer.
     *
     * Only provided for debugging purposes.
     */
    struct SMP_MCS_lock_Context *normal;
  } queue;
} SMP_MCS_lock_Control;
0120 
/**
 * @brief SMP MCS lock control initializer for static initialization.
 *
 * Sets the queue tail to zero, which indicates a free lock.
 */
#define SMP_MCS_LOCK_INITIALIZER { { ATOMIC_INITIALIZER_UINTPTR( 0 ) } }
0125 
/**
 * @brief Initializes the SMP MCS lock.
 *
 * Concurrent initialization leads to unpredictable results.
 *
 * @param[in, out] lock The SMP MCS lock control.
 */
static inline void _SMP_MCS_lock_Initialize( SMP_MCS_lock_Control *lock )
{
  /* An empty queue (zero tail) means the lock is free. */
  _Atomic_Init_uintptr( &lock->queue.atomic, 0 );
}
0137 
/**
 * @brief Destroys the SMP MCS lock.
 *
 * Concurrent destruction leads to unpredictable results.
 *
 * @param[out] lock The SMP MCS lock control.
 */
static inline void _SMP_MCS_lock_Destroy( SMP_MCS_lock_Control *lock )
{
  /* Nothing to release; the cast only silences the unused-parameter warning. */
  (void) lock;
}
0149 
/**
 * @brief Acquires the SMP MCS lock.
 *
 * The context is appended to the lock's waiter queue by an atomic exchange of
 * the queue tail.  If a predecessor exists, the caller links itself behind it
 * and busy-waits on its own context until the predecessor clears the locked
 * flag during its release.
 *
 * @param[in, out] lock The lock to acquire.
 * @param[in, out] context The lock context.
 * @param stats the SMP lock statistics.
 */
static inline void _SMP_MCS_lock_Do_acquire(
  SMP_MCS_lock_Control   *lock,
  SMP_MCS_lock_Context   *context
#if defined(RTEMS_PROFILING)
  ,
  SMP_lock_Stats         *stats
#endif
)
{
  SMP_MCS_lock_Context           *previous;
#if defined(RTEMS_PROFILING)
  SMP_lock_Stats_acquire_context  acquire_context;

  _SMP_lock_Stats_acquire_begin( &acquire_context );
  context->queue_length = 0;
#endif

  /*
   * Prepare the context before it becomes visible to other processors: no
   * successor yet, and mark it as waiting (non-zero locked value).  Relaxed
   * stores suffice here; the exchange below publishes them.
   */
  _Atomic_Store_uintptr( &context->next.atomic, 0, ATOMIC_ORDER_RELAXED );
  _Atomic_Store_uint( &context->locked, 1, ATOMIC_ORDER_RELAXED );

  /*
   * Atomically install this context as the new queue tail.  The previous
   * tail, if any, is our predecessor.  A NULL result means the lock was free
   * and is now owned by us.
   */
  previous = (SMP_MCS_lock_Context *) _Atomic_Exchange_uintptr(
    &lock->queue.atomic,
    (uintptr_t) context,
    ATOMIC_ORDER_ACQ_REL
  );

  if ( previous != NULL ) {
    unsigned int locked;

    /* Link behind the predecessor so it can hand the lock over to us. */
    _Atomic_Store_uintptr(
      &previous->next.atomic,
      (uintptr_t) context,
      ATOMIC_ORDER_RELAXED
    );

    /*
     * Spin on our own context only (the defining MCS property).  The acquire
     * load pairs with the release store in _SMP_MCS_lock_Release().
     */
    do {
      locked = _Atomic_Load_uint( &context->locked, ATOMIC_ORDER_ACQUIRE );
    } while ( locked != 0 );
  }

#if defined(RTEMS_PROFILING)
  _SMP_lock_Stats_acquire_end(
    &acquire_context,
    stats,
    &context->Stats_context,
    context->queue_length
  );
#endif
}
0206 
/**
 * @brief Acquires an SMP MCS lock.
 *
 * This function will not disable interrupts.  The caller must ensure that the
 * current thread of execution is not interrupted indefinitely once it obtained
 * the SMP MCS lock.
 *
 * @param lock The SMP MCS lock control.
 * @param context The SMP MCS lock context.
 * @param stats The SMP lock statistics.
 */
#if defined(RTEMS_PROFILING)
  #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
    _SMP_MCS_lock_Do_acquire( lock, context, stats )
#else
  /* Without profiling the stats argument is simply dropped. */
  #define _SMP_MCS_lock_Acquire( lock, context, stats ) \
    _SMP_MCS_lock_Do_acquire( lock, context )
#endif
0225 
/**
 * @brief Releases an SMP MCS lock.
 *
 * If no successor is linked, the release tries to free the lock by resetting
 * the queue tail with a compare-and-swap.  A failing compare-and-swap means a
 * successor is in the middle of enqueueing itself; in that case wait until it
 * has linked itself, then hand the lock over by clearing its locked flag.
 *
 * @param[in, out] lock The SMP MCS lock control.
 * @param[in, out] context The SMP MCS lock context.
 */
static inline void _SMP_MCS_lock_Release(
  SMP_MCS_lock_Control *lock,
  SMP_MCS_lock_Context *context
)
{
  SMP_MCS_lock_Context *next;

  next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
    &context->next.atomic,
    ATOMIC_ORDER_RELAXED
  );

  if ( next == NULL ) {
    uintptr_t expected;
    bool      success;

    /*
     * If the queue tail is still our own context, no one else is queued and
     * we can free the lock.  The release order publishes the critical
     * section to the next lock owner.
     */
    expected = (uintptr_t) context;
    success = _Atomic_Compare_exchange_uintptr(
      &lock->queue.atomic,
      &expected,
      0,
      ATOMIC_ORDER_RELEASE,
      ATOMIC_ORDER_RELAXED
    );

    if ( success ) {
#if defined(RTEMS_PROFILING)
      _SMP_lock_Stats_release_update( &context->Stats_context );
#endif
      /* Nobody waits. So, we are done */
      return;
    }

    /*
     * The compare-and-swap failed, so a successor exchanged the tail but has
     * not yet linked itself behind us.  Wait until the link appears.
     */
    do {
      next = (SMP_MCS_lock_Context *) _Atomic_Load_uintptr(
        &context->next.atomic,
        ATOMIC_ORDER_RELAXED
      );
    } while ( next == NULL );
  }

#if defined(RTEMS_PROFILING)
  next->queue_length = context->queue_length + 1;
  _SMP_lock_Stats_release_update( &context->Stats_context );
#endif

  /*
   * Hand the lock over: the release store pairs with the acquire spin in
   * _SMP_MCS_lock_Do_acquire().
   */
  _Atomic_Store_uint( &next->locked, 0, ATOMIC_ORDER_RELEASE );
}
0280 
0281 /** @} */
0282 
0283 #ifdef __cplusplus
0284 }
0285 #endif /* __cplusplus */
0286 
0287 #endif /* RTEMS_SMP */
0288 
0289 #endif /* _RTEMS_SCORE_SMPLOCKMCS_H */