/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreSMPLock
 *
 * @brief This header file provides the interfaces of the
 *   @ref RTEMSScoreSMPLock related to sequence locks.
 */

/*
 * Copyright (c) 2016 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_SMPLOCKSEQ_H
#define _RTEMS_SCORE_SMPLOCKSEQ_H

#include <rtems/score/cpuopts.h>

#if defined(RTEMS_SMP)

#include <rtems/score/assert.h>
#include <rtems/score/atomic.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup RTEMSScoreSMPLock
 *
 * @{
 */

/**
 * @brief SMP sequence lock control.
 *
 * The sequence lock offers a consistent data set for readers in the presence
 * of at most one concurrent writer.  Due to the read-modify-write operation
 * in _SMP_sequence_lock_Read_retry(), readers observe the data corresponding
 * to the last written sequence number.  To allow multiple writers, an
 * additional SMP lock is necessary to serialize the writes.
 *
 * See also Hans-J. Boehm, HP Laboratories,
 * "Can Seqlocks Get Along With Programming Language Memory Models?",
 * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
 */
typedef struct {
  /**
   * @brief The sequence number.
   *
   * An odd value indicates that a write is in progress.
   */
  Atomic_Uint sequence;
} SMP_sequence_lock_Control;

/**
 * @brief SMP sequence lock control initializer for static initialization.
 */
#define SMP_SEQUENCE_LOCK_INITIALIZER { ATOMIC_INITIALIZER_UINT( 0 ) }

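/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * hypothetical data set protected by a sequence lock.  The type and object
 * names below are examples.  If more than one writer may update the data
 * set, an additional SMP lock (not shown) is necessary to serialize the
 * writers.
 *
 * @code
 * typedef struct {
 *   SMP_sequence_lock_Control Lock;
 *   unsigned int              first;
 *   unsigned int              second;
 * } Example_protected_data;
 *
 * static Example_protected_data example_data = {
 *   .Lock = SMP_SEQUENCE_LOCK_INITIALIZER,
 *   .first = 0,
 *   .second = 0
 * };
 * @endcode
 */
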
/**
 * @brief Initializes an SMP sequence lock.
 *
 * Concurrent initialization leads to unpredictable results.
 *
 * @param[out] lock The SMP sequence lock control.
 */
static inline void _SMP_sequence_lock_Initialize( SMP_sequence_lock_Control *lock )
{
  _Atomic_Init_uint( &lock->sequence, 0 );
}

/**
 * @brief Destroys an SMP sequence lock.
 *
 * Concurrent destruction leads to unpredictable results.
 *
 * @param lock The SMP sequence lock control.
 */
static inline void _SMP_sequence_lock_Destroy( SMP_sequence_lock_Control *lock )
{
  (void) lock;
}

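/*
 * Initialization sketch (illustrative only): set up and tear down a sequence
 * lock at run time, for example for an object that is constructed
 * dynamically.  The type and function names are hypothetical.
 *
 * @code
 * typedef struct {
 *   SMP_sequence_lock_Control Lock;
 *   unsigned int              value;
 * } Example_object;
 *
 * void example_object_construct( Example_object *object )
 * {
 *   _SMP_sequence_lock_Initialize( &object->Lock );
 *   object->value = 0;
 * }
 *
 * void example_object_destroy( Example_object *object )
 * {
 *   _SMP_sequence_lock_Destroy( &object->Lock );
 * }
 * @endcode
 */
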
/**
 * @brief Begins an SMP sequence lock write operation.
 *
 * This function will not disable interrupts.  The caller must ensure that
 * the current thread of execution is not interrupted indefinitely, since
 * this would starve readers.
 *
 * @param[out] lock The SMP sequence lock control.
 *
 * @return The current sequence number.
 */
static inline unsigned int _SMP_sequence_lock_Write_begin(
  SMP_sequence_lock_Control *lock
)
{
  unsigned int seq;

  seq = _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_RELAXED );
  _Assert( seq % 2 == 0 );

  /* The odd sequence number indicates that a write is in progress */
  _Atomic_Store_uint( &lock->sequence, seq + 1, ATOMIC_ORDER_RELAXED );

  /* There is no atomic store with acquire/release semantics */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  return seq;
}

/**
 * @brief Ends an SMP sequence lock write operation.
 *
 * @param[out] lock The SMP sequence lock control.
 * @param seq The sequence number returned by _SMP_sequence_lock_Write_begin().
 */
static inline void _SMP_sequence_lock_Write_end(
  SMP_sequence_lock_Control *lock,
  unsigned int               seq
)
{
  /* The next even sequence number indicates that the write is complete */
  _Atomic_Store_uint( &lock->sequence, seq + 2, ATOMIC_ORDER_RELEASE );
}

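/*
 * Writer-side sketch (illustrative only): update the hypothetical data set
 * declared in the usage sketch above.  The example assumes a single writer;
 * with multiple writers, an additional SMP lock has to serialize the calls
 * to _SMP_sequence_lock_Write_begin() and _SMP_sequence_lock_Write_end().
 *
 * @code
 * void example_update( unsigned int first, unsigned int second )
 * {
 *   unsigned int seq;
 *
 *   seq = _SMP_sequence_lock_Write_begin( &example_data.Lock );
 *   example_data.first = first;
 *   example_data.second = second;
 *   _SMP_sequence_lock_Write_end( &example_data.Lock, seq );
 * }
 * @endcode
 */
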
/**
 * @brief Begins an SMP sequence lock read operation.
 *
 * This function will not disable interrupts.
 *
 * @param lock The SMP sequence lock control.
 *
 * @return The current sequence number.
 */
static inline unsigned int _SMP_sequence_lock_Read_begin(
  const SMP_sequence_lock_Control *lock
)
{
  return _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_ACQUIRE );
}

/**
 * @brief Ends an SMP sequence lock read operation and indicates if a retry is
 * necessary.
 *
 * @param[in, out] lock The SMP sequence lock control.
 * @param seq The sequence number returned by _SMP_sequence_lock_Read_begin().
 *
 * @retval true The read operation must be retried with a call to
 *   _SMP_sequence_lock_Read_begin().
 * @retval false The read operation need not be retried.
 */
static inline bool _SMP_sequence_lock_Read_retry(
  SMP_sequence_lock_Control *lock,
  unsigned int               seq
)
{
  unsigned int seq2;

  /*
   * The read-modify-write operation ensures that the data corresponding to
   * the last written sequence number is observed, see the Boehm paper
   * referenced in the SMP_sequence_lock_Control documentation.
   */
  seq2 = _Atomic_Fetch_add_uint( &lock->sequence, 0, ATOMIC_ORDER_RELEASE );

  /* Retry if the sequence number changed or a write was in progress */
  return seq != seq2 || seq % 2 != 0;
}

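/*
 * Reader-side sketch (illustrative only): read a consistent snapshot of the
 * hypothetical data set declared in the usage sketch above.  The read is
 * retried until no write was in progress and the sequence number did not
 * change while the data was read.
 *
 * @code
 * void example_read( unsigned int *first, unsigned int *second )
 * {
 *   unsigned int seq;
 *
 *   do {
 *     seq = _SMP_sequence_lock_Read_begin( &example_data.Lock );
 *     *first = example_data.first;
 *     *second = example_data.second;
 *   } while ( _SMP_sequence_lock_Read_retry( &example_data.Lock, seq ) );
 * }
 * @endcode
 */
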
/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* RTEMS_SMP */

#endif /* _RTEMS_SCORE_SMPLOCKSEQ_H */