/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreThread
 *
 * @brief This header file provides the interfaces of the
 *   @ref RTEMSScoreThread related to thread dispatching.
 */

/*
 * COPYRIGHT (c) 1989-2009.
 * On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_THREADDISPATCH_H
#define _RTEMS_SCORE_THREADDISPATCH_H

#include <rtems/score/percpu.h>
#include <rtems/score/isrlock.h>
#include <rtems/score/profiling.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup RTEMSScoreThread
 *
 * @{
 */

#if defined(RTEMS_SMP) || ( CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE )
/**
 * @brief Enables a robust thread dispatch.
 *
 * On each change of the thread dispatch disable level from one to zero the
 * interrupt status is checked.  In case interrupts are disabled and SMP is
 * enabled or the CPU port needs it, then the system terminates with the fatal
 * internal error INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT.
 */
#define RTEMS_SCORE_ROBUST_THREAD_DISPATCH
#endif

/**
 * @brief Indicates if thread dispatching is enabled, that is, if the
 * executing thread is outside of a thread dispatch critical section.
 *
 * @retval true Thread dispatching is enabled.
 * @retval false The executing thread is inside a thread dispatch critical
 * section and dispatching is not allowed.
 */
static inline bool _Thread_Dispatch_is_enabled(void)
{
  bool enabled;

#if defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Local_disable( level );
#endif

  enabled = _Thread_Dispatch_disable_level == 0;

#if defined(RTEMS_SMP)
  _ISR_Local_enable( level );
#endif

  return enabled;
}

/**
 * @brief Gets the thread dispatch disable level.
 *
 * @return The value of the thread dispatch disable level.
 */
static inline uint32_t _Thread_Dispatch_get_disable_level(void)
{
  return _Thread_Dispatch_disable_level;
}

/**
 * @brief Thread dispatch initialization.
 *
 * This routine initializes the thread dispatching subsystem.
 */
static inline void _Thread_Dispatch_initialization( void )
{
  _Thread_Dispatch_disable_level = 1;
}

/**
 * @brief Performs a thread dispatch if necessary.
 *
 * This routine is responsible for transferring control of the processor from
 * the executing thread to the heir thread.  Once the heir is running an
 * attempt is made to run the pending post-switch thread actions.
 *
 * As part of this process, it is responsible for the following actions:
 *   - updating the timing information of the executing thread,
 *   - saving the context of the executing thread,
 *   - invoking the thread switch user extensions,
 *   - restoring the context of the heir thread, and
 *   - running the pending post-switch thread actions of the resulting
 *     executing thread.
 *
 * On entry the thread dispatch disable level must be equal to zero.
 */
void _Thread_Dispatch( void );

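/*
 * A minimal sketch of a voluntary dispatch point (the caller below is
 * hypothetical and not part of this interface): with the thread dispatch
 * disable level at zero, _Thread_Dispatch() lets the heir thread run if a
 * dispatch became necessary.
 */
#if 0
static void _Example_Voluntary_dispatch_point( void )
{
  if ( _Thread_Dispatch_is_enabled() ) {
    /* Transfers control to the heir thread if a dispatch is necessary */
    _Thread_Dispatch();
  }
}
#endif
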
/**
 * @brief Directly do a thread dispatch.
 *
 * Must be called with a thread dispatch disable level of one, otherwise the
 * fatal error INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL will occur.
 * This function is useful for operations which synchronously block, e.g. self
 * restart, self deletion, yield, sleep.
 *
 * @param cpu_self The current processor.
 *
 * @see _Thread_Dispatch().
 */
void _Thread_Dispatch_direct( Per_CPU_Control *cpu_self );

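/*
 * A minimal usage sketch (the caller and the blocking setup are
 * hypothetical): a synchronously blocking operation first raises the thread
 * dispatch disable level from zero to one and then hands the processor over
 * directly.
 */
#if 0
static void _Example_Block_executing_thread( void )
{
  Per_CPU_Control *cpu_self;

  /* Enter the thread dispatch critical section, disable level is now one */
  cpu_self = _Thread_Dispatch_disable();

  /* ... block the executing thread, e.g. enqueue it on a wait queue ... */

  /* Dispatch to the heir thread, returns once this thread executes again */
  _Thread_Dispatch_direct( cpu_self );
}
#endif
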
/**
 * @brief Directly do a thread dispatch and do not return.
 *
 * @param cpu_self is the current processor.
 *
 * @see _Thread_Dispatch_direct().
 */
RTEMS_NO_RETURN void _Thread_Dispatch_direct_no_return(
  Per_CPU_Control *cpu_self
);

/**
 * @brief Performs a thread dispatch on the current processor.
 *
 * On entry the thread dispatch disable level must be equal to one and
 * interrupts must be disabled.
 *
 * This function assumes that a thread dispatch is necessary.
 *
 * @param cpu_self The current processor.
 * @param level The previous interrupt level.
 *
 * @see _Thread_Dispatch().
 */
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level );

/**
 * @brief Disables thread dispatching inside a critical section (interrupts
 * disabled) with the current processor.
 *
 * @param cpu_self The current processor.
 * @param lock_context The lock context of the corresponding
 * _ISR_lock_ISR_disable() that started the critical section.
 *
 * @return The current processor.
 */
static inline Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
  Per_CPU_Control        *cpu_self,
  const ISR_lock_Context *lock_context
)
{
  uint32_t disable_level;

  disable_level = cpu_self->thread_dispatch_disable_level;
  _Profiling_Thread_dispatch_disable_critical(
    cpu_self,
    disable_level,
    lock_context
  );
  cpu_self->thread_dispatch_disable_level = disable_level + 1;

  return cpu_self;
}

/**
 * @brief Disables thread dispatching inside a critical section (interrupts
 * disabled).
 *
 * @param lock_context The lock context of the corresponding
 * _ISR_lock_ISR_disable() that started the critical section.
 *
 * @return The current processor.
 */
static inline Per_CPU_Control *_Thread_Dispatch_disable_critical(
  const ISR_lock_Context *lock_context
)
{
  return _Thread_Dispatch_disable_with_CPU( _Per_CPU_Get(), lock_context );
}

/**
 * @brief Disables thread dispatching.
 *
 * @return The current processor.
 */
static inline Per_CPU_Control *_Thread_Dispatch_disable( void )
{
  Per_CPU_Control  *cpu_self;

#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING )
  ISR_lock_Context  lock_context;

  _ISR_lock_ISR_disable( &lock_context );

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );

  _ISR_lock_ISR_enable( &lock_context );
#else
  cpu_self = _Thread_Dispatch_disable_critical( NULL );
#endif

  return cpu_self;
}

/**
 * @brief Enables thread dispatching.
 *
 * May perform a thread dispatch if necessary as a side-effect.
 *
 * @param[in, out] cpu_self The current processor.
 */
void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self );

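/*
 * A minimal usage sketch (the protected operation is hypothetical): the
 * disable/enable pair brackets code which must not be preempted by a thread
 * dispatch; _Thread_Dispatch_enable() carries out a dispatch which became
 * necessary in the meantime.
 */
#if 0
static void _Example_Protected_operation( void )
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  /* ... operation carried out with thread dispatching disabled ... */
  _Thread_Dispatch_enable( cpu_self );
}
#endif
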
/**
 * @brief Unnests thread dispatching.
 *
 * @param[in, out] cpu_self The current processor.
 */
static inline void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
{
  _Assert( cpu_self->thread_dispatch_disable_level > 0 );
  --cpu_self->thread_dispatch_disable_level;
}

/**
 * @brief Requests a thread dispatch on the target processor.
 *
 * @param[in, out] cpu_self The current processor.
 * @param[in, out] cpu_target The target processor to request a thread dispatch.
 */
static inline void _Thread_Dispatch_request(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_target
)
{
#if defined( RTEMS_SMP )
  if ( cpu_self == cpu_target ) {
    cpu_self->dispatch_necessary = true;
  } else {
    _Atomic_Fetch_or_ulong( &cpu_target->message, 0, ATOMIC_ORDER_RELEASE );
    _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu_target ) );
  }
#else
  cpu_self->dispatch_necessary = true;
  (void) cpu_target;
#endif
}

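/*
 * A minimal sketch (the scheduler context is hypothetical): once a processor
 * got a new heir thread, request a thread dispatch on it; on SMP targets a
 * remote processor is notified via an inter-processor interrupt.
 */
#if 0
static void _Example_Notify_about_new_heir( Per_CPU_Control *cpu_target )
{
  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_target );
}
#endif
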
/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_THREADDISPATCH_H */