0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreThread
0007  *
0008  * @brief This header file provides interfaces of the
0009  *   @ref RTEMSScoreThread which are only used by the implementation.
0010  */
0011 
0012 /*
0013  *  COPYRIGHT (c) 1989-2008.
0014  *  On-Line Applications Research Corporation (OAR).
0015  *
0016  *  Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
0017  *
0018  * Redistribution and use in source and binary forms, with or without
0019  * modification, are permitted provided that the following conditions
0020  * are met:
0021  * 1. Redistributions of source code must retain the above copyright
0022  *    notice, this list of conditions and the following disclaimer.
0023  * 2. Redistributions in binary form must reproduce the above copyright
0024  *    notice, this list of conditions and the following disclaimer in the
0025  *    documentation and/or other materials provided with the distribution.
0026  *
0027  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0028  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0029  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0030  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0031  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0032  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0033  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0034  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0035  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0036  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0037  * POSSIBILITY OF SUCH DAMAGE.
0038  */
0039 
0040 #ifndef _RTEMS_SCORE_THREADIMPL_H
0041 #define _RTEMS_SCORE_THREADIMPL_H
0042 
0043 #include <rtems/score/thread.h>
0044 #include <rtems/score/assert.h>
0045 #include <rtems/score/chainimpl.h>
0046 #include <rtems/score/interr.h>
0047 #include <rtems/score/isr.h>
0048 #include <rtems/score/objectimpl.h>
0049 #include <rtems/score/schedulernodeimpl.h>
0050 #include <rtems/score/statesimpl.h>
0051 #include <rtems/score/status.h>
0052 #include <rtems/score/sysstate.h>
0053 #include <rtems/score/timestampimpl.h>
0054 #include <rtems/score/threadqimpl.h>
0055 #include <rtems/score/todimpl.h>
0056 #include <rtems/score/watchdogimpl.h>
0057 #include <rtems/config.h>
0058 
0059 #ifdef __cplusplus
0060 extern "C" {
0061 #endif
0062 
0063 /**
0064  * @addtogroup RTEMSScoreThread
0065  *
0066  * @{
0067  */
0068 
0069 /**
0070  * @brief The thread zombie registry is used to register threads in the
0071  *   #STATES_ZOMBIE state.
0072  */
0073 typedef struct {
0074 #if defined(RTEMS_SMP)
0075   /**
0076    * @brief This lock protects the zombie chain.
0077    */
0078   ISR_lock_Control Lock;
0079 #endif
0080 
0081   /**
0082    * @brief This chain contains the registered zombie threads.
0083    */
0084   Chain_Control Chain;
0085 } Thread_Zombie_registry;
0086 
0087 /**
0088  * @brief This object is a registry for threads in the #STATES_ZOMBIE state.
0089  *
0090  * The registry contains zombie threads waiting to get killed by
0091  * _Thread_Kill_zombies().  Use _Thread_Add_to_zombie_registry() to add zombie
0092  * threads to the registry.
0093  */
0094 extern Thread_Zombie_registry _Thread_Zombies;
0095 
0096 /**
0097  * @brief Object identifier of the global constructor thread.
0098  *
0099  * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
0100  * _POSIX_Threads_Initialize_user_threads_body().
0101  *
0102  * It is consumed by _Thread_Handler().
0103  */
0104 extern Objects_Id _Thread_Global_constructor;
0105 
0106 /**
0107  *  The following points to the thread whose floating point
0108  *  context is currently loaded.
0109  */
0110 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
0111 extern Thread_Control *_Thread_Allocated_fp;
0112 #endif
0113 
0114 #if defined(RTEMS_SMP)
0115 #define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
0116   RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
0117 #endif
0118 
0119 typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
0120 
0121 /**
0122  * @brief Calls the visitor with each thread and the given argument until
0123  *      the visitor indicates that it is done.
0124  *
0125  * @param visitor Function that gets a thread and @a arg as parameters and
0126  *      returns true once it is done.
0127  * @param arg Parameter for @a visitor.
0128  */
0129 void _Thread_Iterate(
0130   Thread_Visitor  visitor,
0131   void           *arg
0132 );
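
/*
 * Illustrative sketch, not part of the original header: a visitor which
 * counts all threads in the system.  The _Example_ names are hypothetical.
 * The visitor returns false to continue the iteration and true once it is
 * done.
 */
static inline bool _Example_Count_visitor( Thread_Control *the_thread, void *arg )
{
  uint32_t *count;

  (void) the_thread;
  count = arg;
  ++( *count );

  return false;
}

static inline uint32_t _Example_Count_threads( void )
{
  uint32_t count;

  count = 0;
  _Thread_Iterate( _Example_Count_visitor, &count );

  return count;
}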
0133 
0134 /**
0135  * @brief Initializes the thread information
0136  *
0137  * @param[out] information Information to initialize.
0138  */
0139 void _Thread_Initialize_information( Thread_Information *information );
0140 
0141 /**
0142  * @brief Initializes thread handler.
0143  *
0144  * This routine performs the initialization necessary for this handler.
0145  */
0146 void _Thread_Handler_initialization(void);
0147 
0148 /**
0149  * @brief Creates idle thread.
0150  *
0151  * This routine creates the idle thread.
0152  *
0153  * @warning No thread should be created before this one.
0154  */
0155 void _Thread_Create_idle(void);
0156 
0157 /**
0158  * @brief Starts thread multitasking.
0159  *
0160  * This routine initiates multitasking.  It is invoked only as
0161  * part of initialization and its invocation is the last act of
0162  * the non-multitasking part of the system initialization.
0163  */
0164 RTEMS_NO_RETURN void _Thread_Start_multitasking( void );
0165 
0166 /**
0167  * @brief The configuration of a new thread to initialize.
0168  */
0169 typedef struct {
0170   /**
0171    * @brief The scheduler control instance for the thread.
0172    */
0173   const struct _Scheduler_Control *scheduler;
0174 
0175   /**
0176    * @brief The starting address of the stack area.
0177    */
0178   void *stack_area;
0179 
0180   /**
0181    * @brief The size of the stack area in bytes.
0182    */
0183   size_t stack_size;
0184 
0185   /**
0186    * @brief This member contains the handler to free the stack.
0187    *
0188  *   It shall not be NULL.  Use _Objects_Free_nothing() if there is nothing to free.
0189    */
0190   void ( *stack_free )( void * );
0191 
0192   /**
0193    * @brief The new thread's priority.
0194    */
0195   Priority_Control priority;
0196 
0197   /**
0198    * @brief The thread's initial CPU budget operations.
0199    */
0200   const Thread_CPU_budget_operations *cpu_budget_operations;
0201 
0202   /**
0203    * @brief 32-bit unsigned integer name of the object for the thread.
0204    */
0205   uint32_t name;
0206 
0207   /**
0208    * @brief The thread's initial ISR level.
0209    */
0210   uint32_t isr_level;
0211 
0212   /**
0213    * @brief Indicates whether the thread needs a floating-point area.
0214    */
0215   bool is_fp;
0216 
0217   /**
0218    * @brief Indicates whether the new thread is preemptible.
0219    */
0220   bool is_preemptible;
0221 } Thread_Configuration;
0222 
0223 /**
0224  * @brief Initializes the thread.
0225  *
0226  * This routine initializes the specified thread.  It allocates
0227  * all memory associated with this thread.  It completes by adding
0228  * the thread to the local object table so operations on this
0229  * thread id are allowed.
0230  *
0231  * @note If stack_area is NULL, it is allocated from the workspace.
0232  *
0233  * @note If the stack is allocated from the workspace, then it is
0234  *       guaranteed to be of at least minimum size.
0235  *
0236  * @param information The thread information.
0237  * @param the_thread The thread to initialize.
0238  * @param config The configuration of the thread to initialize.
0239  *
0240  * @retval STATUS_SUCCESSFUL The thread initialization was successful.
0241  *
0242  * @retval STATUS_UNSATISFIED The thread initialization failed.
0243  */
0244 Status_Control _Thread_Initialize(
0245   Thread_Information         *information,
0246   Thread_Control             *the_thread,
0247   const Thread_Configuration *config
0248 );
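
/*
 * Illustrative sketch, not part of the original header: how a caller might
 * fill in a Thread_Configuration and initialize a thread object with it.
 * The _Example_ name, the zero object name, and the externally provided
 * stack area are assumptions; real callers obtain these values from the API
 * layer.
 */
static inline Status_Control _Example_Initialize_thread(
  Thread_Information              *information,
  Thread_Control                  *the_thread,
  const struct _Scheduler_Control *scheduler,
  void                            *stack_area,
  size_t                           stack_size,
  Priority_Control                 priority
)
{
  Thread_Configuration config = {
    .scheduler = scheduler,
    .stack_area = stack_area,
    .stack_size = stack_size,
    .stack_free = _Objects_Free_nothing,
    .priority = priority,
    .cpu_budget_operations = NULL,
    .name = 0,
    .isr_level = 0,
    .is_fp = false,
    .is_preemptible = true
  };

  return _Thread_Initialize( information, the_thread, &config );
}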
0249 
0250 /**
0251  * @brief Frees the thread.
0252  *
0253  * This routine invokes the thread delete extensions and frees all resources
0254  * associated with the thread.  Afterwards the thread object is closed.
0255  *
0256  * @param[in, out] information is the thread information.
0257  *
0258  * @param[in, out] the_thread is the thread to free.
0259  */
0260 void _Thread_Free(
0261   Thread_Information *information,
0262   Thread_Control     *the_thread
0263 );
0264 
0265 /**
0266  * @brief Starts the specified thread.
0267  *
0268  * If the thread is not in the dormant state, the routine returns
0269  * STATUS_INCORRECT_STATE and performs no actions except enabling interrupts
0270  * as indicated by the ISR lock context.
0271  *
0272  * Otherwise, this routine initializes the executable information for the
0273  * thread and makes it ready to execute.  After the call of this routine, the
0274  * thread competes with all other ready threads for CPU time.
0275  *
0276  * Then the routine enables the local interrupts as indicated by the ISR lock
0277  * context.
0278  *
0279  * Then the thread start user extensions are called with thread dispatching
0280  * disabled and interrupts enabled after making the thread ready.  Please note
0281  * that in SMP configurations, the thread switch and begin user extensions may
0282  * be called in parallel on another processor.
0283  *
0284  * Then thread dispatching is enabled and other threads may execute before the
0285  * routine returns.
0286  *
0287  * @param[in, out] the_thread is the thread to start.
0288  *
0289  * @param entry is the thread entry information.
0290  *
0291  * @param[in, out] lock_context is the ISR lock context which shall be used
0292  *   to disable the local interrupts before the call of this routine.
0293  *
0294  * @retval STATUS_SUCCESSFUL The thread start was successful.
0295  *
0296  * @retval STATUS_INCORRECT_STATE The thread was already started.
0297  */
0298 Status_Control _Thread_Start(
0299   Thread_Control                 *the_thread,
0300   const Thread_Entry_information *entry,
0301   ISR_lock_Context               *lock_context
0302 );
0303 
0304 /**
0305  * @brief Restarts the thread.
0306  *
0307  * @param[in, out] the_thread is the thread to restart.
0308  *
0309  * @param entry is the new start entry information for the thread to restart.
0310  *
0311  * @param[in, out] lock_context is the lock context with interrupts disabled.
0312  *
0313  * @retval STATUS_SUCCESSFUL The operation was successful.
0314  *
0315  * @retval STATUS_INCORRECT_STATE The thread was dormant.
0316  */
0317 Status_Control _Thread_Restart(
0318   Thread_Control                 *the_thread,
0319   const Thread_Entry_information *entry,
0320   ISR_lock_Context               *lock_context
0321 );
0322 
0323 /**
0324  * @brief Yields the currently executing thread.
0325  *
0326  * @param[in, out] executing The thread that performs a yield.
0327  */
0328 void _Thread_Yield( Thread_Control *executing );
0329 
0330 /**
0331  * @brief Changes the life state of the currently executing thread.
0332  *
0333  * @param life_states_to_clear are the thread life states to clear.
0334  *
0335  * @param life_states_to_set are the thread life states to set.
0336  *
0337  * @param ignored_life_states are the ignored thread life states.
0338  *
0339  * @return Returns the thread life state before the changes.
0340  */
0341 Thread_Life_state _Thread_Change_life(
0342   Thread_Life_state life_states_to_clear,
0343   Thread_Life_state life_states_to_set,
0344   Thread_Life_state ignored_life_states
0345 );
0346 
0347 /**
0348  * @brief Sets the life protection state of the currently executing thread.
0349  *
0350  * Calls _Thread_Change_life() with THREAD_LIFE_PROTECTED to clear and the
0351  * given state masked by THREAD_LIFE_PROTECTED to set.
0352  *
0353  * @param state The states to set.
0354  *
0355  * @return The previous state the thread was in.
0356  */
0357 Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
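
/*
 * Illustrative sketch, not part of the original header: protecting the
 * executing thread against asynchronous restart and termination requests
 * while it carries out some work.  The _Example_ name and the work handler
 * are hypothetical.
 */
static inline void _Example_Do_protected_work( void ( *work )( void ) )
{
  Thread_Life_state previous;

  previous = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
  ( *work )();
  _Thread_Set_life_protection( previous );
}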
0358 
0359 /**
0360  * @brief Kills all zombie threads in the system.
0361  *
0362  * Threads change into the zombie state as the last step in the thread
0363  * termination sequence right before a context switch to the heir thread is
0364  * initiated.  Since the thread stack is still in use during this phase we have
0365  * to postpone the thread stack reclamation until this point.  On SMP
0366  * configurations we may have to busy wait for context switch completion here.
0367  */
0368 void _Thread_Kill_zombies( void );
0369 
0370 /**
0371  * @brief Exits the currently executing thread.
0372  *
0373  * @param exit_value is the exit value of the thread.
0374  *
0375  * @param life_states_to_set are the thread life states to set.
0376  */
0377 RTEMS_NO_RETURN void _Thread_Exit(
0378   void              *exit_value,
0379   Thread_Life_state  life_states_to_set
0380 );
0381 
0382 /**
0383  * @brief Joins the currently executing thread with the thread.
0384  *
0385  * @param[in, out] the_thread is the thread to join.
0386  *
0387  * @param waiting_for_join is the thread state for the currently executing
0388  *   thread.
0389  *
0390  * @param[in, out] executing is the currently executing thread.
0391  *
0392  * @param[in, out] queue_context is the thread queue context.  The caller shall
0393  *   have acquired the thread state lock using the thread queue context.
0394  *
0395  * @retval STATUS_SUCCESSFUL The operation was successful.
0396  *
0397  * @retval STATUS_DEADLOCK A deadlock occurred.
0398  */
0399 Status_Control _Thread_Join(
0400   Thread_Control       *the_thread,
0401   States_Control        waiting_for_join,
0402   Thread_Control       *executing,
0403   Thread_queue_Context *queue_context
0404 );
0405 
0406 /**
0407  * @brief Indicates the resulting state of _Thread_Cancel().
0408  */
0409 typedef enum {
0410   /**
0411    * @brief Indicates that the thread cancel operation is done.
0412    */
0413   THREAD_CANCEL_DONE,
0414 
0415   /**
0416    * @brief Indicates that the thread cancel operation is in progress.
0417    *
0418    * The cancelled thread is terminating.  It may be joined.
0419    */
0420   THREAD_CANCEL_IN_PROGRESS
0421 } Thread_Cancel_state;
0422 
0423 /**
0424  * @brief Cancels the thread.
0425  *
0426  * @param[in, out] the_thread is the thread to cancel.
0427  *
0428  * @param[in, out] executing is the currently executing thread.
0429  *
0430  * @param[in, out] life_states_to_clear is the set of thread life states to
0431  *   clear for the thread to cancel.
0432  */
0433 Thread_Cancel_state _Thread_Cancel(
0434   Thread_Control   *the_thread,
0435   Thread_Control   *executing,
0436   Thread_Life_state life_states_to_clear
0437 );
0438 
0439 /**
0440  * @brief Closes the thread.
0441  *
0442  * Closes the thread object and starts the thread termination sequence.  In
0443  * case the executing thread is not terminated, then this function waits until
0444  * the terminating thread reached the zombie state.
0445  *
0446  * @param the_thread is the thread to close.
0447  *
0448  * @param[in, out] executing is the currently executing thread.
0449  *
0450  * @param[in, out] queue_context is the thread queue context.  The caller shall
0451  *   have disabled interrupts using the thread queue context.
0452  *
0453  * @retval STATUS_SUCCESSFUL The operation was successful.
0454  *
0455  * @retval STATUS_DEADLOCK A deadlock occurred.
0456  */
0457 Status_Control _Thread_Close(
0458   Thread_Control       *the_thread,
0459   Thread_Control       *executing,
0460   Thread_queue_Context *queue_context
0461 );
0462 
0463 /**
0464  * @brief Checks if the thread is ready.
0465  *
0466  * @param the_thread The thread to check if it is ready.
0467  *
0468  * @retval true The thread is currently in the ready state.
0469  * @retval false The thread is currently not ready.
0470  */
0471 static inline bool _Thread_Is_ready( const Thread_Control *the_thread )
0472 {
0473   return _States_Is_ready( the_thread->current_state );
0474 }
0475 
0476 /**
0477  * @brief Clears the specified thread state without locking the lock context.
0478  *
0479  * In the case the previous state is a non-ready state and the next state is
0480  * the ready state, then the thread is unblocked by the scheduler.
0481  *
0482  * @param[in, out] the_thread The thread.
0483  * @param state The state to clear.  It must not be zero.
0484  *
0485  * @return The thread's previous state.
0486  */
0487 States_Control _Thread_Clear_state_locked(
0488   Thread_Control *the_thread,
0489   States_Control  state
0490 );
0491 
0492 /**
0493  * @brief Clears the specified thread state.
0494  *
0495  * In the case the previous state is a non-ready state and the next state is
0496  * the ready state, then the thread is unblocked by the scheduler.
0497  *
0498  * @param[in, out] the_thread The thread.
0499  * @param state The state to clear.  It must not be zero.
0500  *
0501  * @return The previous state.
0502  */
0503 States_Control _Thread_Clear_state(
0504   Thread_Control *the_thread,
0505   States_Control  state
0506 );
0507 
0508 /**
0509  * @brief Sets the specified thread state without locking the lock context.
0510  *
0511  * In the case the previous state is the ready state, then the thread is blocked
0512  * by the scheduler.
0513  *
0514  * @param[in, out] the_thread The thread.
0515  * @param state The state to set.  It must not be zero.
0516  *
0517  * @return The previous state.
0518  */
0519 States_Control _Thread_Set_state_locked(
0520   Thread_Control *the_thread,
0521   States_Control  state
0522 );
0523 
0524 /**
0525  * @brief Sets the specified thread state.
0526  *
0527  * In the case the previous state is the ready state, then the thread is blocked
0528  * by the scheduler.
0529  *
0530  * @param[in, out] the_thread The thread.
0531  * @param state The state to set.  It must not be zero.
0532  *
0533  * @return The previous state.
0534  */
0535 States_Control _Thread_Set_state(
0536   Thread_Control *the_thread,
0537   States_Control  state
0538 );
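
/*
 * Illustrative sketch, not part of the original header: blocking a thread
 * with the suspend state and making it ready again later.  STATES_SUSPENDED
 * is provided by <rtems/score/statesimpl.h>; the _Example_ names are
 * hypothetical.
 */
static inline void _Example_Suspend( Thread_Control *the_thread )
{
  States_Control previous_state;

  previous_state = _Thread_Set_state( the_thread, STATES_SUSPENDED );
  (void) previous_state;
}

static inline void _Example_Resume( Thread_Control *the_thread )
{
  /* Unblocks the thread if no other blocking state remains set */
  _Thread_Clear_state( the_thread, STATES_SUSPENDED );
}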
0539 
0540 /**
0541  * @brief Initializes the environment for a thread.
0542  *
0543  * This routine initializes the context of @a the_thread to its
0544  * appropriate starting state.
0545  *
0546  * @param[in, out] the_thread The pointer to the thread control block.
0547  */
0548 void _Thread_Load_environment(
0549   Thread_Control *the_thread
0550 );
0551 
0552 /**
0553  * @brief Calls the start kind's idle entry of the thread.
0554  *
0555  * @param executing The currently executing thread.
0556  */
0557 void _Thread_Entry_adaptor_idle( Thread_Control *executing );
0558 
0559 /**
0560  * @brief Calls the start kind's numeric entry of the thread.
0561  *
0562  * @param executing The currently executing thread.
0563  */
0564 void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
0565 
0566 /**
0567  * @brief Calls the start kind's pointer entry of the thread.
0568  *
0569  * Stores the return value in the Wait.return_argument of the thread.
0570  *
0571  * @param executing The currently executing thread.
0572  */
0573 void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
0574 
0575 /**
0576  * @brief Wrapper function for all threads.
0577  *
0578  * This routine is the wrapper function for all threads.  It is
0579  * the starting point for all threads.  The user provided thread
0580  * entry point is invoked by this routine.  Operations
0581  * which must be performed immediately before and after the user's
0582  * thread executes are found here.
0583  *
0584  * @note On entry, it is assumed all interrupts are blocked and that this
0585  * routine needs to set the initial ISR level.  This may or may not
0586  * actually be needed by the context switch routine and as a result
0587  * interrupts may already be at their proper level.  Either way,
0588  * setting the initial ISR level properly here is safe.
0589  */
0590 void _Thread_Handler( void );
0591 
0592 /**
0593  * @brief Acquires the lock context in a critical section.
0594  *
0595  * @param the_thread The thread to acquire the lock context.
0596  * @param lock_context The lock context.
0597  */
0598 static inline void _Thread_State_acquire_critical(
0599   Thread_Control   *the_thread,
0600   ISR_lock_Context *lock_context
0601 )
0602 {
0603   _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
0604 }
0605 
0606 /**
0607  * @brief Disables interrupts and acquires the lock context.
0608  *
0609  * @param the_thread The thread to acquire the lock context.
0610  * @param lock_context The lock context.
0611  */
0612 static inline void _Thread_State_acquire(
0613   Thread_Control   *the_thread,
0614   ISR_lock_Context *lock_context
0615 )
0616 {
0617   _ISR_lock_ISR_disable( lock_context );
0618   _Thread_State_acquire_critical( the_thread, lock_context );
0619 }
0620 
0621 /**
0622  * @brief Disables interrupts and acquires the lock context for the currently
0623  *      executing thread.
0624  *
0625  * @param lock_context The lock context.
0626  *
0627  * @return The currently executing thread.
0628  */
0629 static inline Thread_Control *_Thread_State_acquire_for_executing(
0630   ISR_lock_Context *lock_context
0631 )
0632 {
0633   Thread_Control *executing;
0634 
0635   _ISR_lock_ISR_disable( lock_context );
0636   executing = _Thread_Executing;
0637   _Thread_State_acquire_critical( executing, lock_context );
0638 
0639   return executing;
0640 }
0641 
0642 /**
0643  * @brief Releases the lock context in a critical section.
0644  *
0645  * @param the_thread The thread to release the lock context.
0646  * @param lock_context The lock context.
0647  */
0648 static inline void _Thread_State_release_critical(
0649   Thread_Control   *the_thread,
0650   ISR_lock_Context *lock_context
0651 )
0652 {
0653   _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
0654 }
0655 
0656 /**
0657  * @brief Releases the lock context and enables interrupts.
0658  *
0659  * @param[in, out] the_thread The thread to release the lock context.
0660  * @param[out] lock_context The lock context.
0661  */
0662 static inline void _Thread_State_release(
0663   Thread_Control   *the_thread,
0664   ISR_lock_Context *lock_context
0665 )
0666 {
0667   _Thread_State_release_critical( the_thread, lock_context );
0668   _ISR_lock_ISR_enable( lock_context );
0669 }
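
/*
 * Illustrative sketch, not part of the original header: reading the current
 * state of the executing thread under the thread state lock.  The _Example_
 * name is hypothetical.
 */
static inline States_Control _Example_Get_executing_state( void )
{
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  States_Control    current_state;

  executing = _Thread_State_acquire_for_executing( &lock_context );
  current_state = executing->current_state;
  _Thread_State_release( executing, &lock_context );

  return current_state;
}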
0670 
0671 /**
0672  * @brief Checks if the thread is the owner of the lock of the join queue.
0673  *
0674  * @param the_thread The thread for the verification.
0675  *
0676  * @retval true The thread is the owner of the lock of the join queue.
0677  * @retval false The thread is not the owner of the lock of the join queue.
0678  */
0679 #if defined(RTEMS_DEBUG)
0680 static inline bool _Thread_State_is_owner(
0681   const Thread_Control *the_thread
0682 )
0683 {
0684   return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
0685 }
0686 #endif
0687 
0688 /**
0689  * @brief Performs the priority actions specified by the thread queue context
0690  * along the thread queue path.
0691  *
0692  * The caller must be the owner of the thread wait lock.
0693  *
0694  * @param start_of_path The start thread of the thread queue path.
0695  * @param queue_context The thread queue context specifying the thread queue
0696  *   path and initial thread priority actions.
0697  *
0698  * @see _Thread_queue_Path_acquire_critical().
0699  */
0700 void _Thread_Priority_perform_actions(
0701   Thread_Control       *start_of_path,
0702   Thread_queue_Context *queue_context
0703 );
0704 
0705 /**
0706  * @brief Adds the specified thread priority node to the corresponding thread
0707  * priority aggregation.
0708  *
0709  * The caller must be the owner of the thread wait lock.
0710  *
0711  * @param the_thread The thread.
0712  * @param priority_node The thread priority node to add.
0713  * @param queue_context The thread queue context to return an updated set of
0714  *   threads for _Thread_Priority_update().  The thread queue context must be
0715  *   initialized via _Thread_queue_Context_clear_priority_updates() before a
0716  *   call of this function.
0717  *
0718  * @see _Thread_Wait_acquire().
0719  */
0720 void _Thread_Priority_add(
0721   Thread_Control       *the_thread,
0722   Priority_Node        *priority_node,
0723   Thread_queue_Context *queue_context
0724 );
0725 
0726 /**
0727  * @brief Removes the specified thread priority node from the corresponding
0728  * thread priority aggregation.
0729  *
0730  * The caller must be the owner of the thread wait lock.
0731  *
0732  * @param the_thread The thread.
0733  * @param priority_node The thread priority node to remove.
0734  * @param queue_context The thread queue context to return an updated set of
0735  *   threads for _Thread_Priority_update().  The thread queue context must be
0736  *   initialized via _Thread_queue_Context_clear_priority_updates() before a
0737  *   call of this function.
0738  *
0739  * @see _Thread_Wait_acquire().
0740  */
0741 void _Thread_Priority_remove(
0742   Thread_Control       *the_thread,
0743   Priority_Node        *priority_node,
0744   Thread_queue_Context *queue_context
0745 );
0746 
0747 /**
0748  * @brief Propagates a thread priority value change in the specified thread
0749  * priority node to the corresponding thread priority aggregation.
0750  *
0751  * The caller must be the owner of the thread wait lock.
0752  *
0753  * @param the_thread The thread.
0754  * @param[out] priority_node The thread priority node to change.
0755  * @param priority_group_order The priority group order determines if the
0756  *   thread is inserted as the first or last node into the ready or scheduled
0757  *   queues of its home scheduler, see #PRIORITY_GROUP_FIRST and
0758  *   #PRIORITY_GROUP_LAST.
0759  * @param queue_context The thread queue context to return an updated set of
0760  *   threads for _Thread_Priority_update().  The thread queue context must be
0761  *   initialized via _Thread_queue_Context_clear_priority_updates() before a
0762  *   call of this function.
0763  *
0764  * @see _Thread_Wait_acquire().
0765  */
0766 void _Thread_Priority_changed(
0767   Thread_Control       *the_thread,
0768   Priority_Node        *priority_node,
0769   Priority_Group_order  priority_group_order,
0770   Thread_queue_Context *queue_context
0771 );
0772 
0773 /**
0774  * @brief Changes the thread priority value of the specified thread priority
0775  * node in the corresponding thread priority aggregation.
0776  *
0777  * The caller must be the owner of the thread wait lock.
0778  *
0779  * @param the_thread The thread.
0780  * @param[out] priority_node The thread priority node to change.
0781  * @param new_priority The new thread priority value of the thread priority
0782  *   node to change.
0783  * @param priority_group_order The priority group order determines if the
0784  *   thread is inserted as the first or last node into the ready or scheduled
0785  *   queues of its home scheduler, see #PRIORITY_GROUP_FIRST and
0786  *   #PRIORITY_GROUP_LAST.
0787  * @param queue_context The thread queue context to return an updated set of
0788  *   threads for _Thread_Priority_update().  The thread queue context must be
0789  *   initialized via _Thread_queue_Context_clear_priority_updates() before a
0790  *   call of this function.
0791  *
0792  * @see _Thread_Wait_acquire().
0793  */
0794 static inline void _Thread_Priority_change(
0795   Thread_Control       *the_thread,
0796   Priority_Node        *priority_node,
0797   Priority_Control      new_priority,
0798   Priority_Group_order  priority_group_order,
0799   Thread_queue_Context *queue_context
0800 )
0801 {
0802   _Priority_Node_set_priority( priority_node, new_priority );
0803 
0804 #if defined(RTEMS_SCORE_THREAD_REAL_PRIORITY_MAY_BE_INACTIVE)
0805   if ( !_Priority_Node_is_active( priority_node ) ) {
0806     /* The priority change is picked up once the node is added */
0807     return;
0808   }
0809 #endif
0810 
0811   _Thread_Priority_changed(
0812     the_thread,
0813     priority_node,
0814     priority_group_order,
0815     queue_context
0816   );
0817 }
0818 
0819 #if defined(RTEMS_SMP)
0820 /**
0821  * @brief Replaces the victim priority node with the replacement priority node
0822  * in the corresponding thread priority aggregation.
0823  *
0824  * The caller must be the owner of the thread wait lock.
0825  *
0826  * @param the_thread The thread.
0827  * @param victim_node The victim thread priority node.
0828  * @param replacement_node The replacement thread priority node.
0829  *
0830  * @see _Thread_Wait_acquire().
0831  */
0832 void _Thread_Priority_replace(
0833   Thread_Control *the_thread,
0834   Priority_Node  *victim_node,
0835   Priority_Node  *replacement_node
0836 );
0837 #endif
0838 
0839 /**
0840  * @brief Updates the priority of all threads in the set
0841  *
0842  * @param queue_context The thread queue context to return an updated set of
0843  *   threads for _Thread_Priority_update().  The thread queue context must be
0844  *   initialized via _Thread_queue_Context_clear_priority_updates() before a
0845  *   call of this function.
0846  *
0847  * @see _Thread_Priority_add(), _Thread_Priority_change(),
0848  *   _Thread_Priority_changed() and _Thread_Priority_remove().
0849  */
0850 void _Thread_Priority_update( Thread_queue_Context *queue_context );
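
/*
 * Illustrative sketch, not part of the original header: the call sequence
 * documented above to change the real priority of a thread.  It assumes the
 * thread wait lock helpers _Thread_Wait_acquire() and _Thread_Wait_release()
 * provided elsewhere in this header; the _Example_ name is hypothetical.
 */
static inline void _Example_Change_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    PRIORITY_GROUP_LAST,
    &queue_context
  );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Priority_update( &queue_context );
}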
0851 
0852 #if defined(RTEMS_SMP)
0853 /**
0854  * @brief Updates the priority of the thread and makes its home scheduler node
0855  *   sticky.
0856  *
0857  * @param the_thread is the thread to work on.
0858  */
0859 void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread );
0860 
0861 /**
0862  * @brief Updates the priority of the thread and cleans the sticky property of
0863  *   its home scheduler node.
0864  *
0865  * @param the_thread is the thread to work on.
0866  */
0867 void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread );
0868 
0869 /**
0870  * @brief Updates the priority of the thread.
0871  *
0872  * @param the_thread is the thread to update the priority.
0873  */
0874 void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread );
0875 #endif
0876 
0877 /**
0878  * @brief Checks if the left thread priority is less than the right thread
0879  *      priority in the intuitive sense of priority.
0880  *
0881  * @param left The left thread priority.
0882  * @param right The right thread priority.
0883  *
0884  * @retval true The left priority is less in the intuitive sense.
0885  * @retval false The left priority is greater or equal in the intuitive sense.
0886  */
0887 static inline bool _Thread_Priority_less_than(
0888   Priority_Control left,
0889   Priority_Control right
0890 )
0891 {
0892   return left > right;
0893 }
0894 
0895 /**
0896  * @brief Returns the highest priority of the left and right thread priorities
0897  * in the intuitive sense of priority.
0898  *
0899  * @param left The left thread priority.
0900  * @param right The right thread priority.
0901  *
0902  * @return The highest priority in the intuitive sense of priority.
0903  */
0904 static inline Priority_Control _Thread_Priority_highest(
0905   Priority_Control left,
0906   Priority_Control right
0907 )
0908 {
0909   return _Thread_Priority_less_than( left, right ) ? right : left;
0910 }
0911 
0912 /**
0913  * @brief Gets the thread object information for the API of the object
0914  *   identifier.
0915  *
0916  * @param id is an object identifier which defines the API to get the
0917  *   associated thread objects information.
0918  *
0919  * @retval NULL The object identifier had an invalid API.
0920  *
0921  * @return Returns the thread object information associated with the API of the
0922  *   object identifier.
0923  */
0924 static inline Objects_Information *_Thread_Get_objects_information_by_id(
0925   Objects_Id id
0926 )
0927 {
0928   uint32_t the_api;
0929 
0930   the_api = _Objects_Get_API( id );
0931 
0932   if ( !_Objects_Is_api_valid( the_api ) ) {
0933     return NULL;
0934   }
0935 
0936   /*
0937    * Threads are always first class :)
0938    *
0939    * There is no need to validate the object class of the object identifier,
0940    * since this will be done by the object get methods.
0941    */
0942   return _Objects_Information_table[ the_api ][ 1 ];
0943 }
0944 
0945 /**
0946  * @brief Gets the thread object information of the thread.
0947  *
0948  * @param the_thread is the thread to get the thread object information.
0949  *
0950  * @return Returns the thread object information of the thread.
0951  */
0952 static inline Thread_Information *_Thread_Get_objects_information(
0953   Thread_Control *the_thread
0954 )
0955 {
0956   size_t              the_api;
0957   Thread_Information *information;
0958 
0959   the_api = (size_t) _Objects_Get_API( the_thread->Object.id );
0960   _Assert( _Objects_Is_api_valid( the_api ) );
0961 
0962   information = (Thread_Information *)
0963     _Objects_Information_table[ the_api ][ 1 ];
0964   _Assert( information != NULL );
0965 
0966   return information;
0967 }
0968 
0969 /**
0970  * @brief Gets a thread by its identifier.
0971  *
0972  * @see _Objects_Get().
0973  *
0974  * @param id The id of the thread.
0975  * @param lock_context The lock context.
0976  */
0977 Thread_Control *_Thread_Get(
0978   Objects_Id         id,
0979   ISR_lock_Context  *lock_context
0980 );
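
/*
 * Illustrative sketch, not part of the original header: checking whether a
 * thread object exists for an identifier.  _Thread_Get() returns with
 * interrupts disabled through the lock context only if a thread was found;
 * the _Example_ name is hypothetical.
 */
static inline bool _Example_Thread_exists( Objects_Id id )
{
  Thread_Control   *the_thread;
  ISR_lock_Context  lock_context;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    return false;
  }

  _ISR_lock_ISR_enable( &lock_context );

  return true;
}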
0981 
0982 /**
0983  * @brief Gets the identifier of the calling thread.
0984  *
0985  * @return Returns the identifier of the calling thread.
0986  */
0987 Objects_Id _Thread_Self_id( void );
0988 
0989 /**
0990  * @brief Gets the cpu of the thread's scheduler.
0991  *
0992  * @param thread The thread.
0993  *
0994  * @return The cpu of the thread's scheduler.
0995  */
0996 static inline Per_CPU_Control *_Thread_Get_CPU(
0997   const Thread_Control *thread
0998 )
0999 {
1000 #if defined(RTEMS_SMP)
1001   return thread->Scheduler.cpu;
1002 #else
1003   (void) thread;
1004 
1005   return _Per_CPU_Get();
1006 #endif
1007 }
1008 
1009 /**
1010  * @brief Sets the cpu of the thread's scheduler.
1011  *
1012  * @param[out] thread The thread.
1013  * @param cpu The cpu to set.
1014  */
1015 static inline void _Thread_Set_CPU(
1016   Thread_Control *thread,
1017   Per_CPU_Control *cpu
1018 )
1019 {
1020 #if defined(RTEMS_SMP)
1021   thread->Scheduler.cpu = cpu;
1022 #else
1023   (void) thread;
1024   (void) cpu;
1025 #endif
1026 }
1027 
1028 /**
1029  * @brief Checks if the thread is the currently executing thread.
1030  *
1031  * This function returns true if the_thread is the currently executing
1032  * thread, and false otherwise.
1033  *
1034  * @param the_thread The thread to verify if it is the currently executing thread.
1035  *
1036  * @retval true @a the_thread is the currently executing one.
1037  * @retval false @a the_thread is not the currently executing one.
1038  */
1039 static inline bool _Thread_Is_executing (
1040   const Thread_Control *the_thread
1041 )
1042 {
1043   return ( the_thread == _Thread_Executing );
1044 }
1045 
1046 #if defined(RTEMS_SMP)
1047 /**
1048  * @brief Checks if the thread currently executes on some processor in the
1049  * system.
1050  *
1051  * Do not confuse this with _Thread_Is_executing() which checks only the
1052  * current processor.
1053  *
1054  * @param the_thread The thread for the verification.
1055  *
1056  * @retval true @a the_thread is the currently executing one.
1057  * @retval false @a the_thread is not the currently executing one.
1058  */
1059 static inline bool _Thread_Is_executing_on_a_processor(
1060   const Thread_Control *the_thread
1061 )
1062 {
1063   return _CPU_Context_Get_is_executing( &the_thread->Registers );
1064 }
1065 #endif
1066 
1067 /**
1068  * @brief Checks if the thread is the heir.
1069  *
1070  * This function returns true if the_thread is the heir
1071  * thread, and false otherwise.
1072  *
1073  * @param the_thread The thread for the verification.
1074  *
1075  * @retval true @a the_thread is the heir.
1076  * @retval false @a the_thread is not the heir.
1077  */
1078 static inline bool _Thread_Is_heir (
1079   const Thread_Control *the_thread
1080 )
1081 {
1082   return ( the_thread == _Thread_Heir );
1083 }
1084 
1085 /**
1086  * @brief Unblocks the thread.
1087  *
1088  * This routine clears any blocking state for the_thread.  It performs
1089  * any necessary scheduling operations including the selection of
1090  * a new heir thread.
1091  *
1092  * @param[in, out] the_thread The thread to unblock.
1093  */
1094 static inline void _Thread_Unblock (
1095   Thread_Control *the_thread
1096 )
1097 {
1098   _Thread_Clear_state( the_thread, STATES_BLOCKED );
1099 }
1100 
1101 /**
1102  * @brief Checks if the floating point context of the thread is currently
1103  *      loaded in the floating point unit.
1104  *
1105  * This function returns true if the floating point context of
1106  * the_thread is currently loaded in the floating point unit, and
1107  * false otherwise.
1108  *
1109  * @param the_thread The thread for the verification.
1110  *
1111  * @retval true The floating point context of @a the_thread is currently
1112  *      loaded in the floating point unit.
1113  * @retval false The floating point context of @a the_thread is currently not
1114  *      loaded in the floating point unit.
1115  */
1116 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1117 static inline bool _Thread_Is_allocated_fp (
1118   const Thread_Control *the_thread
1119 )
1120 {
1121   return ( the_thread == _Thread_Allocated_fp );
1122 }
1123 #endif
1124 
1125 /*
1126  * If the CPU has hardware floating point, then we must address saving
1127  * and restoring it as part of the context switch.
1128  *
1129  * The second conditional compilation section selects the algorithm used
1130  * to context switch between floating point tasks.  The deferred algorithm
1131  * can be significantly better in a system with few floating point tasks
1132  * because it reduces the total number of save and restore FP context
1133  * operations.  However, this algorithm cannot be used on all CPUs due
1134  * to unpredictable use of FP registers by some compilers for integer
1135  * operations.
1136  */
1137 
1138 /**
1139  * @brief Saves the executing thread's floating point area.
1140  *
1141  * @param executing The currently executing thread.
1142  */
1143 static inline void _Thread_Save_fp( Thread_Control *executing )
1144 {
1145 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1146 #if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
1147   if ( executing->fp_context != NULL )
1148     _Context_Save_fp( &executing->fp_context );
1149 #endif
1150 #endif
1151 }
1152 
1153 /**
1154  * @brief Restores the executing thread's floating point area.
1155  *
1156  * @param executing The currently executing thread.
1157  */
1158 static inline void _Thread_Restore_fp( Thread_Control *executing )
1159 {
1160 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1161 #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
1162   if ( (executing->fp_context != NULL) &&
1163        !_Thread_Is_allocated_fp( executing ) ) {
1164     if ( _Thread_Allocated_fp != NULL )
1165       _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
1166     _Context_Restore_fp( &executing->fp_context );
1167     _Thread_Allocated_fp = executing;
1168   }
1169 #else
1170   if ( executing->fp_context != NULL )
1171     _Context_Restore_fp( &executing->fp_context );
1172 #endif
1173 #endif
1174 }
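
/*
 * Illustrative sketch, not part of the original header: how a low-level
 * dispatch path pairs the floating point save and restore operations around
 * the integer context switch.  The _Example_ name is hypothetical and SMP
 * and dispatch bookkeeping details are omitted.
 */
static inline void _Example_Switch_context(
  Thread_Control *executing,
  Thread_Control *heir
)
{
  _Thread_Save_fp( executing );
  _Context_Switch( &executing->Registers, &heir->Registers );
  _Thread_Restore_fp( executing );
}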
1175 
1176 /**
1177  * @brief Deallocates the currently loaded floating point context.
1178  *
1179  * This routine is invoked when the currently loaded floating
1180  * point context is no longer associated with an active thread.
1181  */
1182 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1183 static inline void _Thread_Deallocate_fp( void )
1184 {
1185   _Thread_Allocated_fp = NULL;
1186 }
1187 #endif
1188 
1189 /**
1190  * @brief Checks if dispatching is disabled.
1191  *
1192  * This function returns true if dispatching is disabled, and false
1193  * otherwise.
1194  *
1195  * @retval true Dispatching is disabled.
1196  * @retval false Dispatching is enabled.
1197  */
1198 static inline bool _Thread_Is_context_switch_necessary( void )
1199 {
1200   return ( _Thread_Dispatch_necessary );
1201 }
1202 
1203 /**
1204  * @brief Gets the maximum number of internal threads.
1205  *
1206  * @return The maximum number of internal threads.
1207  */
1208 static inline uint32_t _Thread_Get_maximum_internal_threads(void)
1209 {
1210   /* Idle threads */
1211   uint32_t maximum_internal_threads =
1212     rtems_configuration_get_maximum_processors();
1213 
1214   /* MPCI thread */
1215 #if defined(RTEMS_MULTIPROCESSING)
1216   if ( _System_state_Is_multiprocessing ) {
1217     ++maximum_internal_threads;
1218   }
1219 #endif
1220 
1221   return maximum_internal_threads;
1222 }
1223 
1224 /**
1225  * @brief Allocates an internal thread and returns it.
1226  *
1227  * @retval pointer Pointer to the allocated Thread_Control.
1228  * @retval NULL The operation failed.
1229  */
1230 static inline Thread_Control *_Thread_Internal_allocate( void )
1231 {
1232   return (Thread_Control *)
1233     _Objects_Allocate_unprotected( &_Thread_Information.Objects );
1234 }
1235 
1236 /**
1237  * @brief Gets the heir of the processor and makes it executing.
1238  *
1239  * Must be called with interrupts disabled.  The thread dispatch necessary
1240  * indicator is cleared as a side-effect.
1241  *
1242  * @param[in, out] cpu_self The processor to get the heir of.
1243  *
1244  * @return The heir thread.
1245  *
1246  * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
1247  * _Thread_Dispatch_update_heir().
1248  */
1249 static inline Thread_Control *_Thread_Get_heir_and_make_it_executing(
1250   Per_CPU_Control *cpu_self
1251 )
1252 {
1253   Thread_Control *heir;
1254 
1255   heir = cpu_self->heir;
1256   cpu_self->dispatch_necessary = false;
1257   cpu_self->executing = heir;
1258 
1259   return heir;
1260 }
1261 
1262 /**
1263  * @brief Updates the CPU time used by the thread.
1264  *
1265  * @param[in, out] the_thread The thread to which the additionally used CPU
1266  *      time is added.
1267  * @param cpu The cpu.
1268  */
1269 static inline void _Thread_Update_CPU_time_used(
1270   Thread_Control  *the_thread,
1271   Per_CPU_Control *cpu
1272 )
1273 {
1274   Timestamp_Control last;
1275   Timestamp_Control ran;
1276 
1277   last = cpu->cpu_usage_timestamp;
1278   _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
1279   _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
1280   _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
1281 }
1282 
1283 /**
1284  * @brief Updates the used cpu time for the heir and dispatches a new heir.
1285  *
1286  * @param[in, out] cpu_self The current processor.
1287  * @param[in, out] cpu_for_heir The processor to do a dispatch on.
1288  * @param heir The new heir for @a cpu_for_heir.
1289  */
1290 #if defined( RTEMS_SMP )
1291 static inline void _Thread_Dispatch_update_heir(
1292   Per_CPU_Control *cpu_self,
1293   Per_CPU_Control *cpu_for_heir,
1294   Thread_Control  *heir
1295 )
1296 {
1297   _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
1298 
1299   cpu_for_heir->heir = heir;
1300 
1301   _Thread_Dispatch_request( cpu_self, cpu_for_heir );
1302 }
1303 #endif
1304 
1305 /**
1306  * @brief Gets the used processor time of the thread throughout its entire
1307  *   lifetime.
1308  *
1309  * @param[in, out] the_thread is the thread.
1310  *
1311  * @return Returns the used processor time of the thread throughout its entire
1312  *   lifetime.
1313  */
1314 Timestamp_Control _Thread_Get_CPU_time_used( Thread_Control *the_thread );
1315 
1316 /**
1317  * @brief Gets the used processor time of the thread throughout its entire
1318  *   lifetime if the caller already acquired the thread state and home
1319  *   scheduler locks.
1320  *
1321  * @param[in, out] the_thread is the thread.
1322  *
1323  * @return Returns the used processor time of the thread throughout its entire
1324  *   lifetime.
1325  */
1326 Timestamp_Control _Thread_Get_CPU_time_used_locked(
1327   Thread_Control *the_thread
1328 );
1329 
1330 /**
1331  * @brief Gets the used processor time of the thread after the last CPU usage
1332  *   reset.
1333  *
1334  * @param[in, out] the_thread is the thread.
1335  *
1336  * @return Returns the used processor time of the thread after the last CPU usage
1337  *   reset.
1338  */
1339 Timestamp_Control _Thread_Get_CPU_time_used_after_last_reset(
1340   Thread_Control *the_thread
1341 );
1342 
1343 /**
1344  * @brief Initializes the control chain of the action control.
1345  *
1346  * @param[out] action_control The action control to initialize.
1347  */
1348 static inline void _Thread_Action_control_initialize(
1349   Thread_Action_control *action_control
1350 )
1351 {
1352   _Chain_Initialize_empty( &action_control->Chain );
1353 }
1354 
1355 /**
1356  * @brief Initializes the Thread action.
1357  *
1358  * @param[out] action The Thread_Action to initialize.
1359  */
1360 static inline void _Thread_Action_initialize(
1361   Thread_Action *action
1362 )
1363 {
1364   _Chain_Set_off_chain( &action->Node );
1365 }
1366 
1367 /**
1368  * @brief Adds the post switch action to the thread.
1369  *
1370  * The caller shall own the thread state lock.  A thread dispatch is
1371  * requested.
1372  *
1373  * @param[in, out] the_thread is the thread of the action.
1374  *
1375  * @param[in, out] action is the action to add.
1376  *
1377  * @param handler is the handler for the action.
1378  */
1379 static inline void _Thread_Add_post_switch_action(
1380   Thread_Control        *the_thread,
1381   Thread_Action         *action,
1382   Thread_Action_handler  handler
1383 )
1384 {
1385   Per_CPU_Control *cpu_of_thread;
1386 
1387   _Assert( _Thread_State_is_owner( the_thread ) );
1388 
1389   cpu_of_thread = _Thread_Get_CPU( the_thread );
1390 
1391   action->handler = handler;
1392 
1393   _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
1394 
1395   _Chain_Append_if_is_off_chain_unprotected(
1396     &the_thread->Post_switch_actions.Chain,
1397     &action->Node
1398   );
1399 }
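
/*
 * Illustrative sketch, not part of the original header: requesting a post
 * switch action for a thread.  The _Example_ name is hypothetical; the
 * handler must follow the Thread_Action_handler contract.
 */
static inline void _Example_Request_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  ISR_lock_Context lock_context;

  _Thread_Action_initialize( action );
  _Thread_State_acquire( the_thread, &lock_context );
  _Thread_Add_post_switch_action( the_thread, action, handler );
  _Thread_State_release( the_thread, &lock_context );
}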
1400 
1401 /**
1402  * @brief Appends the post switch action to the thread.
1403  *
1404  * The caller shall own the thread state lock.  The action shall be inactive.
1405  * The handler of the action shall be already set.  A thread dispatch is not
1406  * requested.
1407  *
1408  * @param[in, out] the_thread is the thread of the action.
1409  *
1410  * @param[in, out] action is the action to add.
1411  */
1412 static inline void _Thread_Append_post_switch_action(
1413   Thread_Control *the_thread,
1414   Thread_Action  *action
1415 )
1416 {
1417   _Assert( _Thread_State_is_owner( the_thread ) );
1418   _Assert( action->handler != NULL );
1419 
1420   _Chain_Append_unprotected(
1421     &the_thread->Post_switch_actions.Chain,
1422     &action->Node
1423   );
1424 }
1425 
1426 /**
1427  * @brief Checks if the thread life state is restarting.
1428  *
1429  * @param life_state The thread life state for the verification.
1430  *
1431  * @retval true @a life_state is restarting.
1432  * @retval false @a life_state is not restarting.
1433  */
1434 static inline bool _Thread_Is_life_restarting(
1435   Thread_Life_state life_state
1436 )
1437 {
1438   return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
1439 }
1440 
1441 /**
1442  * @brief Checks if the thread life state is terminating.
1443  *
1444  * @param life_state The thread life state for the verification.
1445  *
1446  * @retval true @a life_state is terminating.
1447  * @retval false @a life_state is not terminating.
1448  */
1449 static inline bool _Thread_Is_life_terminating(
1450   Thread_Life_state life_state
1451 )
1452 {
1453   return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
1454 }
1455 
1456 /**
1457  * @brief Checks if the thread life state allows life change.
1458  *
1459  * @param life_state The thread life state for the verification.
1460  *
1461  * @retval true @a life_state allows life change.
1462  * @retval false @a life_state does not allow life change.
1463  */
1464 static inline bool _Thread_Is_life_change_allowed(
1465   Thread_Life_state life_state
1466 )
1467 {
1468   return ( life_state
1469     & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
1470 }
1471 
1472 /**
1473  * @brief Checks if the thread life state is life changing.
1474  *
1475  * @param life_state The thread life state for the verification.
1476  *
1477  * @retval true @a life_state is life changing.
1478  * @retval false @a life_state is not life changing.
1479  */
1480 static inline bool _Thread_Is_life_changing(
1481   Thread_Life_state life_state
1482 )
1483 {
1484   return ( life_state
1485     & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
1486 }
1487 
1488 /**
1489  * @brief Checks if the thread is joinable.
1490  *
1491  * @param the_thread The thread for the verification.
1492  *
1493  * @retval true @a the_thread is joinable.
1494  * @retval false @a the_thread is not joinable.
1495  */
1496 static inline bool _Thread_Is_joinable(
1497   const Thread_Control *the_thread
1498 )
1499 {
1500   _Assert( _Thread_State_is_owner( the_thread ) );
1501   return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
1502 }
1503 
1504 /**
1505  * @brief Increments the thread's resource count.
1506  *
1507  * @param[in, out] the_thread The thread to increase the resource count of.
1508  */
1509 static inline void _Thread_Resource_count_increment(
1510   Thread_Control *the_thread
1511 )
1512 {
1513 #if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1514   ++the_thread->resource_count;
1515 #else
1516   (void) the_thread;
1517 #endif
1518 }
1519 
1520 /**
1521  * @brief Decrements the thread's resource count.
1522  *
1523  * @param[in, out] the_thread The thread to decrement the resource count of.
1524  */
1525 static inline void _Thread_Resource_count_decrement(
1526   Thread_Control *the_thread
1527 )
1528 {
1529 #if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1530   --the_thread->resource_count;
1531 #else
1532   (void) the_thread;
1533 #endif
1534 }
1535 
1536 #if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1537 /**
1538  * @brief Checks if the thread owns resources.
1539  *
1540  * Resources are accounted with the Thread_Control::resource_count resource
1541  * counter.  This counter is used by mutex objects for example.
1542  *
1543  * @param the_thread The thread.
1544  *
1545  * @retval true The thread owns resources.
1546  * @retval false The thread does not own resources.
1547  */
1548 static inline bool _Thread_Owns_resources(
1549   const Thread_Control *the_thread
1550 )
1551 {
1552   return the_thread->resource_count != 0;
1553 }
1554 #endif
1555 
1556 /**
1557  * @brief Gets the home scheduler of the thread.
1558  *
1559  * @param the_thread The thread to get the home scheduler of.
1560  *
1561  * @return The thread's home scheduler.
1562  */
1563 static inline const Scheduler_Control *_Thread_Scheduler_get_home(
1564   const Thread_Control *the_thread
1565 )
1566 {
1567 #if defined(RTEMS_SMP)
1568   return the_thread->Scheduler.home_scheduler;
1569 #else
1570   (void) the_thread;
1571   return &_Scheduler_Table[ 0 ];
1572 #endif
1573 }
1574 
1575 /**
1576  * @brief Gets the scheduler's home node.
1577  *
1578  * @param the_thread The thread to get the home node of.
1579  *
1580  * @return The thread's home node.
1581  */
1582 static inline Scheduler_Node *_Thread_Scheduler_get_home_node(
1583   const Thread_Control *the_thread
1584 )
1585 {
1586 #if defined(RTEMS_SMP)
1587   _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1588   return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1589     _Chain_First( &the_thread->Scheduler.Wait_nodes )
1590   );
1591 #else
1592   return the_thread->Scheduler.nodes;
1593 #endif
1594 }
1595 
1596 /**
1597  * @brief Gets the thread's scheduler node by index.
1598  *
1599  * @param the_thread The thread of which to get a scheduler node.
1600  * @param scheduler_index The index of the desired scheduler node.
1601  *
1602  * @return The scheduler node with the specified index.
1603  */
1604 static inline Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1605   const Thread_Control *the_thread,
1606   size_t                scheduler_index
1607 )
1608 {
1609   _Assert( scheduler_index < _Scheduler_Count );
1610 #if defined(RTEMS_SMP)
1611   return (Scheduler_Node *)
1612     ( (uintptr_t) the_thread->Scheduler.nodes
1613       + scheduler_index * _Scheduler_Node_size );
1614 #else
1615   (void) scheduler_index;
1616   return the_thread->Scheduler.nodes;
1617 #endif
1618 }
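
/*
 * Illustrative sketch, not part of the original header: visiting every
 * scheduler node of a thread by index.  _Scheduler_Count is provided by the
 * scheduler configuration; the _Example_ name and the visit handler are
 * hypothetical.
 */
static inline void _Example_Visit_scheduler_nodes(
  const Thread_Control *the_thread,
  void               ( *visit )( Scheduler_Node *scheduler_node, void *arg ),
  void                 *arg
)
{
  size_t scheduler_index;

  for (
    scheduler_index = 0;
    scheduler_index < _Scheduler_Count;
    ++scheduler_index
  ) {
    ( *visit )(
      _Thread_Scheduler_get_node_by_index( the_thread, scheduler_index ),
      arg
    );
  }
}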
1619 
1620 #if defined(RTEMS_SMP)
1621 /**
1622  * @brief Acquires the lock context in a critical section.
1623  *
1624  * @param the_thread The thread to acquire the lock context.
1625  * @param lock_context The lock context.
1626  */
1627 static inline void _Thread_Scheduler_acquire_critical(
1628   Thread_Control   *the_thread,
1629   ISR_lock_Context *lock_context
1630 )
1631 {
1632   _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1633 }
1634 
1635 /**
1636  * @brief Releases the lock context in a critical section.
1637  *
1638  * @param the_thread The thread to release the lock context.
1639  * @param lock_context The lock context.
1640  */
1641 static inline void _Thread_Scheduler_release_critical(
1642   Thread_Control   *the_thread,
1643   ISR_lock_Context *lock_context
1644 )
1645 {
1646   _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
1647 }
1648 
1649 /**
1650  * @brief Processes the thread's scheduler requests.
1651  *
1652  * @param[in, out] the_thread The thread for the operation.
1653  */
1654 void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1655 
1656 /**
1657  * @brief Adds a scheduler request to the thread.
1658  *
1659  * @param[in, out] the_thread The thread to add a scheduler request to.
1660  * @param[in, out] scheduler_node The scheduler node for the request.
1661  * @param request The request to add.
1662  */
1663 static inline void _Thread_Scheduler_add_request(
1664   Thread_Control         *the_thread,
1665   Scheduler_Node         *scheduler_node,
1666   Scheduler_Node_request  request
1667 )
1668 {
1669   ISR_lock_Context       lock_context;
1670   Scheduler_Node_request current_request;
1671 
1672   _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1673 
1674   current_request = scheduler_node->Thread.request;
1675 
1676   if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1677     _Assert(
1678       request == SCHEDULER_NODE_REQUEST_ADD
1679         || request == SCHEDULER_NODE_REQUEST_REMOVE
1680     );
1681     _Assert( scheduler_node->Thread.next_request == NULL );
1682     scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1683     the_thread->Scheduler.requests = scheduler_node;
1684   } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1685     _Assert(
1686       ( current_request == SCHEDULER_NODE_REQUEST_ADD
1687         && request == SCHEDULER_NODE_REQUEST_REMOVE )
1688       || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1689         && request == SCHEDULER_NODE_REQUEST_ADD )
1690     );
1691     request = SCHEDULER_NODE_REQUEST_NOTHING;
1692   }
1693 
1694   scheduler_node->Thread.request = request;
1695 
1696   _Thread_Scheduler_release_critical( the_thread, &lock_context );
1697 }
1698 
1699 /**
1700  * @brief Adds a wait node to the thread and adds a corresponding
1701  *      request to the thread.
1702  *
1703  * @param[in, out] the_thread The thread to add the wait node to.
1704  * @param scheduler_node The scheduler node which provides the wait node.
1705  */
1706 static inline void _Thread_Scheduler_add_wait_node(
1707   Thread_Control *the_thread,
1708   Scheduler_Node *scheduler_node
1709 )
1710 {
1711   _Chain_Append_unprotected(
1712     &the_thread->Scheduler.Wait_nodes,
1713     &scheduler_node->Thread.Wait_node
1714   );
1715   _Thread_Scheduler_add_request(
1716     the_thread,
1717     scheduler_node,
1718     SCHEDULER_NODE_REQUEST_ADD
1719   );
1720 }
1721 
1722 /**
1723  * @brief Removes a wait node from the thread and adds a corresponding request
1724  *      to it.
1725  *
1726  * @param the_thread The thread to add the request to remove a wait node.
1727  * @param scheduler_node The scheduler node to remove a wait node from.
1728  */
1729 static inline void _Thread_Scheduler_remove_wait_node(
1730   Thread_Control *the_thread,
1731   Scheduler_Node *scheduler_node
1732 )
1733 {
1734   _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1735   _Thread_Scheduler_add_request(
1736     the_thread,
1737     scheduler_node,
1738     SCHEDULER_NODE_REQUEST_REMOVE
1739   );
1740 }
1741 #endif
1742 
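/*
 * Editor's sketch (illustration only, not part of the original sources): on
 * SMP configurations a resource object may temporarily hand one of its
 * scheduler nodes to a thread.  The node is registered as a wait node and
 * later removed again; opposite requests for the same node cancel each other
 * out in _Thread_Scheduler_add_request() and the net effect is carried out by
 * _Thread_Scheduler_process_requests().  The helper name is hypothetical and
 * the locking requirements of real callers are omitted here.
 *
 *   static void my_borrow_and_return_node(
 *     Thread_Control *the_thread,
 *     Scheduler_Node *scheduler_node
 *   )
 *   {
 *     _Thread_Scheduler_add_wait_node( the_thread, scheduler_node );
 *     _Thread_Scheduler_remove_wait_node( the_thread, scheduler_node );
 *     _Thread_Scheduler_process_requests( the_thread );
 *   }
 */
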
1743 /**
1744  * @brief Returns the priority of the thread.
1745  *
1746  * Returns the thread priority relevant for the user API and the thread wait
1747  * information.  This includes temporary thread priority adjustments due to
1748  * locking protocols, a job release, or the POSIX sporadic server, for example.
1749  *
1750  * @param the_thread The thread of which to get the priority.
1751  *
1752  * @return The priority of the thread.
1753  */
1754 static inline Priority_Control _Thread_Get_priority(
1755   const Thread_Control *the_thread
1756 )
1757 {
1758   Scheduler_Node *scheduler_node;
1759 
1760   scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1761   return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1762 }
1763 
1764 /**
1765  * @brief Returns the unmapped priority of the thread.
1766  *
1767  * @param the_thread The thread of which to get the unmapped priority.
1768  *
1769  * @return The unmapped priority of the thread.
1770  */
1771 static inline Priority_Control _Thread_Get_unmapped_priority(
1772   const Thread_Control *the_thread
1773 )
1774 {
1775   return SCHEDULER_PRIORITY_UNMAP( _Thread_Get_priority( the_thread ) );
1776 }
1777 
1778 /**
1779  * @brief Returns the unmapped real priority of the thread.
1780  *
1781  * @param the_thread The thread of which to get the unmapped real priority.
1782  *
1783  * @return The unmapped real priority of the thread.
1784  */
1785 static inline Priority_Control _Thread_Get_unmapped_real_priority(
1786   const Thread_Control *the_thread
1787 )
1788 {
1789   return SCHEDULER_PRIORITY_UNMAP( the_thread->Real_priority.priority );
1790 }
1791 
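/*
 * Editor's sketch (illustration only): the unmapped priorities can be
 * compared to detect a temporary priority adjustment, for example one caused
 * by a locking protocol; the helper name is hypothetical.
 *
 *   static bool my_thread_priority_is_adjusted( const Thread_Control *the_thread )
 *   {
 *     return _Thread_Get_unmapped_priority( the_thread )
 *       != _Thread_Get_unmapped_real_priority( the_thread );
 *   }
 */
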
1792 /**
1793  * @brief Acquires the thread wait default lock inside a critical section
1794  * (interrupts disabled).
1795  *
1796  * @param[in, out] the_thread The thread.
1797  * @param lock_context The lock context used for the corresponding lock
1798  *   release.
1799  *
1800  * @see _Thread_Wait_release_default_critical().
1801  */
1802 static inline void _Thread_Wait_acquire_default_critical(
1803   Thread_Control   *the_thread,
1804   ISR_lock_Context *lock_context
1805 )
1806 {
1807   _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1808 }
1809 
1810 /**
1811  * @brief Acquires the thread wait default lock and returns the executing
1812  * thread.
1813  *
1814  * @param lock_context The lock context used for the corresponding lock
1815  *   release.
1816  *
1817  * @return The executing thread.
1818  *
1819  * @see _Thread_Wait_release_default().
1820  */
1821 static inline Thread_Control *_Thread_Wait_acquire_default_for_executing(
1822   ISR_lock_Context *lock_context
1823 )
1824 {
1825   Thread_Control *executing;
1826 
1827   _ISR_lock_ISR_disable( lock_context );
1828   executing = _Thread_Executing;
1829   _Thread_Wait_acquire_default_critical( executing, lock_context );
1830 
1831   return executing;
1832 }
1833 
1834 /**
1835  * @brief Acquires the thread wait default lock and disables interrupts.
1836  *
1837  * @param[in, out] the_thread The thread.
1838  * @param[out] lock_context The lock context used for the corresponding lock
1839  *   release.
1840  *
1841  * @see _Thread_Wait_release_default().
1842  */
1843 static inline void _Thread_Wait_acquire_default(
1844   Thread_Control   *the_thread,
1845   ISR_lock_Context *lock_context
1846 )
1847 {
1848   _ISR_lock_ISR_disable( lock_context );
1849   _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1850 }
1851 
1852 /**
1853  * @brief Releases the thread wait default lock inside a critical section
1854  * (interrupts disabled).
1855  *
1856  * The previous interrupt status is not restored.
1857  *
1858  * @param[in, out] the_thread The thread.
1859  * @param lock_context The lock context used for the corresponding lock
1860  *   acquire.
1861  */
1862 static inline void _Thread_Wait_release_default_critical(
1863   Thread_Control   *the_thread,
1864   ISR_lock_Context *lock_context
1865 )
1866 {
1867   _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1868 }
1869 
1870 /**
1871  * @brief Releases the thread wait default lock and restores the previous
1872  * interrupt status.
1873  *
1874  * @param[in, out] the_thread The thread.
1875  * @param[out] lock_context The lock context used for the corresponding lock
1876  *   acquire.
1877  */
1878 static inline void _Thread_Wait_release_default(
1879   Thread_Control   *the_thread,
1880   ISR_lock_Context *lock_context
1881 )
1882 {
1883   _Thread_Wait_release_default_critical( the_thread, lock_context );
1884   _ISR_lock_ISR_enable( lock_context );
1885 }
1886 
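/*
 * Editor's sketch (illustration only): the default lock acquire and release
 * form a bracket around short inspections of the thread wait state; the
 * helper name is hypothetical and the locking requirements of real users may
 * be stricter.
 *
 *   static bool my_thread_waits_on_a_queue( Thread_Control *the_thread )
 *   {
 *     ISR_lock_Context lock_context;
 *     bool             waits;
 *
 *     _Thread_Wait_acquire_default( the_thread, &lock_context );
 *     waits = ( the_thread->Wait.queue != NULL );
 *     _Thread_Wait_release_default( the_thread, &lock_context );
 *
 *     return waits;
 *   }
 */
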
1887 #if defined(RTEMS_SMP)
1888 #define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1889   RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1890 
1891 /**
1892  * @brief Removes a wait lock request and opens the next pending gate, if any.
1893  *
1894  * @param the_thread The thread to remove the request from.
1895  * @param queue_lock_context The queue lock context.
1896  */
1897 static inline void _Thread_Wait_remove_request_locked(
1898   Thread_Control            *the_thread,
1899   Thread_queue_Lock_context *queue_lock_context
1900 )
1901 {
1902   Chain_Node *first;
1903 
1904   _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1905   first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1906 
1907   if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1908     _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1909   }
1910 }
1911 
1912 /**
1913  * @brief Acquires the wait queue inside a critical section.
1914  *
1915  * @param queue The thread queue to acquire.
1916  * @param queue_lock_context The queue lock context.
1917  */
1918 static inline void _Thread_Wait_acquire_queue_critical(
1919   Thread_queue_Queue        *queue,
1920   Thread_queue_Lock_context *queue_lock_context
1921 )
1922 {
1923   _Thread_queue_Queue_acquire_critical(
1924     queue,
1925     &_Thread_Executing->Potpourri_stats,
1926     &queue_lock_context->Lock_context
1927   );
1928 }
1929 
1930 /**
1931  * @brief Releases the wait queue inside a critical section.
1932  *
1933  * @param queue The thread queue to release.
1934  * @param queue_lock_context The queue lock context.
1935  */
1936 static inline void _Thread_Wait_release_queue_critical(
1937   Thread_queue_Queue        *queue,
1938   Thread_queue_Lock_context *queue_lock_context
1939 )
1940 {
1941   _Thread_queue_Queue_release_critical(
1942     queue,
1943     &queue_lock_context->Lock_context
1944   );
1945 }
1946 #endif
1947 
1948 /**
1949  * @brief Acquires the thread wait lock inside a critical section (interrupts
1950  * disabled).
1951  *
1952  * @param[in, out] the_thread The thread.
1953  * @param[in, out] queue_context The thread queue context for the corresponding
1954  *   _Thread_Wait_release_critical().
1955  */
1956 static inline void _Thread_Wait_acquire_critical(
1957   Thread_Control       *the_thread,
1958   Thread_queue_Context *queue_context
1959 )
1960 {
1961 #if defined(RTEMS_SMP)
1962   Thread_queue_Queue *queue;
1963 
1964   _Thread_Wait_acquire_default_critical(
1965     the_thread,
1966     &queue_context->Lock_context.Lock_context
1967   );
1968 
1969   queue = the_thread->Wait.queue;
1970   queue_context->Lock_context.Wait.queue = queue;
1971 
1972   if ( queue != NULL ) {
1973     _Thread_queue_Gate_add(
1974       &the_thread->Wait.Lock.Pending_requests,
1975       &queue_context->Lock_context.Wait.Gate
1976     );
1977     _Thread_Wait_release_default_critical(
1978       the_thread,
1979       &queue_context->Lock_context.Lock_context
1980     );
1981     _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1982 
1983     if ( queue_context->Lock_context.Wait.queue == NULL ) {
1984       _Thread_Wait_release_queue_critical(
1985         queue,
1986         &queue_context->Lock_context
1987       );
1988       _Thread_Wait_acquire_default_critical(
1989         the_thread,
1990         &queue_context->Lock_context.Lock_context
1991       );
1992       _Thread_Wait_remove_request_locked(
1993         the_thread,
1994         &queue_context->Lock_context
1995       );
1996       _Assert( the_thread->Wait.queue == NULL );
1997     }
1998   }
1999 #else
2000   (void) the_thread;
2001   (void) queue_context;
2002 #endif
2003 }
2004 
2005 /**
2006  * @brief Acquires the thread wait lock and disables interrupts.
2007  *
2008  * @param[in, out] the_thread The thread.
2009  * @param[in, out] queue_context The thread queue context for the corresponding
2010  *   _Thread_Wait_release().
2011  */
2012 static inline void _Thread_Wait_acquire(
2013   Thread_Control       *the_thread,
2014   Thread_queue_Context *queue_context
2015 )
2016 {
2017   _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
2018   _Thread_Wait_acquire_critical( the_thread, queue_context );
2019 }
2020 
2021 /**
2022  * @brief Releases the thread wait lock inside a critical section (interrupts
2023  * disabled).
2024  *
2025  * The previous interrupt status is not restored.
2026  *
2027  * @param[in, out] the_thread The thread.
2028  * @param[in, out] queue_context The thread queue context used for the
2029  *   corresponding _Thread_Wait_release_critical().
2030  */
2031 static inline void _Thread_Wait_release_critical(
2032   Thread_Control       *the_thread,
2033   Thread_queue_Context *queue_context
2034 )
2035 {
2036 #if defined(RTEMS_SMP)
2037   Thread_queue_Queue *queue;
2038 
2039   queue = queue_context->Lock_context.Wait.queue;
2040 
2041   if ( queue != NULL ) {
2042     _Thread_Wait_release_queue_critical(
2043       queue, &queue_context->Lock_context
2044     );
2045     _Thread_Wait_acquire_default_critical(
2046       the_thread,
2047       &queue_context->Lock_context.Lock_context
2048     );
2049     _Thread_Wait_remove_request_locked(
2050       the_thread,
2051       &queue_context->Lock_context
2052     );
2053   }
2054 
2055   _Thread_Wait_release_default_critical(
2056     the_thread,
2057     &queue_context->Lock_context.Lock_context
2058   );
2059 #else
2060   (void) the_thread;
2061   (void) queue_context;
2062 #endif
2063 }
2064 
2065 /**
2066  * @brief Releases the thread wait lock and restores the previous interrupt
2067  * status.
2068  *
2069  * @param[in, out] the_thread The thread.
2070  * @param[in, out] queue_context The thread queue context used for the
2071  *   corresponding _Thread_Wait_acquire().
2072  */
2073 static inline void _Thread_Wait_release(
2074   Thread_Control       *the_thread,
2075   Thread_queue_Context *queue_context
2076 )
2077 {
2078   _Thread_Wait_release_critical( the_thread, queue_context );
2079   _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
2080 }
2081 
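/*
 * Editor's sketch (illustration only): the full wait lock acquire and release
 * use a thread queue context; _Thread_queue_Context_initialize() is assumed
 * to be available from <rtems/score/threadqimpl.h>, and the helper name is
 * hypothetical.
 *
 *   static void my_inspect_wait_state( Thread_Control *the_thread )
 *   {
 *     Thread_queue_Context queue_context;
 *
 *     _Thread_queue_Context_initialize( &queue_context );
 *     _Thread_Wait_acquire( the_thread, &queue_context );
 *     ...inspect the_thread->Wait.queue and related state here...
 *     _Thread_Wait_release( the_thread, &queue_context );
 *   }
 */
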
2082 /**
2083  * @brief Claims the thread wait queue.
2084  *
2085  * The caller must not be the owner of the default thread wait lock.  The
2086  * caller must be the owner of the corresponding thread queue lock.  The
2087  * registration of the corresponding thread queue operations is deferred and
2088  * done after the deadlock detection.  This is crucial to support timeouts on
2089  * SMP configurations.
2090  *
2091  * @param[in, out] the_thread The thread.
2092  * @param[in, out] queue The new thread queue.
2093  *
2094  * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
2095  */
2096 static inline void _Thread_Wait_claim(
2097   Thread_Control     *the_thread,
2098   Thread_queue_Queue *queue
2099 )
2100 {
2101   ISR_lock_Context lock_context;
2102 
2103   _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2104 
2105   _Assert( the_thread->Wait.queue == NULL );
2106 
2107 #if defined(RTEMS_SMP)
2108   _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
2109   _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
2110   _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
2111 #endif
2112 
2113   the_thread->Wait.queue = queue;
2114 
2115   _Thread_Wait_release_default_critical( the_thread, &lock_context );
2116 }
2117 
2118 /**
2119  * @brief Finalizes the thread wait queue claim via registration of the
2120  * corresponding thread queue operations.
2121  *
2122  * @param[in, out] the_thread The thread.
2123  * @param operations The corresponding thread queue operations.
2124  */
2125 static inline void _Thread_Wait_claim_finalize(
2126   Thread_Control                *the_thread,
2127   const Thread_queue_Operations *operations
2128 )
2129 {
2130   the_thread->Wait.operations = operations;
2131 }
2132 
2133 /**
2134  * @brief Removes a thread wait lock request.
2135  *
2136  * On SMP configurations, removes a thread wait lock request.
2137  *
2138  * On other configurations, this function does nothing.
2139  *
2140  * @param[in, out] the_thread The thread.
2141  * @param[in, out] queue_lock_context The thread queue lock context used for
2142  *   the corresponding _Thread_Wait_acquire().
2143  */
2144 static inline void _Thread_Wait_remove_request(
2145   Thread_Control            *the_thread,
2146   Thread_queue_Lock_context *queue_lock_context
2147 )
2148 {
2149 #if defined(RTEMS_SMP)
2150   ISR_lock_Context lock_context;
2151 
2152   _Thread_Wait_acquire_default( the_thread, &lock_context );
2153   _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
2154   _Thread_Wait_release_default( the_thread, &lock_context );
2155 #else
2156   (void) the_thread;
2157   (void) queue_lock_context;
2158 #endif
2159 }
2160 
2161 /**
2162  * @brief Restores the default thread wait queue and operations.
2163  *
2164  * The caller must be the owner of the current thread wait queue lock.
2165  *
2166  * On SMP configurations, the pending requests are updated to use the stale
2167  * thread queue operations.
2168  *
2169  * @param[in, out] the_thread The thread.
2170  *
2171  * @see _Thread_Wait_claim().
2172  */
2173 static inline void _Thread_Wait_restore_default(
2174   Thread_Control *the_thread
2175 )
2176 {
2177 #if defined(RTEMS_SMP)
2178   ISR_lock_Context  lock_context;
2179   Chain_Node       *node;
2180   const Chain_Node *tail;
2181 
2182   _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2183 
2184   node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
2185   tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
2186 
2187   if ( node != tail ) {
2188     do {
2189       Thread_queue_Context *queue_context;
2190 
2191       queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
2192       queue_context->Lock_context.Wait.queue = NULL;
2193 
2194       node = _Chain_Next( node );
2195     } while ( node != tail );
2196 
2197     _Thread_queue_Gate_add(
2198       &the_thread->Wait.Lock.Pending_requests,
2199       &the_thread->Wait.Lock.Tranquilizer
2200     );
2201   } else {
2202     _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
2203   }
2204 #endif
2205 
2206   the_thread->Wait.queue = NULL;
2207   the_thread->Wait.operations = &_Thread_queue_Operations_default;
2208 
2209 #if defined(RTEMS_SMP)
2210   _Thread_Wait_release_default_critical( the_thread, &lock_context );
2211 #endif
2212 }
2213 
2214 /**
2215  * @brief Tranquilizes the thread after a wait on a thread queue.
2216  *
2217  * After the violent blocking procedure this function makes the thread calm and
2218  * peaceful again so that it can carry out its normal work.
2219  *
2220  * On SMP configurations, ensures that all pending thread wait lock requests
2221  * have completed before the thread can begin a new thread wait procedure.
2222  *
2223  * On other configurations, this function does nothing.
2224  *
2225  * It must be called after a _Thread_Wait_claim() exactly once
2226  *  - after the corresponding thread queue lock was released, and
2227  *  - after the default wait state was restored or some other processor is
2228  *    about to do this.
2229  *
2230  * @param the_thread The thread.
2231  */
2232 static inline void _Thread_Wait_tranquilize(
2233   Thread_Control *the_thread
2234 )
2235 {
2236 #if defined(RTEMS_SMP)
2237   _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
2238 #else
2239   (void) the_thread;
2240 #endif
2241 }
2242 
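/*
 * Editor's sketch (heavily simplified, illustration only; the production
 * sequence lives in the thread queue enqueue implementation): a blocking
 * operation claims the wait queue, registers the operations, and is later
 * undone before the thread resumes.
 *
 *   _Thread_Wait_claim( executing, queue );
 *   _Thread_Wait_claim_finalize( executing, operations );
 *   ...block and wait until the thread is extracted from the queue...
 *   _Thread_Wait_restore_default( executing );
 *   ...release the corresponding thread queue lock...
 *   _Thread_Wait_tranquilize( executing );
 */
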
2243 /**
2244  * @brief Cancels a thread wait on a thread queue.
2245  *
2246  * @param[in, out] the_thread The thread.
2247  * @param queue_context The thread queue context used for the corresponding
2248  *   _Thread_Wait_acquire().
2249  */
2250 static inline void _Thread_Wait_cancel(
2251   Thread_Control       *the_thread,
2252   Thread_queue_Context *queue_context
2253 )
2254 {
2255   Thread_queue_Queue *queue;
2256 
2257   queue = the_thread->Wait.queue;
2258 
2259   if ( queue != NULL ) {
2260 #if defined(RTEMS_SMP)
2261     _Assert( queue_context->Lock_context.Wait.queue == queue );
2262 #endif
2263 
2264     ( *the_thread->Wait.operations->extract )(
2265       queue,
2266       the_thread,
2267       queue_context
2268     );
2269     _Thread_Wait_restore_default( the_thread );
2270 
2271 #if defined(RTEMS_SMP)
2272     _Assert( queue_context->Lock_context.Wait.queue == NULL );
2273     queue_context->Lock_context.Wait.queue = queue;
2274 #endif
2275   }
2276 }
2277 
2278 /**
2279  * @brief Mask to get the thread wait state flags.
2280  */
2281 #define THREAD_WAIT_STATE_MASK 0xffU
2282 
2283 /**
2284  * @brief Indicates that the thread does not wait on something.
2285  *
2286  * In this wait state, the wait class is zero.  This wait state is set
2287  * initially by _Thread_Initialize() and after each wait operation once the
2288  * thread is ready again.
2289  */
2290 #define THREAD_WAIT_STATE_READY 0x0U
2291 
2292 /**
2293  * @brief Indicates that the thread begins with the blocking operation.
2294  *
2295  * A blocking operation consists of an optional watchdog initialization and the
2296  * setting of the appropriate thread blocking state with the corresponding
2297  * scheduler block operation.
2298  */
2299 #define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
2300 
2301 /**
2302  * @brief Indicates that the thread completed the blocking operation.
2303  */
2304 #define THREAD_WAIT_STATE_BLOCKED 0x2U
2305 
2306 /**
2307  * @brief Mask to get the thread wait class flags.
2308  */
2309 #define THREAD_WAIT_CLASS_MASK 0xff00U
2310 
2311 /**
2312  * @brief Indicates that the thread waits for an event.
2313  */
2314 #define THREAD_WAIT_CLASS_EVENT 0x100U
2315 
2316 /**
2317  * @brief Indicates that the thread waits for a system event.
2318  */
2319 #define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
2320 
2321 /**
2322  * @brief Indicates that the thread waits for an object.
2323  */
2324 #define THREAD_WAIT_CLASS_OBJECT 0x400U
2325 
2326 /**
2327  * @brief Indicates that the thread waits for a period.
2328  */
2329 #define THREAD_WAIT_CLASS_PERIOD 0x800U
2330 
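/*
 * Editor's note (illustration only): a wait flags value combines a wait class
 * (zero for THREAD_WAIT_STATE_READY) with a wait state, for example
 *
 *   THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 *
 * and the parts can be recovered with THREAD_WAIT_CLASS_MASK and
 * THREAD_WAIT_STATE_MASK.
 */
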
2331 /**
2332  * @brief Sets the thread's wait flags.
2333  *
2334  * @param[in, out] the_thread The thread to set the wait flags of.
2335  * @param flags The flags to set.
2336  */
2337 static inline void _Thread_Wait_flags_set(
2338   Thread_Control    *the_thread,
2339   Thread_Wait_flags  flags
2340 )
2341 {
2342 #if defined(RTEMS_SMP)
2343   _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
2344 #else
2345   the_thread->Wait.flags = flags;
2346 #endif
2347 }
2348 
2349 /**
2350  * @brief Gets the thread's wait flags using ATOMIC_ORDER_RELAXED.
2351  *
2352  * @param the_thread The thread to get the wait flags of.
2353  *
2354  * @return The thread's wait flags.
2355  */
2356 static inline Thread_Wait_flags _Thread_Wait_flags_get(
2357   const Thread_Control *the_thread
2358 )
2359 {
2360 #if defined(RTEMS_SMP)
2361   return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
2362 #else
2363   return the_thread->Wait.flags;
2364 #endif
2365 }
2366 
2367 /**
2368  * @brief Gets the thread's wait flags using ATOMIC_ORDER_ACQUIRE.
2369  *
2370  * @param the_thread The thread to get the wait flags of.
2371  *
2372  * @return The thread's wait flags.
2373  */
2374 static inline Thread_Wait_flags _Thread_Wait_flags_get_acquire(
2375   const Thread_Control *the_thread
2376 )
2377 {
2378 #if defined(RTEMS_SMP)
2379   return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
2380 #else
2381   return the_thread->Wait.flags;
2382 #endif
2383 }
2384 
2385 /**
2386  * @brief Tries to change the thread wait flags with release semantics in case
2387  * of success.
2388  *
2389  * Must be called inside a critical section (interrupts disabled).
2390  *
2391  * In case the wait flags are equal to the expected wait flags, then the wait
2392  * flags are set to the desired wait flags.
2393  *
2394  * @param the_thread The thread.
2395  * @param expected_flags The expected wait flags.
2396  * @param desired_flags The desired wait flags.
2397  *
2398  * @retval true The wait flags were equal to the expected wait flags.
2399  * @retval false The wait flags were not equal to the expected wait flags.
2400  */
2401 static inline bool _Thread_Wait_flags_try_change_release(
2402   Thread_Control    *the_thread,
2403   Thread_Wait_flags  expected_flags,
2404   Thread_Wait_flags  desired_flags
2405 )
2406 {
2407   _Assert( _ISR_Get_level() != 0 );
2408 
2409 #if defined(RTEMS_SMP)
2410   return _Atomic_Compare_exchange_uint(
2411     &the_thread->Wait.flags,
2412     &expected_flags,
2413     desired_flags,
2414     ATOMIC_ORDER_RELEASE,
2415     ATOMIC_ORDER_RELAXED
2416   );
2417 #else
2418   bool success = ( the_thread->Wait.flags == expected_flags );
2419 
2420   if ( success ) {
2421     the_thread->Wait.flags = desired_flags;
2422   }
2423 
2424   return success;
2425 #endif
2426 }
2427 
2428 /**
2429  * @brief Tries to change the thread wait flags with acquire semantics.
2430  *
2431  * In case the wait flags are equal to the expected wait flags, then the wait
2432  * flags are set to the desired wait flags.
2433  *
2434  * @param the_thread The thread.
2435  * @param expected_flags The expected wait flags.
2436  * @param desired_flags The desired wait flags.
2437  *
2438  * @retval true The wait flags were equal to the expected wait flags.
2439  * @retval false The wait flags were not equal to the expected wait flags.
2440  */
2441 static inline bool _Thread_Wait_flags_try_change_acquire(
2442   Thread_Control    *the_thread,
2443   Thread_Wait_flags  expected_flags,
2444   Thread_Wait_flags  desired_flags
2445 )
2446 {
2447 #if defined(RTEMS_SMP)
2448   return _Atomic_Compare_exchange_uint(
2449     &the_thread->Wait.flags,
2450     &expected_flags,
2451     desired_flags,
2452     ATOMIC_ORDER_ACQUIRE,
2453     ATOMIC_ORDER_ACQUIRE
2454   );
2455 #else
2456   bool      success;
2457   ISR_Level level;
2458 
2459   _ISR_Local_disable( level );
2460 
2461   success = _Thread_Wait_flags_try_change_release(
2462     the_thread,
2463     expected_flags,
2464     desired_flags
2465   );
2466 
2467   _ISR_Local_enable( level );
2468   return success;
2469 #endif
2470 }
2471 
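/*
 * Editor's sketch (simplified illustration of the wait flag protocol, not
 * taken from the original sources): a wake-up path called with interrupts
 * disabled first tries to catch the waiter in the intend-to-block phase and
 * only performs a full unblock if the blocking operation already completed.
 * The helper name and the use of the event wait class are assumptions.
 *
 *   static void my_wake_up( Thread_Control *the_thread )
 *   {
 *     bool success;
 *
 *     success = _Thread_Wait_flags_try_change_release(
 *       the_thread,
 *       THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *       THREAD_WAIT_STATE_READY
 *     );
 *
 *     if ( !success ) {
 *       Thread_Wait_flags flags;
 *
 *       flags = _Thread_Wait_flags_get( the_thread );
 *
 *       if ( flags == ( THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_BLOCKED ) ) {
 *         _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
 *         _Thread_Unblock( the_thread );
 *       }
 *     }
 *   }
 */
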
2472 /**
2473  * @brief Returns the object identifier of the object containing the current
2474  * thread wait queue.
2475  *
2476  * This function may be used for debug and system information purposes.  The
2477  * caller must be the owner of the thread lock.
2478  *
2479  * @param the_thread The thread.
2480  *
2481  * @retval 0 The thread waits on no thread queue currently, the thread wait
2482  *   queue is not contained in an object, or the current thread state provides
2483  *   insufficient information, e.g. the thread is in the middle of a blocking
2484  *   operation.
2485  * @retval other The object identifier of the object containing the thread wait
2486  *   queue.
2487  */
2488 Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
2489 
2490 /**
2491  * @brief Gets the thread wait return code as a status code.
2492  *
2493  * @param the_thread The thread to get the wait return code of.
2494  */
2495 static inline Status_Control _Thread_Wait_get_status(
2496   const Thread_Control *the_thread
2497 )
2498 {
2499   return (Status_Control) the_thread->Wait.return_code;
2500 }
2501 
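/*
 * Editor's note (illustration only): after a wait operation the caller
 * typically maps the wait status to an API specific code, for example
 *
 *   status = _Thread_Wait_get_status( executing );
 *   if ( status == STATUS_TIMEOUT ) {
 *     ...the wait was ended by _Thread_Timeout()...
 *   }
 *
 * STATUS_TIMEOUT is provided by <rtems/score/status.h>.
 */
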
2502 /**
2503  * @brief Cancels a blocking operation so that the thread can continue its
2504  * execution.
2505  *
2506  * In case this function actually cancelled the blocking operation, then the
2507  * thread wait return code is set to the specified status.
2508  *
2509  * A specialization of this function is _Thread_Timeout().
2510  *
2511  * @param[in, out] the_thread The thread.
2512  * @param status The thread wait status.
2513  */
2514 void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
2515 
2516 /**
2517  * @brief General purpose thread wait timeout.
2518  *
2519  * @param the_watchdog The thread timer watchdog.
2520  */
2521 void _Thread_Timeout( Watchdog_Control *the_watchdog );
2522 
2523 /**
2524  * @brief Initializes the thread timer.
2525  *
2526  * @param[in, out] timer The timer to initialize.
2527  * @param cpu The cpu for the operation.
2528  */
2529 static inline void _Thread_Timer_initialize(
2530   Thread_Timer_information *timer,
2531   Per_CPU_Control          *cpu
2532 )
2533 {
2534   _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
2535   timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2536   _Watchdog_Preinitialize( &timer->Watchdog, cpu );
2537 }
2538 
2539 /**
2540  * @brief Arms the thread timer with a timeout in ticks.
2541  *
2542  * @param[in, out] the_thread The thread to arm the timeout for.
2543  * @param cpu The cpu for the operation.
2544  * @param ticks The number of ticks until the timeout expires.
2545  */
2546 static inline void _Thread_Add_timeout_ticks(
2547   Thread_Control    *the_thread,
2548   Per_CPU_Control   *cpu,
2549   Watchdog_Interval  ticks
2550 )
2551 {
2552   ISR_lock_Context lock_context;
2553 
2554   _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2555 
2556   the_thread->Timer.header =
2557     &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2558   the_thread->Timer.Watchdog.routine = _Thread_Timeout;
2559   _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
2560 
2561   _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2562 }
2563 
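/*
 * Editor's sketch (illustration only, the helper name is hypothetical): arm a
 * relative timeout in clock ticks for the executing thread; the watchdog
 * calls _Thread_Timeout() when it expires.
 *
 *   static void my_arm_timeout( Thread_Control *executing, Watchdog_Interval ticks )
 *   {
 *     _Thread_Add_timeout_ticks( executing, _Per_CPU_Get(), ticks );
 *   }
 */
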
2564 /**
2565  * @brief Arms the thread timer using the realtime watchdog header of the cpu.
2566  *
2567  * @param[in, out] the_thread The thread for the operation.
2568  * @param cpu The cpu to get the realtime watchdog header from.
2569  * @param routine The watchdog routine for the thread.
2570  * @param expire The expiration time point for the watchdog.
2571  */
2572 static inline void _Thread_Timer_insert_realtime(
2573   Thread_Control                 *the_thread,
2574   Per_CPU_Control                *cpu,
2575   Watchdog_Service_routine_entry  routine,
2576   uint64_t                        expire
2577 )
2578 {
2579   ISR_lock_Context  lock_context;
2580   Watchdog_Header  *header;
2581 
2582   _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2583 
2584   header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
2585   the_thread->Timer.header = header;
2586   the_thread->Timer.Watchdog.routine = routine;
2587   _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );
2588 
2589   _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2590 }
2591 
2592 /**
2593  * @brief Removes the watchdog timer from the thread.
2594  *
2595  * @param[in, out] the_thread The thread to remove the watchdog from.
2596  */
2597 static inline void _Thread_Timer_remove( Thread_Control *the_thread )
2598 {
2599   ISR_lock_Context lock_context;
2600 
2601   _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2602 
2603   _Watchdog_Per_CPU_remove(
2604     &the_thread->Timer.Watchdog,
2605 #if defined(RTEMS_SMP)
2606     the_thread->Timer.Watchdog.cpu,
2607 #else
2608     _Per_CPU_Get(),
2609 #endif
2610     the_thread->Timer.header
2611   );
2612 
2613   _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2614 }
2615 
2616 /**
2617  * @brief Removes the watchdog timer from the thread and unblocks it if
2618  *   necessary.
2619  *
2620  * @param[in, out] the_thread The thread to remove the watchdog from.
2621  * @param queue The thread queue.
2622  */
2623 static inline void _Thread_Remove_timer_and_unblock(
2624   Thread_Control     *the_thread,
2625   Thread_queue_Queue *queue
2626 )
2627 {
2628   _Thread_Wait_tranquilize( the_thread );
2629   _Thread_Timer_remove( the_thread );
2630 
2631 #if defined(RTEMS_MULTIPROCESSING)
2632   if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
2633     _Thread_Unblock( the_thread );
2634   } else {
2635     _Thread_queue_Unblock_proxy( queue, the_thread );
2636   }
2637 #else
2638   (void) queue;
2639   _Thread_Unblock( the_thread );
2640 #endif
2641 }
2642 
2643 /**
2644  * @brief Sets the name of the thread.
2645  *
2646  * @param[out] the_thread The thread to change the name of.
2647  * @param name The new name for the thread.
2648  *
2649  * @retval STATUS_SUCCESSFUL The operation succeeded.
2650  * @retval STATUS_RESULT_TOO_LARGE The name was too long.
2651  */
2652 Status_Control _Thread_Set_name(
2653   Thread_Control *the_thread,
2654   const char     *name
2655 );
2656 
2657 /**
2658  * @brief Gets the name of the thread.
2659  *
2660  * @param the_thread The thread to get the name of.
2661  * @param[out] buffer The buffer to return the thread's name in.
2662  * @param buffer_size The size of @a buffer.
2663  *
2664  * @return The number of bytes copied to @a buffer.
2665  */
2666 size_t _Thread_Get_name(
2667   const Thread_Control *the_thread,
2668   char                 *buffer,
2669   size_t                buffer_size
2670 );
2671 
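/*
 * Editor's sketch (illustration only, buffer size chosen arbitrarily): set a
 * thread name and read it back.
 *
 *   char buffer[ 32 ];
 *
 *   if ( _Thread_Set_name( the_thread, "WORK" ) == STATUS_SUCCESSFUL ) {
 *     (void) _Thread_Get_name( the_thread, buffer, sizeof( buffer ) );
 *   }
 */
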
2672 #if defined(RTEMS_SMP)
2673 #define THREAD_PIN_STEP 2
2674 
2675 #define THREAD_PIN_PREEMPTION 1
2676 
2677 /**
2678  * @brief Unpins the thread.
2679  *
2680  * @param executing The currently executing thread.
2681  * @param cpu_self The cpu for the operation.
2682  */
2683 void _Thread_Do_unpin(
2684   Thread_Control  *executing,
2685   Per_CPU_Control *cpu_self
2686 );
2687 #endif
2688 
2689 /**
2690  * @brief Pins the executing thread.
2691  *
2692  * @param executing The currently executing thread.
2693  */
2694 static inline void _Thread_Pin( Thread_Control *executing )
2695 {
2696 #if defined(RTEMS_SMP)
2697   _Assert( executing == _Thread_Get_executing() );
2698 
2699   executing->Scheduler.pin_level += THREAD_PIN_STEP;
2700 #else
2701   (void) executing;
2702 #endif
2703 }
2704 
2705 /**
2706  * @brief Unpins the thread.
2707  *
2708  * @param executing The currently executing thread.
2709  * @param cpu_self The cpu for the operation.
2710  */
2711 static inline void _Thread_Unpin(
2712   Thread_Control  *executing,
2713   Per_CPU_Control *cpu_self
2714 )
2715 {
2716 #if defined(RTEMS_SMP)
2717   unsigned int pin_level;
2718 
2719   _Assert( executing == _Per_CPU_Get_executing( cpu_self ) );
2720 
2721   pin_level = executing->Scheduler.pin_level;
2722   _Assert( pin_level > 0 );
2723 
2724   if (
2725     RTEMS_PREDICT_TRUE(
2726       pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
2727     )
2728   ) {
2729     executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
2730   } else {
2731     _Thread_Do_unpin( executing, cpu_self );
2732   }
2733 #else
2734   (void) executing;
2735   (void) cpu_self;
2736 #endif
2737 }
2738 
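/*
 * Editor's sketch (illustration only): pin and unpin bracket a code section
 * which must not migrate to another processor.  The helper name is
 * hypothetical and _Thread_Dispatch_disable()/_Thread_Dispatch_enable() are
 * assumed to be available from <rtems/score/threaddispatch.h>.
 *
 *   static void my_run_pinned( void ( *worker )( void ) )
 *   {
 *     Thread_Control  *executing;
 *     Per_CPU_Control *cpu_self;
 *
 *     executing = _Thread_Get_executing();
 *     _Thread_Pin( executing );
 *     ( *worker )();
 *
 *     cpu_self = _Thread_Dispatch_disable();
 *     _Thread_Unpin( executing, cpu_self );
 *     _Thread_Dispatch_enable( cpu_self );
 *   }
 */
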
2739 /** @}*/
2740 
2741 #ifdef __cplusplus
2742 }
2743 #endif
2744 
2745 #if defined(RTEMS_MULTIPROCESSING)
2746 #include <rtems/score/threadmp.h>
2747 #endif
2748 
2749 #ifdef __cplusplus
2750 extern "C" {
2751 #endif
2752 
2753 /**
2754  * @ingroup RTEMSScoreThread
2755  *
2756  * @brief Removes the watchdog timer from the thread and lets the thread
2757  *   continue its execution.
2758  *
2759  * @param[in, out] the_thread is the thread.
2760  *
2761  * @param status is the thread wait status.
2762  */
2763 static inline void _Thread_Timer_remove_and_continue(
2764   Thread_Control *the_thread,
2765   Status_Control  status
2766 )
2767 {
2768   _Thread_Timer_remove( the_thread );
2769 #if defined(RTEMS_MULTIPROCESSING)
2770   _Thread_MP_Extract_proxy( the_thread );
2771 #endif
2772   _Thread_Continue( the_thread, status );
2773 }
2774 
2775 #ifdef __cplusplus
2776 }
2777 #endif
2778 
2779 #endif
2780 /* end of include file */