Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:13

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreScheduler
0007  *
0008  * @brief This header file provides interfaces of the
0009  *   @ref RTEMSScoreScheduler which are only used by the implementation.
0010  */
0011 
0012 /*
0013  *  Copyright (C) 2010 Gedare Bloom.
0014  *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
0015  *  Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
0016  *
0017  * Redistribution and use in source and binary forms, with or without
0018  * modification, are permitted provided that the following conditions
0019  * are met:
0020  * 1. Redistributions of source code must retain the above copyright
0021  *    notice, this list of conditions and the following disclaimer.
0022  * 2. Redistributions in binary form must reproduce the above copyright
0023  *    notice, this list of conditions and the following disclaimer in the
0024  *    documentation and/or other materials provided with the distribution.
0025  *
0026  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0027  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0028  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0029  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0030  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0031  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0032  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0033  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0034  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0035  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0036  * POSSIBILITY OF SUCH DAMAGE.
0037  */
0038 
0039 #ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
0040 #define _RTEMS_SCORE_SCHEDULERIMPL_H
0041 
0042 #include <rtems/score/scheduler.h>
0043 #include <rtems/score/assert.h>
0044 #include <rtems/score/priorityimpl.h>
0045 #include <rtems/score/smpimpl.h>
0046 #include <rtems/score/status.h>
0047 #include <rtems/score/threadimpl.h>
0048 
0049 #ifdef __cplusplus
0050 extern "C" {
0051 #endif
0052 
0053 /**
0054  * @defgroup RTEMSScoreScheduler Scheduler Handler
0055  *
0056  * @ingroup RTEMSScore
0057  *
0058  * @brief This group contains the Scheduler Handler implementation.
0059  *
0060  * This handler encapsulates functionality related to managing sets of threads
0061  * that are ready for execution.
0062  *
0063  * Schedulers are used by the system to manage sets of threads that are ready
0064  * for execution.  A scheduler consists of
0065  *
0066  * * a scheduler algorithm implementation,
0067  *
0068  * * a scheduler index and an associated name, and
0069  *
0070  * * a set of processors owned by the scheduler (may be empty, but never
0071  *   overlaps with a set owned by another scheduler).
0072  *
0073  * Each thread uses exactly one scheduler as its home scheduler.  Threads may
0074  * temporarily use another scheduler due to actions of locking protocols.
0075  *
0076  * All properties of a scheduler can be configured and controlled by the user.
0077  * Some properties are fixed at link time (defined by application configuration
0078  * options), other properties can be changed at runtime through directive
0079  * calls.
0080  *
0081  * The scheduler index, name, and initial processor set are defined for a
0082  * particular application by the application configuration.  The schedulers are
0083  * registered in the ::_Scheduler_Table which has ::_Scheduler_Count entries.
0084  *
0085  * @{
0086  */
0087 
0088 /**
0089  * @brief Initializes the scheduler to the policy chosen by the user.
0090  *
0091  * This routine initializes the scheduler to the policy chosen by the user
0092  * through confdefs, or to the priority scheduler with ready chains by
0093  * default.
0094  */
0095 void _Scheduler_Handler_initialization( void );
0096 
0097 /**
0098  * @brief Gets the context of the scheduler.
0099  *
0100  * @param scheduler The scheduler to get the context of.
0101  *
0102  * @return The context of @a scheduler.
0103  */
0104 static inline Scheduler_Context *_Scheduler_Get_context(
0105   const Scheduler_Control *scheduler
0106 )
0107 {
0108   return scheduler->context;
0109 }
0110 
0111 /**
0112  * @brief Gets the scheduler for the cpu.
0113  *
0114  * @param cpu The cpu control to get the scheduler of.
0115  *
0116  * @return The scheduler for the cpu.
0117  */
0118 static inline const Scheduler_Control *_Scheduler_Get_by_CPU(
0119   const Per_CPU_Control *cpu
0120 )
0121 {
0122 #if defined(RTEMS_SMP)
0123   return cpu->Scheduler.control;
0124 #else
0125   (void) cpu;
0126   return &_Scheduler_Table[ 0 ];
0127 #endif
0128 }
0129 
0130 /**
0131  * @brief Acquires the scheduler instance inside a critical section (interrupts
0132  * disabled).
0133  *
0134  * @param scheduler The scheduler instance.
0135  * @param lock_context The lock context to use for
0136  *   _Scheduler_Release_critical().
0137  */
0138 static inline void _Scheduler_Acquire_critical(
0139   const Scheduler_Control *scheduler,
0140   ISR_lock_Context        *lock_context
0141 )
0142 {
0143 #if defined(RTEMS_SMP)
0144   Scheduler_Context *context;
0145 
0146   context = _Scheduler_Get_context( scheduler );
0147   _ISR_lock_Acquire( &context->Lock, lock_context );
0148 #else
0149   (void) scheduler;
0150   (void) lock_context;
0151 #endif
0152 }
0153 
0154 /**
0155  * @brief Releases the scheduler instance inside a critical section (interrupts
0156  * disabled).
0157  *
0158  * @param scheduler The scheduler instance.
0159  * @param lock_context The lock context used for
0160  *   _Scheduler_Acquire_critical().
0161  */
0162 static inline void _Scheduler_Release_critical(
0163   const Scheduler_Control *scheduler,
0164   ISR_lock_Context        *lock_context
0165 )
0166 {
0167 #if defined(RTEMS_SMP)
0168   Scheduler_Context *context;
0169 
0170   context = _Scheduler_Get_context( scheduler );
0171   _ISR_lock_Release( &context->Lock, lock_context );
0172 #else
0173   (void) scheduler;
0174   (void) lock_context;
0175 #endif
0176 }
0177 
0178 #if defined(RTEMS_SMP)
0179 /**
0180  * @brief Indicate if the thread non-preempt mode is supported by the
0181  * scheduler.
0182  *
0183  * @param scheduler The scheduler instance.
0184  *
0185  * @return True if the non-preempt mode for threads is supported by the
0186  *   scheduler, otherwise false.
0187  */
0188 static inline bool _Scheduler_Is_non_preempt_mode_supported(
0189   const Scheduler_Control *scheduler
0190 )
0191 {
0192   return scheduler->is_non_preempt_mode_supported;
0193 }
0194 #endif
0195 
0196 /**
0197  * The preferred method to add a new scheduler is to define the jump table
0198  * entries and add a case to the _Scheduler_Initialize routine.
0199  *
0200  * Generic scheduling implementations that rely on the ready queue only can
0201  * be found in the _Scheduler_queue_XXX functions.
0202  */
0203 
/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */
0210 
0211 /**
0212  * @brief General scheduling decision.
0213  *
0214  * This kernel routine implements the scheduling decision logic for
0215  * the scheduler. It does NOT dispatch.
0216  *
0217  * @param the_thread The thread which state changed previously.
0218  */
0219 static inline void _Scheduler_Schedule( Thread_Control *the_thread )
0220 {
0221   const Scheduler_Control *scheduler;
0222   ISR_lock_Context         lock_context;
0223 
0224   scheduler = _Thread_Scheduler_get_home( the_thread );
0225   _Scheduler_Acquire_critical( scheduler, &lock_context );
0226 
0227   ( *scheduler->Operations.schedule )( scheduler, the_thread );
0228 
0229   _Scheduler_Release_critical( scheduler, &lock_context );
0230 }
0231 
0232 /**
0233  * @brief Scheduler yield with a particular thread.
0234  *
0235  * This routine is invoked when a thread wishes to voluntarily transfer control
0236  * of the processor to another thread.
0237  *
0238  * @param the_thread The yielding thread.
0239  */
0240 static inline void _Scheduler_Yield( Thread_Control *the_thread )
0241 {
0242   const Scheduler_Control *scheduler;
0243   ISR_lock_Context         lock_context;
0244 
0245   scheduler = _Thread_Scheduler_get_home( the_thread );
0246   _Scheduler_Acquire_critical( scheduler, &lock_context );
0247   ( *scheduler->Operations.yield )(
0248     scheduler,
0249     the_thread,
0250     _Thread_Scheduler_get_home_node( the_thread )
0251   );
0252   _Scheduler_Release_critical( scheduler, &lock_context );
0253 }
0254 
0255 /**
0256  * @brief Blocks a thread with respect to the scheduler.
0257  *
0258  * This routine removes @a the_thread from the scheduling decision for
0259  * the scheduler. The primary task is to remove the thread from the
0260  * ready queue.  It performs any necessary scheduling operations
0261  * including the selection of a new heir thread.
0262  *
0263  * @param the_thread The thread.
0264  */
static inline void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  /*
   * The first node of the chain is used for the block operation; the
   * remaining nodes are withdrawn below.
   */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  /* Block the thread with respect to the first scheduler node */
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  /*
   * Withdraw the thread from every remaining scheduler node, each under the
   * lock of the corresponding scheduler instance.
   */
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  /* Only the home scheduler node exists on uniprocessor configurations */
  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
0316 
0317 /**
0318  * @brief Unblocks a thread with respect to the scheduler.
0319  *
0320  * This operation must fetch the latest thread priority value for this
0321  * scheduler instance and update its internal state if necessary.
0322  *
0323  * @param the_thread The thread.
0324  *
0325  * @see _Scheduler_Node_get_priority().
0326  */
static inline void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

#if defined(RTEMS_SMP)
  /*
   * Use the first node of the scheduler nodes chain for the unblock
   * operation.
   */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  /* Only the home scheduler node exists on uniprocessor configurations */
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}
0347 
0348 /**
0349  * @brief Propagates a priority change of a thread to the scheduler.
0350  *
0351  * On uni-processor configurations, this operation must evaluate the thread
0352  * state.  In case the thread is not ready, then the priority update should be
0353  * deferred to the next scheduler unblock operation.
0354  *
0355  * The operation must update the heir and thread dispatch necessary variables
0356  * in case the set of scheduled threads changes.
0357  *
0358  * @param the_thread The thread changing its priority.
0359  *
0360  * @see _Scheduler_Node_get_priority().
0361  */
static inline void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  /* Carry out pending scheduler node requests before the chain is walked */
  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /*
   * Propagate the priority change to every scheduler node of the thread,
   * each under the lock of the corresponding scheduler instance.  The chain
   * contains at least the home scheduler node, so a do/while loop is used.
   */
  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  /* Only the home scheduler node exists on uniprocessor configurations */
  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
0402 
0403 /**
0404  * @brief Maps a thread priority from the user domain to the scheduler domain.
0405  *
0406  * Let M be the maximum scheduler priority.  The mapping must be bijective in
0407  * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
0408  * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
0409  * other values the mapping is undefined.
0410  *
0411  * @param scheduler The scheduler instance.
0412  * @param priority The user domain thread priority.
0413  *
0414  * @return The corresponding thread priority of the scheduler domain is returned.
0415  */
0416 static inline Priority_Control _Scheduler_Map_priority(
0417   const Scheduler_Control *scheduler,
0418   Priority_Control         priority
0419 )
0420 {
0421   return ( *scheduler->Operations.map_priority )( scheduler, priority );
0422 }
0423 
0424 /**
0425  * @brief Unmaps a thread priority from the scheduler domain to the user domain.
0426  *
0427  * @param scheduler The scheduler instance.
0428  * @param priority The scheduler domain thread priority.
0429  *
0430  * @return The corresponding thread priority of the user domain is returned.
0431  */
0432 static inline Priority_Control _Scheduler_Unmap_priority(
0433   const Scheduler_Control *scheduler,
0434   Priority_Control         priority
0435 )
0436 {
0437   return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
0438 }
0439 
0440 /**
0441  * @brief Initializes a scheduler node.
0442  *
0443  * The scheduler node contains arbitrary data on function entry.  The caller
0444  * must ensure that _Scheduler_Node_destroy() will be called after a
0445  * _Scheduler_Node_initialize() before the memory of the scheduler node is
0446  * destroyed.
0447  *
0448  * @param scheduler The scheduler instance.
0449  * @param[out] node The scheduler node to initialize.
0450  * @param the_thread The thread of the scheduler node to initialize.
0451  * @param priority The thread priority.
0452  */
0453 static inline void _Scheduler_Node_initialize(
0454   const Scheduler_Control *scheduler,
0455   Scheduler_Node          *node,
0456   Thread_Control          *the_thread,
0457   Priority_Control         priority
0458 )
0459 {
0460   ( *scheduler->Operations.node_initialize )(
0461     scheduler,
0462     node,
0463     the_thread,
0464     priority
0465   );
0466 }
0467 
0468 /**
0469  * @brief Destroys a scheduler node.
0470  *
0471  * The caller must ensure that _Scheduler_Node_destroy() will be called only
0472  * after a corresponding _Scheduler_Node_initialize().
0473  *
0474  * @param scheduler The scheduler instance.
0475  * @param[out] node The scheduler node to destroy.
0476  */
0477 static inline void _Scheduler_Node_destroy(
0478   const Scheduler_Control *scheduler,
0479   Scheduler_Node          *node
0480 )
0481 {
0482   ( *scheduler->Operations.node_destroy )( scheduler, node );
0483 }
0484 
0485 /**
0486  * @brief Releases a job of a thread with respect to the scheduler.
0487  *
0488  * @param the_thread The thread.
0489  * @param priority_node The priority node of the job.
0490  * @param deadline The deadline in watchdog ticks since boot.
0491  * @param queue_context The thread queue context to provide the set of
0492  *   threads for _Thread_Priority_update().
0493  */
0494 static inline void _Scheduler_Release_job(
0495   Thread_Control       *the_thread,
0496   Priority_Node        *priority_node,
0497   uint64_t              deadline,
0498   Thread_queue_Context *queue_context
0499 )
0500 {
0501   const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
0502 
0503   _Thread_queue_Context_clear_priority_updates( queue_context );
0504   ( *scheduler->Operations.release_job )(
0505     scheduler,
0506     the_thread,
0507     priority_node,
0508     deadline,
0509     queue_context
0510   );
0511 }
0512 
0513 /**
0514  * @brief Cancels a job of a thread with respect to the scheduler.
0515  *
0516  * @param the_thread The thread.
0517  * @param priority_node The priority node of the job.
0518  * @param queue_context The thread queue context to provide the set of
0519  *   threads for _Thread_Priority_update().
0520  */
0521 static inline void _Scheduler_Cancel_job(
0522   Thread_Control       *the_thread,
0523   Priority_Node        *priority_node,
0524   Thread_queue_Context *queue_context
0525 )
0526 {
0527   const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
0528 
0529   _Thread_queue_Context_clear_priority_updates( queue_context );
0530   ( *scheduler->Operations.cancel_job )(
0531     scheduler,
0532     the_thread,
0533     priority_node,
0534     queue_context
0535   );
0536 }
0537 
0538 /**
0539  * @brief Starts the idle thread for a particular processor.
0540  *
0541  * @param scheduler The scheduler instance.
0542  * @param[in,out] the_thread The idle thread for the processor.
0543  * @param[in,out] cpu The processor for the idle thread.
0544  *
0545  * @see _Thread_Create_idle().
0546  */
0547 static inline void _Scheduler_Start_idle(
0548   const Scheduler_Control *scheduler,
0549   Thread_Control          *the_thread,
0550   Per_CPU_Control         *cpu
0551 )
0552 {
0553   ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
0554 }
0555 
0556 /**
0557  * @brief Checks if the scheduler of the cpu with the given index is equal
0558  *      to the given scheduler.
0559  *
0560  * @param scheduler The scheduler for the comparison.
0561  * @param cpu_index The index of the cpu for the comparison.
0562  *
0563  * @retval true The scheduler of the cpu is the given @a scheduler.
0564  * @retval false The scheduler of the cpu is not the given @a scheduler.
0565  */
0566 static inline bool _Scheduler_Has_processor_ownership(
0567   const Scheduler_Control *scheduler,
0568   uint32_t                 cpu_index
0569 )
0570 {
0571 #if defined(RTEMS_SMP)
0572   const Per_CPU_Control   *cpu;
0573   const Scheduler_Control *scheduler_of_cpu;
0574 
0575   cpu = _Per_CPU_Get_by_index( cpu_index );
0576   scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );
0577 
0578   return scheduler_of_cpu == scheduler;
0579 #else
0580   (void) scheduler;
0581   (void) cpu_index;
0582 
0583   return true;
0584 #endif
0585 }
0586 
0587 /**
0588  * @brief Gets the processors of the scheduler
0589  *
0590  * @param scheduler The scheduler to get the processors of.
0591  *
0592  * @return The processors of the context of the given scheduler.
0593  */
0594 static inline const Processor_mask *_Scheduler_Get_processors(
0595   const Scheduler_Control *scheduler
0596 )
0597 {
0598 #if defined(RTEMS_SMP)
0599   return &_Scheduler_Get_context( scheduler )->Processors;
0600 #else
0601   return &_Processor_mask_The_one_and_only;
0602 #endif
0603 }
0604 
0605 /**
0606  * @brief Copies the thread's scheduler's affinity to the given cpuset.
0607  *
0608  * @param the_thread The thread to get the affinity of its scheduler.
0609  * @param cpusetsize The size of @a cpuset.
0610  * @param[out] cpuset The cpuset that serves as destination for the copy operation
0611  *
0612  * @retval STATUS_SUCCESSFUL The operation succeeded.
0613  *
0614  * @retval STATUS_INVALID_SIZE The processor set was too small.
0615  */
0616 Status_Control _Scheduler_Get_affinity(
0617   Thread_Control *the_thread,
0618   size_t          cpusetsize,
0619   cpu_set_t      *cpuset
0620 );
0621 
0622 /**
0623  * @brief Checks if the affinity is a subset of the online processors.
0624  *
0625  * @param scheduler This parameter is unused.
0626  * @param the_thread This parameter is unused.
0627  * @param node This parameter is unused.
0628  * @param affinity The processor mask to check.
0629  *
0630  * @retval STATUS_SUCCESSFUL The affinity is a subset of the online processors.
0631  *
0632  * @retval STATUS_INVALID_NUMBER The affinity is not a subset of the online
0633  *   processors.
0634  */
0635 static inline Status_Control _Scheduler_default_Set_affinity_body(
0636   const Scheduler_Control *scheduler,
0637   Thread_Control          *the_thread,
0638   Scheduler_Node          *node,
0639   const Processor_mask    *affinity
0640 )
0641 {
0642   (void) scheduler;
0643   (void) the_thread;
0644   (void) node;
0645 
0646   if ( !_Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() ) ) {
0647     return STATUS_INVALID_NUMBER;
0648   }
0649 
0650   return STATUS_SUCCESSFUL;
0651 }
0652 
0653 /**
0654  * @brief Sets the thread's scheduler's affinity.
0655  *
0656  * @param[in, out] the_thread The thread to set the affinity of.
0657  * @param cpusetsize The size of @a cpuset.
0658  * @param cpuset The cpuset to set the affinity.
0659  *
0660  * @retval STATUS_SUCCESSFUL The operation succeeded.
0661  *
0662  * @retval STATUS_INVALID_NUMBER The processor set was not a valid new
0663  *   processor affinity set for the thread.
0664  */
0665 Status_Control _Scheduler_Set_affinity(
0666   Thread_Control  *the_thread,
0667   size_t           cpusetsize,
0668   const cpu_set_t *cpuset
0669 );
0670 
0671 /**
0672  * @brief Gets the number of processors of the scheduler.
0673  *
0674  * @param scheduler The scheduler instance to get the number of processors of.
0675  *
0676  * @return The number of processors.
0677  */
0678 static inline uint32_t _Scheduler_Get_processor_count(
0679   const Scheduler_Control *scheduler
0680 )
0681 {
0682 #if defined(RTEMS_SMP)
0683   const Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0684 
0685   return _Processor_mask_Count( &context->Processors );
0686 #else
0687   (void) scheduler;
0688 
0689   return 1;
0690 #endif
0691 }
0692 
0693 /**
0694  * @brief Builds an object build id.
0695  *
0696  * @param scheduler_index The index to build the build id out of.
0697  *
0698  * @return The build id.
0699  */
0700 static inline Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
0701 {
0702   return _Objects_Build_id(
0703     OBJECTS_FAKE_OBJECTS_API,
0704     OBJECTS_FAKE_OBJECTS_SCHEDULERS,
0705     _Objects_Local_node,
0706     (uint16_t) ( scheduler_index + 1 )
0707   );
0708 }
0709 
0710 /**
0711  * @brief Gets the scheduler index from the given object build id.
0712  *
0713  * @param id The object build id.
0714  *
0715  * @return The scheduler index.
0716  */
0717 static inline uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
0718 {
0719   uint32_t minimum_id = _Scheduler_Build_id( 0 );
0720 
0721   return id - minimum_id;
0722 }
0723 
0724 /**
0725  * @brief Gets the scheduler from the given object build id.
0726  *
0727  * @param id The object build id.
0728  *
0729  * @return The scheduler to the object id.
0730  */
0731 static inline const Scheduler_Control *_Scheduler_Get_by_id(
0732   Objects_Id id
0733 )
0734 {
0735   uint32_t index;
0736 
0737   index = _Scheduler_Get_index_by_id( id );
0738 
0739   if ( index >= _Scheduler_Count ) {
0740     return NULL;
0741   }
0742 
0743   return &_Scheduler_Table[ index ];
0744 }
0745 
0746 /**
0747  * @brief Gets the index of the scheduler
0748  *
0749  * @param scheduler The scheduler to get the index of.
0750  *
0751  * @return The index of the given scheduler.
0752  */
0753 static inline uint32_t _Scheduler_Get_index(
0754   const Scheduler_Control *scheduler
0755 )
0756 {
0757   return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
0758 }
0759 
0760 #if defined(RTEMS_SMP)
0761 /**
0762  * @brief Gets a scheduler node which is owned by an unused idle thread.
0763  *
0764  * @param arg is the handler argument.
0765  *
0766  * @return Returns a scheduler node owned by an idle thread for use.  This
0767  *   handler must always return a node.  If none is available, then this is a
0768  *   fatal error.
0769  */
0770 typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );
0771 
0772 /**
0773  * @brief Releases the scheduler node which is owned by an idle thread.
0774  *
0775  * @param node is the node to release.
0776  *
0777  * @param arg is the handler argument.
0778  */
0779 typedef void ( *Scheduler_Release_idle_node )(
0780   Scheduler_Node *node,
0781   void           *arg
0782 );
0783 
0784 /**
0785  * @brief Changes the threads state to the given new state.
0786  *
0787  * @param[out] the_thread The thread to change the state of.
0788  * @param new_state The new state for @a the_thread.
0789  */
static inline void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  /*
   * The state change is only allowed if the caller owns the thread scheduler
   * lock, the thread is currently blocked with respect to the scheduler, or
   * the system is not yet up (e.g. during initialization).
   */
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
0803 
0804 /**
0805  * @brief Uses an idle thread for the scheduler node.
0806  *
0807  * @param[in, out] node is the node which wants to use an idle thread.
0808  *
0809  * @param get_idle_node is the get idle node handler.
0810  *
0811  * @param arg is the handler argument.
0812  */
0813 static inline Thread_Control *_Scheduler_Use_idle_thread(
0814   Scheduler_Node          *node,
0815   Scheduler_Get_idle_node  get_idle_node,
0816   void                    *arg
0817 )
0818 {
0819   Scheduler_Node *idle_node;
0820   Thread_Control *idle;
0821 
0822   idle_node = ( *get_idle_node )( arg );
0823   idle = _Scheduler_Node_get_owner( idle_node );
0824   _Assert( idle->is_idle );
0825   _Scheduler_Node_set_idle_user( node, idle );
0826 
0827   return idle;
0828 }
0829 
0830 /**
0831  * @brief Releases the idle thread used by the scheduler node.
0832  *
0833  * @param[in, out] node is the node which wants to release the idle thread.
0834  *
0835  * @param idle is the idle thread to release.
0836  *
0837  * @param release_idle_node is the release idle node handler.
0838  *
0839  * @param arg is the handler argument.
0840  */
0841 static inline void _Scheduler_Release_idle_thread(
0842   Scheduler_Node             *node,
0843   const Thread_Control       *idle,
0844   Scheduler_Release_idle_node release_idle_node,
0845   void                       *arg
0846 )
0847 {
0848   Thread_Control *owner;
0849   Scheduler_Node *idle_node;
0850 
0851   owner = _Scheduler_Node_get_owner( node );
0852   _Assert( _Scheduler_Node_get_user( node ) == idle );
0853   _Scheduler_Node_set_user( node, owner );
0854   node->idle = NULL;
0855   idle_node = _Thread_Scheduler_get_home_node( idle );
0856   ( *release_idle_node )( idle_node, arg );
0857 }
0858 
0859 /**
0860  * @brief Releases the idle thread used by the scheduler node if the node uses
0861  *   an idle thread.
0862  *
0863  * @param[in, out] node is the node which wants to release the idle thread.
0864  *
0865  * @param release_idle_node is the release idle node handler.
0866  *
0867  * @param arg is the handler argument.
0868  *
0869  * @retval NULL The scheduler node did not use an idle thread.
0870  *
0871  * @return Returns the idle thread used by the scheduler node.
0872  */
0873 static inline Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
0874   Scheduler_Node             *node,
0875   Scheduler_Release_idle_node release_idle_node,
0876   void                        *arg
0877 )
0878 {
0879   Thread_Control *idle;
0880 
0881   idle = _Scheduler_Node_get_idle( node );
0882 
0883   if ( idle != NULL ) {
0884     _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
0885   }
0886 
0887   return idle;
0888 }
0889 
0890 /**
0891  * @brief Discards the idle thread used by the scheduler node.
0892  *
0893  * @param[in, out] the_thread is the thread owning the node.
0894  *
0895  * @param[in, out] node is the node which wants to release the idle thread.
0896  *
0897  * @param release_idle_node is the release idle node handler.
0898  *
0899  * @param arg is the handler argument.
0900  */
0901 static inline void _Scheduler_Discard_idle_thread(
0902   Thread_Control             *the_thread,
0903   Scheduler_Node             *node,
0904   Scheduler_Release_idle_node release_idle_node,
0905   void                       *arg
0906 )
0907 {
0908   Thread_Control  *idle;
0909   Per_CPU_Control *cpu;
0910 
0911   idle = _Scheduler_Node_get_idle( node );
0912   _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
0913 
0914   cpu = _Thread_Get_CPU( idle );
0915   _Thread_Set_CPU( the_thread, cpu );
0916   _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
0917 }
0918 #endif
0919 
0920 /**
0921  * @brief Sets a new scheduler.
0922  *
0923  * @param new_scheduler The new scheduler to set.
0924  * @param[in, out] the_thread The thread for the operations.
0925  * @param priority The initial priority for the thread with the new scheduler.
0926  *
0927  * @retval STATUS_SUCCESSFUL The operation succeeded.
0928  * @retval STATUS_RESOURCE_IN_USE The thread's wait queue is not empty.
0929  * @retval STATUS_UNSATISFIED The new scheduler has no processors.
0930  */
static inline Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

#if defined(RTEMS_SCORE_THREAD_HAS_SCHEDULER_CHANGE_INHIBITORS)
  if ( the_thread->is_scheduler_change_inhibited ) {
    return STATUS_RESOURCE_IN_USE;
  }
#endif

  /* A thread enqueued on a thread queue must not change its scheduler */
  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  /*
   * Tentatively remove the real priority from the old home scheduler node.
   * It is re-inserted on every error path below.
   */
  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  /*
   * Reject the change if the thread has additional priority contributions,
   * helping requests, or is pinned to a processor.
   */
  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    /* Roll back the tentative priority extraction */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  /*
   * The new scheduler must own at least one processor and accept the thread
   * affinity, otherwise the scheduler change is unsatisfied.
   */
  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || ( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      ) != STATUS_SUCCESSFUL
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    /* Roll back the tentative priority extraction */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  /* On uniprocessor configurations, the scheduler node stays the same */
  new_scheduler_node = old_scheduler_node;
#endif

  /* Install the new real priority in the new home scheduler node */
  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    /* Block a ready thread while it migrates between schedulers */
    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    /* Move the wait and scheduler node chains to the new home node */
    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority(
      new_scheduler_node,
      priority,
      PRIORITY_GROUP_LAST
    );

    /* Unblock the thread with respect to the new scheduler */
    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  /* Same scheduler instance: only the priority has to be updated */
  _Scheduler_Node_set_priority(
    new_scheduler_node,
    priority,
    PRIORITY_GROUP_LAST
  );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1070 
1071 /** @} */
1072 
1073 #ifdef __cplusplus
1074 }
1075 #endif
1076 
1077 #endif
1078 /* end of include file */