/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreSchedulerSMPEDF
 *
 * @brief This source file contains the implementation of
 *   _Scheduler_EDF_SMP_Add_processor(), _Scheduler_EDF_SMP_Ask_for_help(),
 *   _Scheduler_EDF_SMP_Block(), _Scheduler_EDF_SMP_Initialize(),
 *   _Scheduler_EDF_SMP_Node_initialize(), _Scheduler_EDF_SMP_Pin(),
 *   _Scheduler_EDF_SMP_Reconsider_help_request(),
 *   _Scheduler_EDF_SMP_Remove_processor(), _Scheduler_EDF_SMP_Set_affinity(),
 *   _Scheduler_EDF_SMP_Start_idle(), _Scheduler_EDF_SMP_Unblock(),
 *   _Scheduler_EDF_SMP_Unpin(), _Scheduler_EDF_SMP_Update_priority(),
 *   _Scheduler_EDF_SMP_Withdraw_node(), _Scheduler_EDF_SMP_Make_sticky(),
 *   _Scheduler_EDF_SMP_Clean_sticky(), and _Scheduler_EDF_SMP_Yield().
 */

/*
 * Copyright (c) 2017 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

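/*
 * Ready queue layout: Ready[ 0 ] is the global EDF ready queue.  For the
 * processor with index i, Ready[ i + 1 ] is the affine ready queue which
 * holds the ready nodes pinned or one-to-one affine to this processor (see
 * _Scheduler_EDF_SMP_Pin() and _Scheduler_EDF_SMP_Set_affinity()).  Nodes
 * with a ready queue index of zero may run on every processor of the
 * scheduler.
 *
 * The Affine_queues chain contains exactly the non-empty affine ready
 * queues which currently have no affine node scheduled.  It is maintained
 * by the insert and extract operations below, so that
 * _Scheduler_EDF_SMP_Get_highest_ready() only has to scan queues which may
 * offer a better candidate.
 */
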
static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

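/*
 * Ready queue order: lower priority values are more urgent.  For the EDF
 * scheduler, the priority value is derived from the deadline, so this
 * yields earliest-deadline-first ordering.
 */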
static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

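/*
 * Order of the scheduled chain: by priority value first, then by insertion
 * generation, so that nodes with equal priority keep a stable order (see
 * _Scheduler_EDF_SMP_Update_generation()).
 */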
static inline bool _Scheduler_EDF_SMP_Overall_less_equal(
  const void       *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  Priority_Control              insert_priority;
  Priority_Control              next_priority;
  const Scheduler_EDF_SMP_Node *node_to_insert;
  const Scheduler_EDF_SMP_Node *node_next;

  insert_priority = *(const Priority_Control *) key;
  insert_priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
  node_to_insert = (const Scheduler_EDF_SMP_Node *) to_insert;
  node_next = (const Scheduler_EDF_SMP_Node *) next;
  next_priority = node_next->Base.priority;

  return insert_priority < next_priority ||
    ( insert_priority == next_priority &&
      node_to_insert->generation <= node_next->generation );
}

void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint8_t                    rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  If it belongs to an affine thread, we also have to check the
   * corresponding affine ready queue.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

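  /*
   * Consider the non-empty affine ready queues without a scheduled affine
   * node; each of them may hold a node which is overall more urgent than
   * the current candidate.
   */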
  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

static inline void _Scheduler_EDF_SMP_Set_allocated(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *allocated,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].allocated = allocated;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_allocated(
  const Scheduler_EDF_SMP_Context *self,
  uint8_t                          rqi
)
{
  return self->Ready[ rqi ].allocated;
}

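/*
 * If the filter node is affine to a processor which currently executes an
 * affine node, then this affine node is the only scheduled node the filter
 * node could displace.
 */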
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint8_t                 rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *affine_scheduled;

    self = _Scheduler_EDF_SMP_Get_self( context );
    affine_scheduled = self->Ready[ rqi ].affine_scheduled;

    if ( affine_scheduled != NULL ) {
      _Assert( affine_scheduled->ready_queue_index == rqi );
      return &affine_scheduled->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}

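/*
 * Hand out the insertion generation used by
 * _Scheduler_EDF_SMP_Overall_less_equal() to break ties between nodes of
 * equal priority.  Append insertions (generation_index == 1) use a
 * generation counter which increases (increment == ( 1 << 1 ) - 1 == 1),
 * prepend insertions (generation_index == 0) use one which decreases
 * (increment == ( 0 << 1 ) - 1 == -1).  A freshly appended node thus has
 * the largest generation within its priority and ends up behind its
 * equal-priority peers (FIFO), while a freshly prepended node has the
 * smallest generation and ends up in front of them.
 */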
static inline void _Scheduler_EDF_SMP_Update_generation(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *node;
  int                        generation_index;
  int                        increment;
  int64_t                    generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
  increment = ( generation_index << 1 ) - 1;

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;
}

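/*
 * Insert the node into the scheduled chain.  If the node is affine to a
 * processor, record it as the affine scheduled node of its ready queue and
 * take the queue off the Affine_queues chain: while an affine node is
 * scheduled, preemption on its processor goes through
 * _Scheduler_EDF_SMP_Get_lowest_scheduled(), so the queue does not need to
 * be scanned by _Scheduler_EDF_SMP_Get_highest_ready().
 */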
static inline void _Scheduler_EDF_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   priority_to_insert
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Chain_Insert_ordered_unprotected(
    &self->Base.Scheduled,
    &node_base->Node.Chain,
    &priority_to_insert,
    _Scheduler_EDF_SMP_Overall_less_equal
  );

  if ( rqi != 0 ) {
    ready_queue->affine_scheduled = node;

    if ( !_RBTree_Is_empty( &ready_queue->Queue ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
    }
  }
}

static inline void _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary(
  Scheduler_EDF_SMP_Context     *self,
  uint8_t                        rqi,
  Scheduler_EDF_SMP_Ready_queue *ready_queue
)
{
  if (
    rqi != 0 &&
    _RBTree_Is_empty( &ready_queue->Queue ) &&
    ready_queue->affine_scheduled == NULL
  ) {
    _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &insert_priority,
    _Scheduler_EDF_SMP_Priority_less_equal
  );
}

static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );

  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
    _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
  }

  ready_queue->affine_scheduled = NULL;
}

static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && ready_queue->affine_scheduled == NULL
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  _Scheduler_EDF_SMP_Extract_from_scheduled( context, scheduled_to_ready );

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Prepend( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_EDF_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

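/*
 * Get an idle node.  Idle nodes are released to the least urgent end of the
 * global ready queue (see _Scheduler_EDF_SMP_Release_idle()), so the
 * maximum of this queue is an idle node whenever one is available.
 */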
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_Node            *lowest_ready;

  self = _Scheduler_EDF_SMP_Get_self( arg );
  lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
  _Assert( lowest_ready != NULL );
  _RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
  _Chain_Initialize_node( &lowest_ready->Node.Chain );

  return lowest_ready;
}

static inline void _Scheduler_EDF_SMP_Release_idle(
  Scheduler_Node *node,
  void           *arg
)
{
  Scheduler_EDF_SMP_Context *self;

  self = _Scheduler_EDF_SMP_Get_self( arg );
  _RBTree_Initialize_node( &node->Node.RBTree );
  _RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
}

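/*
 * Allocate a processor to the scheduled node.  If the node is affine to a
 * processor other than the offered one, then swap the allocations: the node
 * currently allocated to the affine processor is by the assertion below a
 * non-affine node, so it can move to the offered processor, while the
 * affine node takes its own processor.
 */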
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint8_t                        rqi;

  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Per_CPU_Control *affine_cpu;

    affine_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( cpu != affine_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_allocated( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_allocated( self, node, cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        cpu
      );
      cpu = affine_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_allocated( self, scheduled, cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    cpu
  );
}

void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );

  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline void _Scheduler_EDF_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );
  _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle
  );
}

void _Scheduler_EDF_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Make_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue
  );
}

void _Scheduler_EDF_SMP_Clean_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Clean_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_allocated( self, idle, cpu );
  _Scheduler_EDF_SMP_Update_generation(
    context,
    idle_base,
    PRIORITY_GROUP_LAST
  );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled
  );
}

static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint8_t          *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

void _Scheduler_EDF_SMP_Pin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;
  uint8_t                 rqi;

  (void) scheduler;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

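  /*
   * Ready queue index zero is the global ready queue, so the processor with
   * index i maps to the affine ready queue with index i + 1.
   */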
  rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
  node->ready_queue_index = rqi;
  node->pinning_ready_queue_index = rqi;
}

void _Scheduler_EDF_SMP_Unpin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;

  (void) scheduler;
  (void) cpu;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

  node->ready_queue_index = node->affinity_ready_queue_index;
  node->pinning_ready_queue_index = 0;
}

Status_Control _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context      *context;
  Scheduler_EDF_SMP_Node *node;
  uint8_t                 rqi;

  context = _Scheduler_Get_context( scheduler );

  /*
   * We support a thread to processor affinity which is either all online
   * processors or exactly one processor.  This restriction is necessary to
   * avoid issues if processors are added to or removed from the scheduler.
   */

  if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
    rqi = 0;
  } else {
    Processor_mask local_affinity;
    Processor_mask one_to_one;
    uint32_t       last;

    _Processor_mask_And( &local_affinity, &context->Processors, affinity );

    if ( _Processor_mask_Is_zero( &local_affinity ) ) {
      return STATUS_INVALID_NUMBER;
    }

    last = _Processor_mask_Find_last_set( affinity );
    _Processor_mask_From_index( &one_to_one, last - 1 );

    /*
     * Use the global affinity set and not the affinity set local to the
     * scheduler to check for a one-to-one affinity.
     */
    if ( !_Processor_mask_Is_equal( &one_to_one, affinity ) ) {
      return STATUS_INVALID_NUMBER;
    }

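    /*
     * _Processor_mask_Find_last_set() returns a one-based index, so the
     * processor index is last - 1 and the corresponding affine ready queue
     * index is exactly last.
     */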
    rqi = last;
  }

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  node->affinity_ready_queue_index = rqi;

  if ( node->pinning_ready_queue_index == 0 ) {
    _Scheduler_SMP_Set_affinity(
      context,
      thread,
      node_base,
      &rqi,
      _Scheduler_EDF_SMP_Do_set_affinity,
      _Scheduler_EDF_SMP_Extract_from_scheduled,
      _Scheduler_EDF_SMP_Extract_from_ready,
      _Scheduler_EDF_SMP_Get_highest_ready,
      _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
      _Scheduler_EDF_SMP_Enqueue,
      _Scheduler_EDF_SMP_Allocate_processor,
      _Scheduler_EDF_SMP_Get_idle,
      _Scheduler_EDF_SMP_Release_idle
    );
  }

  return STATUS_SUCCESSFUL;
}