Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:26

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreSchedulerPriorityAffinitySMP
0007  *
0008  * @brief This source file contains the implementation of
0009  *   _Scheduler_priority_affinity_SMP_Add_processor(),
0010  *   _Scheduler_priority_affinity_SMP_Ask_for_help(),
0011  *   _Scheduler_priority_affinity_SMP_Block(),
0012  *   _Scheduler_priority_affinity_SMP_Node_initialize(),
0013  *   _Scheduler_priority_affinity_SMP_Reconsider_help_request(),
0014  *   _Scheduler_priority_affinity_SMP_Remove_processor(),
0015  *   _Scheduler_priority_affinity_SMP_Set_affinity(),
0016  *   _Scheduler_priority_affinity_SMP_Unblock(),
0017  *   _Scheduler_priority_affinity_SMP_Update_priority(),
0018  *   _Scheduler_priority_affinity_SMP_Withdraw_node(),
0019  *   _Scheduler_priority_affinity_SMP_Make_sticky(), and
0020  *   _Scheduler_priority_affinity_SMP_Clean_sticky().
0021  */
0022 
0023 /*
0024  *  COPYRIGHT (c) 2014.
0025  *  On-Line Applications Research Corporation (OAR).
0026  *
0027  * Redistribution and use in source and binary forms, with or without
0028  * modification, are permitted provided that the following conditions
0029  * are met:
0030  * 1. Redistributions of source code must retain the above copyright
0031  *    notice, this list of conditions and the following disclaimer.
0032  * 2. Redistributions in binary form must reproduce the above copyright
0033  *    notice, this list of conditions and the following disclaimer in the
0034  *    documentation and/or other materials provided with the distribution.
0035  *
0036  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0037  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0038  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0039  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0040  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0041  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0042  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0043  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0044  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0045  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0046  * POSSIBILITY OF SUCH DAMAGE.
0047  */
0048 
0049 #ifdef HAVE_CONFIG_H
0050 #include "config.h"
0051 #endif
0052 
0053 #include <rtems/score/schedulerpriorityaffinitysmp.h>
0054 #include <rtems/score/schedulerpriorityimpl.h>
0055 #include <rtems/score/schedulersmpimpl.h>
0056 #include <rtems/score/schedulerprioritysmpimpl.h>
0057 #include <rtems/score/priority.h>
0058 
/*
 * The following methods, which initially were static in
 * schedulerprioritysmp.c, were made public so that they can be shared
 * with this scheduler:
 *
0063  *  + _Scheduler_priority_SMP_Get_self
0064  *  + _Scheduler_priority_SMP_Insert_ready_fifo
0065  *  + _Scheduler_priority_SMP_Insert_ready_lifo
0066  *  + _Scheduler_priority_SMP_Thread_get_node
0067  *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
0068  *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
0069  *  + _Scheduler_priority_SMP_Extract_from_ready
0070  *  + _Scheduler_priority_SMP_Do_update
0071  */
0072 
0073 static bool _Scheduler_priority_affinity_SMP_Priority_less_equal(
0074   const void       *key,
0075   const Chain_Node *to_insert,
0076   const Chain_Node *next
0077 )
0078 {
0079   return next != NULL
0080     && _Scheduler_SMP_Priority_less_equal( key, to_insert, next );
0081 }
0082 
0083 static Scheduler_priority_affinity_SMP_Node *
0084 _Scheduler_priority_affinity_SMP_Node_downcast(
0085   Scheduler_Node *node
0086 )
0087 {
0088   return (Scheduler_priority_affinity_SMP_Node *) node;
0089 }
0090 
/*
 * This method initializes the scheduler control information for
 * this scheduler instance. It performs the base priority SMP node
 * initialization and then sets the default processor affinity.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  _Scheduler_priority_SMP_Node_initialize( scheduler, node, the_thread, priority );

  /*
   *  All we add is affinity information to the basic SMP node.
   *  The default affinity is the set of all online processors.
   */
  the_node = _Scheduler_priority_affinity_SMP_Node_downcast( node );
  _Processor_mask_Assign( &the_node->Affinity, _SMP_Get_online_processors() );
}
0112 
/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * This is done when we need to check if reevaluations are needed.
   * With no victim there is no processor to match affinity against,
   * so simply return the overall highest priority ready node.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  /* The affinity check is against the processor the victim executes on. */
  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  /*
   * Scan the ready queues from the highest priority present (smallest
   * numeric index) towards PRIORITY_MAXIMUM and return the first node
   * whose affinity set contains the victim's processor.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( _Processor_mask_Is_set( &node->Affinity, victim_cpu_index ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}
0191 
/*
 * This method is very similar to _Scheduler_priority_SMP_Block
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method, which honors affinity.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}
0224 
/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread. It ignores those which cannot be replaced by the filter
 * thread because the potential victim thread does not have affinity
 * for that processor.
 *
 * Returns NULL if no scheduled thread executes on a processor in the
 * filter thread's affinity set.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  /*
   * Walk the scheduled chain from the tail (lowest priority end) towards
   * the head and stop at the first node the filter thread could replace.
   */
  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /* cpu_index is the processor number thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( _Processor_mask_Is_set( &filter->Affinity, cpu_index ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}
0266 
/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue.
 *
 * NOTE(review): this function is currently identical to
 * _Scheduler_priority_affinity_SMP_Enqueue below — confirm whether the
 * duplication is intentional.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_priority_affinity_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}
0293 
/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready thread possible can be
 * scheduled to execute. When we schedule with affinity, there is the
 * possibility that we need to migrate a thread to another core to ensure
 * that the highest priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_priority_SMP_Context *self;
  Scheduler_Node                 *lowest_scheduled;
  Scheduler_Node                 *highest_ready;

  self = _Scheduler_priority_SMP_Get_self( context );

  /* Repeat until no further beneficial swap between the ready and
   * scheduled sets can be found. */
  while (1) {
    Priority_Control lowest_scheduled_priority;
    Priority_Control insert_priority;

    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
      /* Nothing to do */
      break;
    }

    /* NULL victim: fetch the overall highest ready node, ignoring affinity. */
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready
      );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */

    if ( lowest_scheduled == NULL )
      break;

    lowest_scheduled_priority =
      _Scheduler_SMP_Node_priority( lowest_scheduled );

    /* Stop when the candidate victim is not lower priority than the
     * highest ready thread; no migration would improve the schedule. */
    if (
      _Scheduler_SMP_Priority_less_equal(
        &lowest_scheduled_priority,
        &lowest_scheduled->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      break;
    }

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */

    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
    insert_priority = _Scheduler_SMP_Node_priority( highest_ready );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      highest_ready,
      insert_priority,
      lowest_scheduled,
      _Scheduler_SMP_Insert_scheduled,
      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
      _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
      _Scheduler_SMP_Allocate_processor_exact,
      _Scheduler_priority_SMP_Get_idle,
      _Scheduler_priority_SMP_Release_idle
    );
  }
}
0377 
/*
 * This is the public scheduler specific Unblock operation. It performs
 * the common SMP unblock with this scheduler's FIFO enqueue helper and
 * then checks whether any affinity-driven migrations are required.
 */
void _Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Release_idle
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}
0403 
/*
 *  This is unique to this scheduler because it passes scheduler specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue.
 *
 *  NOTE(review): currently identical to
 *  _Scheduler_priority_affinity_SMP_Enqueue_fifo above — confirm whether
 *  the duplication is intentional.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_priority_affinity_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}
0429 
/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled() with
 * this scheduler's get_highest_ready() helper, which honors affinity.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}
0456 
/*
 * Forwards an ask-for-help request to the common SMP implementation.
 *
 * NOTE(review): this path uses the affinity-agnostic
 * _Scheduler_SMP_Get_lowest_scheduled helper and lazy processor
 * allocation, unlike the exact allocation used elsewhere in this
 * scheduler — confirm intended.
 */
static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_priority_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Release_idle
  );
}
0476 
/*
 * This is the public scheduler specific Update_priority operation. It
 * performs the common SMP priority update with this scheduler's enqueue
 * helpers and then checks whether migrations are required.
 */
void _Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    _Scheduler_priority_affinity_SMP_Do_ask_for_help
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}
0502 
0503 bool _Scheduler_priority_affinity_SMP_Ask_for_help(
0504   const Scheduler_Control *scheduler,
0505   Thread_Control          *the_thread,
0506   Scheduler_Node          *node
0507 )
0508 {
0509   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0510 
0511   return _Scheduler_priority_affinity_SMP_Do_ask_for_help( context, the_thread, node );
0512 }
0513 
/*
 * This is the public scheduler specific Reconsider_help_request operation.
 * It delegates to the common SMP implementation with this scheduler's
 * ready-queue extraction helper.
 */
void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}
0529 
/*
 * This is the public scheduler specific Withdraw_node operation. It
 * delegates to the common SMP implementation with this scheduler's
 * affinity-aware get_highest_ready() helper.
 *
 * NOTE(review): uses lazy processor allocation here, unlike the exact
 * allocation used by Block/Enqueue in this scheduler — confirm intended.
 */
void _Scheduler_priority_affinity_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy,
    _Scheduler_priority_SMP_Get_idle
  );
}
0552 
/*
 * This is the public scheduler specific Make_sticky operation. It
 * delegates to the common SMP implementation with this scheduler's
 * update and enqueue helpers.
 */
void _Scheduler_priority_affinity_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Make_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue
  );
}
0567 
/*
 * This is the public scheduler specific Clean_sticky operation. It
 * delegates to the common SMP implementation with this scheduler's
 * affinity-aware get_highest_ready() helper.
 */
void _Scheduler_priority_affinity_SMP_Clean_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Clean_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}
0587 
/*
 * This is the public scheduler specific Add_processor operation. It
 * delegates to the common SMP implementation with this scheduler's
 * enqueue-scheduled helper.
 */
void _Scheduler_priority_affinity_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_priority_SMP_Has_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}
0603 
/*
 * This is the public scheduler specific Remove_processor operation. It
 * delegates to the common SMP implementation and returns whatever thread
 * that implementation yields for the removed processor.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_SMP_Get_idle,
    _Scheduler_priority_SMP_Release_idle
  );
}
0621 
/*
 * This is the public scheduler specific Yield operation. It delegates to
 * the common SMP implementation with this scheduler's enqueue helpers.
 */
void _Scheduler_priority_affinity_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled
  );
}
0640 
/*
 * This is the public scheduler specific Set_affinity operation. It
 * validates the requested affinity set against this scheduler's
 * processors, stores the new set in the node, and re-queues a ready
 * thread via a block/unblock cycle so that the new set takes effect.
 *
 * Returns STATUS_INVALID_NUMBER if the requested set shares no processor
 * with this scheduler, STATUS_SUCCESSFUL otherwise.
 */
Status_Control _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context                    *context;
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;
  Processor_mask                        my_affinity;

  context = _Scheduler_Get_context( scheduler );
  /* Reject an affinity set with no processor owned by this scheduler. */
  _Processor_mask_And( &my_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Count( &my_affinity ) == 0 ) {
    return STATUS_INVALID_NUMBER;
  }

  node = _Scheduler_priority_affinity_SMP_Node_downcast( node_base );

  /*
   * The old and new set are the same, there is no point in
   * doing anything.
   */
  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
    return STATUS_SUCCESSFUL;

  current_state = thread->current_state;

  /*
   * A ready thread is blocked while the affinity changes and unblocked
   * afterwards so that it is re-placed according to the new set.
   */
  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread, &node->Base.Base.Base );
  }

  /*
   * NOTE(review): the stored set is the caller's full affinity, not the
   * intersection with this scheduler's processors — confirm intended.
   */
  _Processor_mask_Assign( &node->Affinity, affinity );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need for help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread, &node->Base.Base.Base );
  }

  return STATUS_SUCCESSFUL;
}