0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreSchedulerStrongAPA
0007  *
0008  * @brief This source file contains the implementation of
0009  *   _Scheduler_strong_APA_Add_processor(),
0010  *   _Scheduler_strong_APA_Allocate_processor(),
0011  *   _Scheduler_strong_APA_Ask_for_help(), _Scheduler_strong_APA_Block(),
0012  *   _Scheduler_strong_APA_Do_ask_for_help(),
0013  *   _Scheduler_strong_APA_Do_enqueue(),
0014  *   _Scheduler_strong_APA_Do_set_affinity(),
0015  *   _Scheduler_strong_APA_Do_update(), _Scheduler_strong_APA_Enqueue(),
0016  *   _Scheduler_strong_APA_Enqueue_scheduled(),
0017  *   _Scheduler_strong_APA_Extract_from_ready(),
0018  *   _Scheduler_strong_APA_Extract_from_scheduled(),
0019  *   _Scheduler_strong_APA_Find_highest_ready(),
0020  *   _Scheduler_strong_APA_Get_highest_ready(),
0021  *   _Scheduler_strong_APA_Get_lowest_reachable(),
0022  *   _Scheduler_strong_APA_Get_lowest_scheduled(),
0023  *   _Scheduler_strong_APA_Has_ready(),
0024  *   _Scheduler_strong_APA_Initialize(), _Scheduler_strong_APA_Insert_ready(),
0025  *   _Scheduler_strong_APA_Move_from_ready_to_scheduled(),
0026  *   _Scheduler_strong_APA_Move_from_scheduled_to_ready(),
0027  *   _Scheduler_strong_APA_Node_initialize(),
0028  *   _Scheduler_strong_APA_Reconsider_help_request(),
0029  *   _Scheduler_strong_APA_Register_idle(),
0030  *   _Scheduler_strong_APA_Remove_processor(),
0031  *   _Scheduler_strong_APA_Set_affinity(),
0032  *   _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(),
0033  *   _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(),
0034  *   _Scheduler_strong_APA_Withdraw_node(),
0035  *   _Scheduler_strong_APA_Make_sticky(), _Scheduler_strong_APA_Clean_sticky(),
0036  *   and _Scheduler_strong_APA_Yield().
0037  */
0038 
0039 /*
0040  * Copyright (C) 2020 Richi Dubey
0041  * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG
0042  *
0043  * Redistribution and use in source and binary forms, with or without
0044  * modification, are permitted provided that the following conditions
0045  * are met:
0046  * 1. Redistributions of source code must retain the above copyright
0047  *    notice, this list of conditions and the following disclaimer.
0048  * 2. Redistributions in binary form must reproduce the above copyright
0049  *    notice, this list of conditions and the following disclaimer in the
0050  *    documentation and/or other materials provided with the distribution.
0051  *
0052  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0053  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0054  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0055  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0056  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0057  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0058  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0059  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0060  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0061  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0062  * POSSIBILITY OF SUCH DAMAGE.
0063  */
0064 
0065 #ifdef HAVE_CONFIG_H
0066 #include "config.h"
0067 #endif
0068 
0069 #include <rtems/score/schedulerstrongapa.h>
0070 #include <rtems/score/schedulersmpimpl.h>
0071 #include <rtems/score/assert.h>
0072 
0073 #define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
0074   RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node )
0075 
0076 static inline Scheduler_strong_APA_Context *
0077 _Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
0078 {
0079   return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
0080 }
0081 
0082 static inline Scheduler_strong_APA_Context *
0083 _Scheduler_strong_APA_Get_self( Scheduler_Context *context )
0084 {
0085   return (Scheduler_strong_APA_Context *) context;
0086 }
0087 
0088 static inline Scheduler_strong_APA_Node *
0089 _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
0090 {
0091   return (Scheduler_strong_APA_Node *) node;
0092 }
0093 
0094 static inline void _Scheduler_strong_APA_Do_update(
0095   Scheduler_Context *context,
0096   Scheduler_Node    *node,
0097   Priority_Control   new_priority
0098 )
0099 {
0100   Scheduler_SMP_Node *smp_node;
0101   (void) context;
0102 
0103   smp_node = _Scheduler_SMP_Node_downcast( node );
0104   _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
0105 }
0106 
0107 /*
0108  * Returns true if the Strong APA scheduler has ready nodes
0109  * available for scheduling.
0110  */
0111 static inline bool _Scheduler_strong_APA_Has_ready(
0112   Scheduler_Context *context
0113 )
0114 {
0115   Scheduler_strong_APA_Context *self;
0116   const Chain_Node             *tail;
0117   Chain_Node                   *next;
0118   Scheduler_strong_APA_Node    *node;
0119 
0120   self = _Scheduler_strong_APA_Get_self( context );
0121   tail = _Chain_Immutable_tail( &self->Ready );
0122   next = _Chain_First( &self->Ready );
0123 
0124   while ( next != tail ) {
0125     node = (Scheduler_strong_APA_Node *)STRONG_SCHEDULER_NODE_OF_CHAIN( next );
0126 
0127     if (
0128       _Scheduler_SMP_Node_state( &node->Base.Base ) ==
0129       SCHEDULER_SMP_NODE_READY
0130     ) {
0131       return true;
0132     }
0133 
0134     next = _Chain_Next( next );
0135   }
0136 
0137   return false;
0138 }
0139 
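     /*
      * Records the node which is executing on the given CPU in the per-CPU
      * table of the scheduler context.
      */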
0140 static inline void _Scheduler_strong_APA_Set_scheduled(
0141   Scheduler_strong_APA_Context *self,
0142   Scheduler_Node                *executing,
0143   const Per_CPU_Control         *cpu
0144 )
0145 {
0146   self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
0147 }
0148 
0149 static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
0150   const Scheduler_strong_APA_Context *self,
0151   const Per_CPU_Control               *cpu
0152 )
0153 {
0154   return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
0155 }
0156 
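     /*
      * Records the node as the one executing on the CPU and lets the SMP
      * framework assign exactly this processor to it.
      */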
0157 static inline void _Scheduler_strong_APA_Allocate_processor(
0158   Scheduler_Context *context,
0159   Scheduler_Node    *scheduled_base,
0160   Per_CPU_Control   *cpu
0161 )
0162 {
0163   Scheduler_strong_APA_Node    *scheduled;
0164   Scheduler_strong_APA_Context *self;
0165 
0166   scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
0167   self = _Scheduler_strong_APA_Get_self( context );
0168 
0169   _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, cpu );
0170 
0171   _Scheduler_SMP_Allocate_processor_exact(
0172     context,
0173     &( scheduled->Base.Base ),
0174     cpu
0175   );
0176 }
0177 
0178 /*
0179  * Finds and returns the highest priority ready node reachable from the CPUs
0180  * queued in self->CPU between the front and rear indexes.
0181  */
0182 static inline Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
0183   Scheduler_strong_APA_Context *self,
0184   uint32_t                      front,
0185   uint32_t                      rear
0186 )
0187 {
0188   Scheduler_Node              *highest_ready = NULL;
0189   Scheduler_strong_APA_CPU    *CPU;
0190   const Chain_Node            *tail;
0191   Chain_Node                  *next;
0192   Scheduler_strong_APA_Node   *node;
0193   Priority_Control             min_priority_num;
0194   Priority_Control             curr_priority;
0195   Per_CPU_Control             *assigned_cpu;
0196   Scheduler_SMP_Node_state     curr_state;
0197   Per_CPU_Control             *curr_CPU;
0198 
0199   CPU = self->CPU;
0200   /*
0201    * The first task examined has nothing to compare its priority against,
0202    * so start with UINT64_MAX as the highest priority witnessed so far.
0203    */
0204   min_priority_num = UINT64_MAX;
0205 
0206   while ( front <= rear ) {
0207     curr_CPU = CPU[ front++ ].cpu;
0208 
0209     tail = _Chain_Immutable_tail( &self->Ready );
0210     next = _Chain_First( &self->Ready );
0211 
0212     while ( next != tail ) {
0213       node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
0214       /*
0215        * Check if the curr_CPU is in the affinity set of the node.
0216        */
0217       if (
0218         _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
0219       ) {
0220         curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
0221 
0222         if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
0223           assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
0224 
0225           if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
0226             CPU[ ++rear ].cpu = assigned_cpu;
0227             CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
0228             /*
0229              * The curr_CPU taken from the queue caused this node's CPU to
0230              * be appended to the queue.  This node may therefore get
0231              * preempted on behalf of curr_CPU, and curr_CPU is the CPU
0232              * whose scheduled node this node should preempt in case it
0233              * gets preempted itself.
0234              */
0235             node->cpu_to_preempt = curr_CPU;
0236           }
0237         } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
0238           curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
0239           curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
0240 
0241           if (
0242             min_priority_num == UINT64_MAX ||
0243             curr_priority < min_priority_num
0244           ) {
0245             min_priority_num = curr_priority;
0246             highest_ready = &node->Base.Base;
0247             /*
0248              * Store the cpu_to_preempt value so that, in case curr_CPU is
0249              * the filter CPU, control goes back to the _Scheduler_SMP_*
0250              * function rather than the preemption being done here.
0251              */
0252             node->cpu_to_preempt = curr_CPU;
0253           }
0254         }
0255       }
0256       next = _Chain_Next( next );
0257     }
0258   }
0259 
0260   /*
0261    * By definition, the system always has a ready node,
0262    * hence highest_ready cannot be NULL.
0263    */
0264   _Assert( highest_ready != NULL );
0265 
0266   return highest_ready;
0267 }
0268 
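     /*
      * Removes the lowest priority ready node from the Ready chain and returns
      * it.  Used as the get-idle-node handler of the SMP scheduler framework.
      */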
0269 static inline Scheduler_Node *_Scheduler_strong_APA_Get_idle( void *arg )
0270 {
0271   Scheduler_strong_APA_Context *self;
0272   Scheduler_strong_APA_Node    *lowest_ready = NULL;
0273   Priority_Control              max_priority_num;
0274   const Chain_Node             *tail;
0275   Chain_Node                   *next;
0276 
0277   self = _Scheduler_strong_APA_Get_self( arg );
0278   tail = _Chain_Immutable_tail( &self->Ready );
0279   next = _Chain_First( &self->Ready );
0280   max_priority_num = 0;
0281 
0282   while ( next != tail ) {
0283     Scheduler_strong_APA_Node *node;
0284     Scheduler_SMP_Node_state   curr_state;
0285 
0286     node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
0287     curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
0288 
0289     if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
0290       Priority_Control curr_priority;
0291 
0292       curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
0293 
0294       if ( curr_priority > max_priority_num ) {
0295         max_priority_num = curr_priority;
0296         lowest_ready = node;
0297       }
0298     }
0299 
0300     next = _Chain_Next( next );
0301   }
0302 
0303   _Assert( lowest_ready != NULL );
0304   _Chain_Extract_unprotected( &lowest_ready->Ready_node );
0305   _Chain_Set_off_chain( &lowest_ready->Ready_node );
0306 
0307   return &lowest_ready->Base.Base;
0308 }
0309 
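     /*
      * Returns a node which was used as an idle node to the Ready chain in
      * case it is not already on it.  This is the counterpart of
      * _Scheduler_strong_APA_Get_idle().
      */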
0310 static inline void _Scheduler_strong_APA_Release_idle(
0311   Scheduler_Node *node_base,
0312   void           *arg
0313 )
0314 {
0315   Scheduler_strong_APA_Context *self;
0316   Scheduler_strong_APA_Node    *node;
0317 
0318   self = _Scheduler_strong_APA_Get_self( arg );
0319   node = _Scheduler_strong_APA_Node_downcast( node_base );
0320 
0321   if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
0322     _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
0323   }
0324 }
0325 
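     /*
      * Inserts the node into the scheduled chain.  The node remains on the
      * Ready chain, which contains both ready and scheduled nodes.
      */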
0326 static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
0327   Scheduler_Context *context,
0328   Scheduler_Node    *ready_to_scheduled
0329 )
0330 {
0331   Priority_Control insert_priority;
0332 
0333   insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
0334   insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
0335   _Scheduler_SMP_Insert_scheduled(
0336     context,
0337     ready_to_scheduled,
0338     insert_priority
0339   );
0340 }
0341 
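     /*
      * Appends the node to the Ready chain, moving it to the tail in case it
      * is already on the chain.  The insert priority is not used because the
      * Ready chain is unordered and scanned completely by the lookup helpers.
      */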
0342 static inline void _Scheduler_strong_APA_Insert_ready(
0343   Scheduler_Context *context,
0344   Scheduler_Node    *node_base,
0345   Priority_Control   insert_priority
0346 )
0347 {
0348   Scheduler_strong_APA_Context *self;
0349   Scheduler_strong_APA_Node    *node;
0350 
0351   self = _Scheduler_strong_APA_Get_self( context );
0352   node = _Scheduler_strong_APA_Node_downcast( node_base );
0353 
0354   if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
0355     _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
0356   } else {
0357     _Chain_Extract_unprotected( &node->Ready_node );
0358     _Chain_Set_off_chain( &node->Ready_node );
0359     _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
0360   }
0361 }
0362 
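     /*
      * Extracts the node from the SMP scheduled chain, in case it is still on
      * it, and appends it to the Ready chain.
      */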
0363 static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
0364   Scheduler_Context *context,
0365   Scheduler_Node    *scheduled_to_ready
0366 )
0367 {
0368   Priority_Control insert_priority;
0369 
0370   if ( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
0371     _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
0372   }
0373 
0374   insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
0375 
0376   _Scheduler_strong_APA_Insert_ready(
0377     context,
0378     scheduled_to_ready,
0379     insert_priority
0380   );
0381 }
0382 
0383 /*
0384  * Implements the BFS algorithm for task departure: finds and returns the
0385  * highest priority ready Scheduler_Node for a particular CPU.  The filter
0386  * parameter points to the victim node whose departure (for example because
0387  * it is blocked) caused this function to be called.
0388  */
0389 static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
0390   Scheduler_Context *context,
0391   Scheduler_Node    *filter
0392 )
0393 {
0394   Scheduler_strong_APA_Context *self;
0395   Per_CPU_Control              *filter_cpu;
0396   Scheduler_strong_APA_Node    *node;
0397   Scheduler_Node               *highest_ready;
0398   Scheduler_Node               *curr_node;
0399   Scheduler_Node               *next_node;
0400   Scheduler_strong_APA_CPU     *CPU;
0401   uint32_t                      front;
0402   uint32_t                      rear;
0403   uint32_t                      cpu_max;
0404   uint32_t                      cpu_index;
0405 
0406   self = _Scheduler_strong_APA_Get_self( context );
0407   /*
0408    * Denotes front and rear of the queue
0409    */
0410   front = 0;
0411   rear = -1;
0412 
0413   filter_cpu = _Thread_Get_CPU( filter->user );
0414   CPU = self->CPU;
0415   cpu_max = _SMP_Get_processor_maximum();
0416 
0417   for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
0418     CPU[ cpu_index ].visited = false;
0419   }
0420 
0421   CPU[ ++rear ].cpu = filter_cpu;
0422   CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
0423 
0424   highest_ready = _Scheduler_strong_APA_Find_highest_ready(
0425                     self,
0426                     front,
0427                     rear
0428                   );
0429 
0430   if ( highest_ready != filter ) {
0431     /*
0432      * Backtrack on the path from
0433      * filter_cpu to highest_ready, shifting along every task.
0434      */
0435 
0436     node = _Scheduler_strong_APA_Node_downcast( highest_ready );
0437     /*
0438      * The highest ready node is not directly reachable from the victim CPU,
0439      * so the tasks along the path have to be shifted.
0440      */
0441     while ( node->cpu_to_preempt != filter_cpu ) {
0442       Thread_Control *next_node_idle;
0443 
0444       curr_node = &node->Base.Base;
0445       next_node = _Scheduler_strong_APA_Get_scheduled(
0446                     self,
0447                     node->cpu_to_preempt
0448                   );
0449       next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
0450         next_node,
0451         _Scheduler_strong_APA_Release_idle,
0452         context
0453       );
0454 
0455       (void) _Scheduler_SMP_Preempt(
0456                context,
0457                curr_node,
0458                next_node,
0459                next_node_idle,
0460                _Scheduler_strong_APA_Allocate_processor
0461              );
0462 
0463       if ( curr_node == highest_ready ) {
0464         _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
0465       }
0466 
0467       node = _Scheduler_strong_APA_Node_downcast( next_node );
0468     }
0469     /*
0470      * Save the last node so that the calling _Scheduler_SMP_* function
0471      * can do the processor allocation.
0472      */
0473     curr_node = &node->Base.Base;
0474     highest_ready = curr_node;
0475     _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, curr_node );
0476   }
0477 
0478   return highest_ready;
0479 }
0480 
0481 /*
0482  * Finds the lowest priority scheduled node directly reachable from filter_base.
0483  */
0484 static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
0485   Scheduler_Context *context,
0486   Scheduler_Node    *filter_base
0487 )
0488 {
0489   uint32_t                  cpu_max;
0490   uint32_t                  cpu_index;
0491   Scheduler_Node               *curr_node;
0492   Scheduler_Node               *lowest_scheduled = NULL;
0493   Priority_Control              max_priority_num;
0494   Priority_Control              curr_priority;
0495   Scheduler_strong_APA_Node    *filter_strong_node;
0496   Scheduler_strong_APA_Context *self;
0497 
0498   self = _Scheduler_strong_APA_Get_self( context );
0499   max_priority_num = 0;    /* Max (Lowest) priority encountered so far */
0500   filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );
0501 
0502   /* lowest_scheduled is NULL only if the affinity set of the node is empty */
0503   _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
0504   cpu_max = _SMP_Get_processor_maximum();
0505 
0506   for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
0507     /* Checks if the CPU is in the affinity set of filter_strong_node */
0508     if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
0509       Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
0510 
0511       if ( _Per_CPU_Is_processor_online( cpu ) ) {
0512         curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
0513         curr_priority = _Scheduler_Node_get_priority( curr_node );
0514         curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
0515 
0516         if ( curr_priority > max_priority_num ) {
0517           lowest_scheduled = curr_node;
0518           max_priority_num = curr_priority;
0519         }
0520       }
0521     }
0522   }
0523 
0524   _Assert( lowest_scheduled != NULL );
0525   return lowest_scheduled;
0526 }
0527 
0528 static inline void _Scheduler_strong_APA_Extract_from_scheduled(
0529   Scheduler_Context *context,
0530   Scheduler_Node    *node_to_extract
0531 )
0532 {
0533   Scheduler_strong_APA_Context *self;
0534   Scheduler_strong_APA_Node    *node;
0535 
0536   self = _Scheduler_strong_APA_Get_self( context );
0537   node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
0538 
0539   _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
0540   /* The node is not removed from Ready since it may go into the READY state */
0541 }
0542 
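     /*
      * Removes the node from the Ready chain in case it is still on it.
      */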
0543 static inline void _Scheduler_strong_APA_Extract_from_ready(
0544   Scheduler_Context *context,
0545   Scheduler_Node    *node_to_extract
0546 )
0547 {
0548   Scheduler_strong_APA_Node    *node;
0549 
0550   node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
0551 
0552   if ( !_Chain_Is_node_off_chain( &node->Ready_node ) ) {
0553     _Chain_Extract_unprotected( &node->Ready_node );
0554     _Chain_Set_off_chain( &node->Ready_node );
0555   }
0557 }
0558 
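     /*
      * Performs a breadth first search over the CPU queue stored in self->CPU
      * between the front and rear indexes.  For each dequeued CPU the scheduled
      * node is examined, the lowest priority node found so far is recorded
      * together with its CPU in *cpu_to_preempt, and the online CPUs in the
      * affinity set of a non-idle node which have not been visited yet are
      * appended to the queue with preempting_node set to that node.  Returns
      * the lowest priority scheduled node reachable this way.
      */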
0559 static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
0560   Scheduler_strong_APA_Context *self,
0561   uint32_t                      front,
0562   uint32_t                      rear,
0563   Per_CPU_Control             **cpu_to_preempt
0564 )
0565 {
0566   Scheduler_Node              *lowest_reachable = NULL;
0567   Priority_Control             max_priority_num;
0568   uint32_t                 cpu_max;
0569   uint32_t                 cpu_index;
0570   Thread_Control              *curr_thread;
0571   Per_CPU_Control             *curr_CPU;
0572   Priority_Control             curr_priority;
0573   Scheduler_Node              *curr_node;
0574   Scheduler_strong_APA_Node   *curr_strong_node;
0575   Scheduler_strong_APA_CPU    *CPU;
0576 
0577   /* Max (Lowest) priority encountered so far */
0578   max_priority_num = 0;
0579   CPU = self->CPU;
0580   cpu_max = _SMP_Get_processor_maximum();
0581 
0582   while ( front <= rear ) {
0583     curr_CPU = CPU[ front ].cpu;
0584     front = front + 1;
0585 
0586     curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
0587     curr_thread = curr_node->user;
0588 
0589     curr_priority = _Scheduler_Node_get_priority( curr_node );
0590     curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
0591 
0592     curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
0593 
0594     if ( curr_priority > max_priority_num ) {
0595       lowest_reachable = curr_node;
0596       max_priority_num = curr_priority;
0597       *cpu_to_preempt = curr_CPU;
0598     }
0599 
0600     if ( !curr_thread->is_idle ) {
0601       for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
0602         if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
0603           /* Checks if the CPU is in the affinity set of the node */
0604           Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
0605           if (
0606             _Per_CPU_Is_processor_online( cpu ) &&
0607             CPU[ cpu_index ].visited == false
0608           ) {
0609             rear = rear + 1;
0610             CPU[ rear ].cpu = cpu;
0611             CPU[ cpu_index ].visited = true;
0612             CPU[ cpu_index ].preempting_node = curr_node;
0613           }
0614         }
0615       }
0616     }
0617   }
0618   /*
0619    * Since a task is not allowed to have an empty affinity set,
0620    * there is always a lowest_reachable task, hence it cannot be NULL.
0621    */
0622   _Assert( lowest_reachable != NULL );
0623 
0624   return lowest_reachable;
0625 }
0626 
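     /*
      * Completes the enqueue of a newly arrived node after the breadth first
      * search done by _Scheduler_strong_APA_Enqueue().  In case the node has a
      * higher priority than the lowest reachable scheduled node, the nodes on
      * the backtracked path of preempting_node links are shifted along their
      * recorded cpu_to_preempt values and lowest_reachable is moved to the
      * Ready chain.  Otherwise the node needs help.  In both cases the node is
      * appended to the Ready chain.
      */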
0627 static inline bool _Scheduler_strong_APA_Do_enqueue(
0628   Scheduler_Context *context,
0629   Scheduler_Node    *lowest_reachable,
0630   Scheduler_Node    *node,
0631   Priority_Control  insert_priority,
0632   Per_CPU_Control  *cpu_to_preempt
0633 )
0634 {
0635   bool                          needs_help;
0636   Priority_Control              node_priority;
0637   Priority_Control              lowest_priority;
0638   Scheduler_strong_APA_CPU     *CPU;
0639   Scheduler_Node               *curr_node;
0640   /* The first node that gets removed from its CPU */
0641   Scheduler_Node               *first_node;
0642   Scheduler_strong_APA_Node    *curr_strong_node;
0643   Per_CPU_Control              *curr_CPU;
0644   Scheduler_strong_APA_Context *self;
0645   Scheduler_Node               *next_node;
0647 
0648   self = _Scheduler_strong_APA_Get_self( context );
0649   CPU = self->CPU;
0650 
0651   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
0652 
0653   node_priority = _Scheduler_Node_get_priority( node );
0654   node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
0655 
0656   if ( lowest_reachable == NULL ) {
0657     /*
0658      * This means the affinity set of the newly arrived node
0659      * is empty.
0660      */
0661     lowest_priority = UINT64_MAX;
0662   } else {
0663     lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
0664     lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
0665   }
0666 
0667   if ( lowest_priority > node_priority ) {
0668     /*
0669      * Backtrack on the path from the CPU of the lowest reachable node
0670      * towards the newly arrived node, recording for every node on the path
0671      * which CPU it has to preempt.
0672      */
0673 
0674     curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
0675     curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
0676     curr_strong_node->cpu_to_preempt = cpu_to_preempt;
0677 
0678     /* Save which CPU to preempt in the cpu_to_preempt value of each node */
0679     while ( curr_node != node ) {
0680       curr_CPU = _Thread_Get_CPU( curr_node->user );
0681       curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
0682       curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
0683       curr_strong_node->cpu_to_preempt = curr_CPU;
0684     }
0685 
0686     curr_CPU = curr_strong_node->cpu_to_preempt;
0687     next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
0688 
0689     node_priority = _Scheduler_Node_get_priority( curr_node );
0690     node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
0691 
0692     _Scheduler_SMP_Enqueue_to_scheduled(
0693       context,
0694       curr_node,
0695       node_priority,
0696       next_node,
0697       _Scheduler_SMP_Insert_scheduled,
0698       _Scheduler_strong_APA_Move_from_scheduled_to_ready,
0699       _Scheduler_strong_APA_Move_from_ready_to_scheduled,
0700       _Scheduler_strong_APA_Allocate_processor,
0701       _Scheduler_strong_APA_Get_idle,
0702       _Scheduler_strong_APA_Release_idle
0703     );
0704 
0705     curr_node = next_node;
0706     first_node = curr_node;
0707     curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
0708 
0709     while ( curr_node != lowest_reachable ) {
0710       Thread_Control *next_node_idle;
0711 
0712       curr_CPU = curr_strong_node->cpu_to_preempt;
0713       next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
0714       next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
0715         next_node,
0716         _Scheduler_strong_APA_Release_idle,
0717         context
0718       );
0719       /* curr_node preempts the next_node */
0720       _Scheduler_SMP_Preempt(
0721         context,
0722         curr_node,
0723         next_node,
0724         next_node_idle,
0725         _Scheduler_strong_APA_Allocate_processor
0726       );
0727 
0728       if ( curr_node == first_node ) {
0729         _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, first_node);
0730       }
0731       curr_node = next_node;
0732       curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
0733     }
0734 
0735     _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );
0736 
0737     needs_help = false;
0738   } else {
0739     needs_help = true;
0740   }
0741 
0742   /* Add it to Ready chain since it is now either scheduled or just ready. */
0743   _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );
0744 
0745   return needs_help;
0746 }
0747 
0748 /*
0749  * Implements the BFS algorithm for task arrival: enqueues the node either in
0750  * the scheduled chain or in the ready chain.  The node is the newly arrived
0751  * node and is currently not scheduled.
0752  */
0753 static inline bool _Scheduler_strong_APA_Enqueue(
0754   Scheduler_Context *context,
0755   Scheduler_Node    *node,
0756   Priority_Control   insert_priority
0757 )
0758 {
0759   Scheduler_strong_APA_Context *self;
0760   Scheduler_strong_APA_CPU     *CPU;
0761   uint32_t                  cpu_max;
0762   uint32_t                  cpu_index;
0763   Per_CPU_Control              *cpu_to_preempt = NULL;
0764   Scheduler_Node               *lowest_reachable;
0765   Scheduler_strong_APA_Node    *strong_node;
0766 
0767   /* Denotes front and rear of the queue */
0768   uint32_t  front;
0769   uint32_t  rear;
0770 
0771   front = 0;
0772   rear = -1;
0773 
0774   self = _Scheduler_strong_APA_Get_self( context );
0775   strong_node = _Scheduler_strong_APA_Node_downcast( node );
0776   cpu_max = _SMP_Get_processor_maximum();
0777   CPU = self->CPU;
0778 
0779   for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
0780     CPU[ cpu_index ].visited = false;
0781 
0782     /* Checks if the CPU is in the affinity set of the node */
0783     if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
0784       Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
0785 
0786       if ( _Per_CPU_Is_processor_online( cpu ) ) {
0787         rear = rear + 1;
0788         CPU[ rear ].cpu = cpu;
0789         CPU[ cpu_index ].visited = true;
0790         CPU[ cpu_index ].preempting_node = node;
0791       }
0792     }
0793   }
0794 
0795   lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
0796                        self,
0797                        front,
0798                        rear,
0799                        &cpu_to_preempt
0800                      );
0801   /*
0802    * Since a task is not allowed to have an empty affinity set,
0803    * there is always a lowest_reachable task, hence cpu_to_preempt
0804    * cannot be NULL.
0805    */
0806   _Assert( cpu_to_preempt != NULL );
0807   return _Scheduler_strong_APA_Do_enqueue(
0808            context,
0809            lowest_reachable,
0810            node,
0811            insert_priority,
0812            cpu_to_preempt
0813          );
0814 }
0815 
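     /*
      * Enqueues a scheduled node, delegating to
      * _Scheduler_SMP_Enqueue_scheduled() with the Strong APA handlers.
      */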
0816 static inline void _Scheduler_strong_APA_Enqueue_scheduled(
0817   Scheduler_Context *context,
0818   Scheduler_Node    *node,
0819   Priority_Control   insert_priority
0820 )
0821 {
0822   _Scheduler_SMP_Enqueue_scheduled(
0823     context,
0824     node,
0825     insert_priority,
0826     _Scheduler_SMP_Priority_less_equal,
0827     _Scheduler_strong_APA_Extract_from_ready,
0828     _Scheduler_strong_APA_Get_highest_ready,
0829     _Scheduler_strong_APA_Insert_ready,
0830     _Scheduler_SMP_Insert_scheduled,
0831     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
0832     _Scheduler_strong_APA_Allocate_processor,
0833     _Scheduler_strong_APA_Get_idle,
0834     _Scheduler_strong_APA_Release_idle
0835   );
0836 }
0837 
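     /*
      * Delegates to _Scheduler_SMP_Ask_for_help() with the Strong APA handlers.
      */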
0838 static inline bool _Scheduler_strong_APA_Do_ask_for_help(
0839   Scheduler_Context *context,
0840   Thread_Control    *the_thread,
0841   Scheduler_Node    *node
0842 )
0843 {
0844   return _Scheduler_SMP_Ask_for_help(
0845     context,
0846     the_thread,
0847     node,
0848     _Scheduler_SMP_Priority_less_equal,
0849     _Scheduler_strong_APA_Insert_ready,
0850     _Scheduler_SMP_Insert_scheduled,
0851     _Scheduler_strong_APA_Move_from_scheduled_to_ready,
0852     _Scheduler_strong_APA_Get_lowest_scheduled,
0853     _Scheduler_strong_APA_Allocate_processor,
0854     _Scheduler_strong_APA_Release_idle
0855   );
0856 }
0857 
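     /*
      * Sets the affinity of the node to the processor mask passed through arg.
      * Used as the set-affinity handler of _Scheduler_SMP_Set_affinity().
      */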
0858 static inline void _Scheduler_strong_APA_Do_set_affinity(
0859   Scheduler_Context *context,
0860   Scheduler_Node    *node_base,
0861   void              *arg
0862 )
0863 {
0864   Scheduler_strong_APA_Node *node;
0865 
0866   node = _Scheduler_strong_APA_Node_downcast( node_base );
0867   node->Affinity = *( (const Processor_mask *) arg );
0868 }
0869 
0870 void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
0871 {
0872   Scheduler_strong_APA_Context *self =
0873       _Scheduler_strong_APA_Get_context( scheduler );
0874 
0875   _Scheduler_SMP_Initialize( &self->Base );
0876   _Chain_Initialize_empty( &self->Ready );
0877 }
0878 
0879 void _Scheduler_strong_APA_Yield(
0880   const Scheduler_Control *scheduler,
0881   Thread_Control          *thread,
0882   Scheduler_Node          *node
0883 )
0884 {
0885   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0886 
0887   _Scheduler_SMP_Yield(
0888     context,
0889     thread,
0890     node,
0891     _Scheduler_strong_APA_Extract_from_scheduled,
0892     _Scheduler_strong_APA_Extract_from_ready,
0893     _Scheduler_strong_APA_Enqueue,
0894     _Scheduler_strong_APA_Enqueue_scheduled
0895   );
0896 }
0897 
0898 void _Scheduler_strong_APA_Block(
0899   const Scheduler_Control *scheduler,
0900   Thread_Control          *thread,
0901   Scheduler_Node          *node
0902 )
0903 {
0904   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0905 
0906   /*
0907    * Needed in case the node is a scheduled node, since _Scheduler_SMP_Block()
0908    * only extracts from the SMP scheduled chain and extracts from the Strong
0909    * APA Ready chain only when the node is ready.  The Strong APA Ready chain,
0910    * however, stores both ready and scheduled nodes.
0911    */
0912   _Scheduler_strong_APA_Extract_from_ready( context, node );
0913 
0914   _Scheduler_SMP_Block(
0915     context,
0916     thread,
0917     node,
0918     _Scheduler_strong_APA_Extract_from_scheduled,
0919     _Scheduler_strong_APA_Extract_from_ready,
0920     _Scheduler_strong_APA_Get_highest_ready,
0921     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
0922     _Scheduler_strong_APA_Allocate_processor,
0923     _Scheduler_strong_APA_Get_idle
0924   );
0925 }
0926 
0927 void _Scheduler_strong_APA_Unblock(
0928   const Scheduler_Control *scheduler,
0929   Thread_Control          *thread,
0930   Scheduler_Node          *node
0931 )
0932 {
0933   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0934 
0935   _Scheduler_SMP_Unblock(
0936     context,
0937     thread,
0938     node,
0939     _Scheduler_strong_APA_Do_update,
0940     _Scheduler_strong_APA_Enqueue,
0941     _Scheduler_strong_APA_Release_idle
0942   );
0943 }
0944 
0945 void _Scheduler_strong_APA_Update_priority(
0946   const Scheduler_Control *scheduler,
0947   Thread_Control          *thread,
0948   Scheduler_Node          *node
0949 )
0950 {
0951   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0952 
0953   _Scheduler_SMP_Update_priority(
0954     context,
0955     thread,
0956     node,
0957     _Scheduler_strong_APA_Extract_from_scheduled,
0958     _Scheduler_strong_APA_Extract_from_ready,
0959     _Scheduler_strong_APA_Do_update,
0960     _Scheduler_strong_APA_Enqueue,
0961     _Scheduler_strong_APA_Enqueue_scheduled,
0962     _Scheduler_strong_APA_Do_ask_for_help
0963   );
0964 }
0965 
0966 bool _Scheduler_strong_APA_Ask_for_help(
0967   const Scheduler_Control *scheduler,
0968   Thread_Control          *the_thread,
0969   Scheduler_Node          *node
0970 )
0971 {
0972   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0973 
0974   return _Scheduler_strong_APA_Do_ask_for_help(
0975     context,
0976     the_thread,
0977     node
0978   );
0979 }
0980 
0981 void _Scheduler_strong_APA_Reconsider_help_request(
0982   const Scheduler_Control *scheduler,
0983   Thread_Control          *the_thread,
0984   Scheduler_Node          *node
0985 )
0986 {
0987   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
0988 
0989   _Scheduler_SMP_Reconsider_help_request(
0990     context,
0991     the_thread,
0992     node,
0993     _Scheduler_strong_APA_Extract_from_ready
0994   );
0995 }
0996 
0997 void _Scheduler_strong_APA_Withdraw_node(
0998   const Scheduler_Control *scheduler,
0999   Thread_Control          *the_thread,
1000   Scheduler_Node          *node,
1001   Thread_Scheduler_state   next_state
1002 )
1003 {
1004   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
1005 
1006   _Scheduler_SMP_Withdraw_node(
1007     context,
1008     the_thread,
1009     node,
1010     next_state,
1011     _Scheduler_strong_APA_Extract_from_scheduled,
1012     _Scheduler_strong_APA_Extract_from_ready,
1013     _Scheduler_strong_APA_Get_highest_ready,
1014     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
1015     _Scheduler_strong_APA_Allocate_processor,
1016     _Scheduler_strong_APA_Get_idle
1017   );
1018 }
1019 
1020 void _Scheduler_strong_APA_Make_sticky(
1021   const Scheduler_Control *scheduler,
1022   Thread_Control          *the_thread,
1023   Scheduler_Node          *node
1024 )
1025 {
1026   _Scheduler_SMP_Make_sticky(
1027     scheduler,
1028     the_thread,
1029     node,
1030     _Scheduler_strong_APA_Do_update,
1031     _Scheduler_strong_APA_Enqueue
1032   );
1033 }
1034 
1035 void _Scheduler_strong_APA_Clean_sticky(
1036   const Scheduler_Control *scheduler,
1037   Thread_Control          *the_thread,
1038   Scheduler_Node          *node
1039 )
1040 {
1041   _Scheduler_SMP_Clean_sticky(
1042     scheduler,
1043     the_thread,
1044     node,
1045     _Scheduler_SMP_Extract_from_scheduled,
1046     _Scheduler_strong_APA_Extract_from_ready,
1047     _Scheduler_strong_APA_Get_highest_ready,
1048     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
1049     _Scheduler_strong_APA_Allocate_processor,
1050     _Scheduler_strong_APA_Get_idle,
1051     _Scheduler_strong_APA_Release_idle
1052   );
1053 }
1054 
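     /*
      * Records the idle node as the node scheduled on the CPU.
      */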
1055 static inline void _Scheduler_strong_APA_Register_idle(
1056   Scheduler_Context *context,
1057   Scheduler_Node    *idle_base,
1058   Per_CPU_Control   *cpu
1059 )
1060 {
1061   Scheduler_strong_APA_Context *self;
1062   self = _Scheduler_strong_APA_Get_self( context );
1063 
1064   _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
1065 }
1066 
1067 void _Scheduler_strong_APA_Add_processor(
1068   const Scheduler_Control *scheduler,
1069   Thread_Control          *idle
1070 )
1071 {
1072   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
1073 
1074   _Scheduler_SMP_Add_processor(
1075     context,
1076     idle,
1077     _Scheduler_strong_APA_Has_ready,
1078     _Scheduler_strong_APA_Enqueue_scheduled,
1079     _Scheduler_strong_APA_Register_idle
1080   );
1081 }
1082 
1083 void _Scheduler_strong_APA_Start_idle(
1084   const Scheduler_Control *scheduler,
1085   Thread_Control          *idle,
1086   Per_CPU_Control         *cpu
1087 )
1088 {
1089   Scheduler_Context *context;
1090 
1091   context = _Scheduler_Get_context( scheduler );
1092 
1093   _Scheduler_SMP_Do_start_idle(
1094     context,
1095     idle,
1096     cpu,
1097     _Scheduler_strong_APA_Register_idle
1098   );
1099 }
1100 
1101 Thread_Control *_Scheduler_strong_APA_Remove_processor(
1102   const Scheduler_Control *scheduler,
1103   Per_CPU_Control         *cpu
1104 )
1105 {
1106   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
1107 
1108   return _Scheduler_SMP_Remove_processor(
1109     context,
1110     cpu,
1111     _Scheduler_strong_APA_Extract_from_scheduled,
1112     _Scheduler_strong_APA_Extract_from_ready,
1113     _Scheduler_strong_APA_Enqueue,
1114     _Scheduler_strong_APA_Get_idle,
1115     _Scheduler_strong_APA_Release_idle
1116   );
1117 }
1118 
1119 void _Scheduler_strong_APA_Node_initialize(
1120   const Scheduler_Control *scheduler,
1121   Scheduler_Node          *node,
1122   Thread_Control          *the_thread,
1123   Priority_Control         priority
1124 )
1125 {
1126   Scheduler_SMP_Node *smp_node;
1127   Scheduler_strong_APA_Node *strong_node;
1128 
1129   smp_node = _Scheduler_SMP_Node_downcast( node );
1130   strong_node = _Scheduler_strong_APA_Node_downcast( node );
1131 
1132   _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
1133 
1134   _Processor_mask_Assign(
1135     &strong_node->Affinity,
1136     _SMP_Get_online_processors()
1137   );
1138 }
1139 
1140 Status_Control _Scheduler_strong_APA_Set_affinity(
1141   const Scheduler_Control *scheduler,
1142   Thread_Control          *thread,
1143   Scheduler_Node          *node_base,
1144   const Processor_mask    *affinity
1145 )
1146 {
1147   Scheduler_Context         *context;
1148   Scheduler_strong_APA_Node *node;
1149   Processor_mask             local_affinity;
1150 
1151   context = _Scheduler_Get_context( scheduler );
1152   _Processor_mask_And( &local_affinity, &context->Processors, affinity );
1153 
1154   if ( _Processor_mask_Is_zero( &local_affinity ) ) {
1155     return STATUS_INVALID_NUMBER;
1156   }
1157 
1158   node = _Scheduler_strong_APA_Node_downcast( node_base );
1159 
1160   if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
1161     return STATUS_SUCCESSFUL; /* Nothing to do */
1162 
1163   _Processor_mask_Assign( &node->Affinity, &local_affinity );
1164 
1165   _Scheduler_SMP_Set_affinity(
1166     context,
1167     thread,
1168     node_base,
1169     &local_affinity,
1170     _Scheduler_strong_APA_Do_set_affinity,
1171     _Scheduler_strong_APA_Extract_from_scheduled,
1172     _Scheduler_strong_APA_Extract_from_ready,
1173     _Scheduler_strong_APA_Get_highest_ready,
1174     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
1175     _Scheduler_strong_APA_Enqueue,
1176     _Scheduler_strong_APA_Allocate_processor,
1177     _Scheduler_strong_APA_Get_idle,
1178     _Scheduler_strong_APA_Release_idle
1179   );
1180 
1181   return STATUS_SUCCESSFUL;
1182 }